/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <trace/sched.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

DEFINE_TRACE(sched_signal_send);

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	handler = sig_handler(t, sig);
	if (!sig_handler_ignored(handler, sig))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !tracehook_consider_ignored_signal(t, sig, handler);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
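
/*
 * Aside (added commentary, not in the original source): the switch in
 * has_pending_signals() is just a manually unrolled version of the
 * generic word-wise "pending & ~blocked" test below, specialized for
 * the common values of _NSIG_WORDS:
 *
 *	unsigned long ready = 0;
 *	long i;
 *
 *	for (i = 0; i < _NSIG_WORDS; i++)
 *		ready |= signal->sig[i] & ~blocked->sig[i];
 *	return ready != 0;
 */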

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only the callers who know they
	 * should clear it do so.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (unlikely(tracehook_force_sigpending()))
		set_thread_flag(TIF_SIGPENDING);
	else if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced. */

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * We won't get problems with the target's UID changing under us
	 * because changing it requires RCU be used, and if t != current, the
	 * caller must be holding the RCU readlock (by way of a spinlock) and
	 * we use RCU protection here
	 */
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
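
/*
 * Aside (added commentary): every queued signal is charged to the
 * sender's user_struct, so the two helpers above always pair up as
 *
 *	__sigqueue_alloc():  get_uid(user);  atomic_inc(&user->sigpending);
 *	__sigqueue_free():   atomic_dec(&q->user->sigpending);  free_uid(q->user);
 *
 * which is what makes RLIMIT_SIGPENDING a per-user limit rather than a
 * per-task one.
 */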

/*
 * Flush all pending signals for a task.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	return !tracehook_consider_fatal_signal(tsk, sig, handler);
}


/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
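
/*
 * Usage sketch (added; my_notifier and my_dev are hypothetical, not
 * from this file): a driver opens a window in which the signals in
 * @mask are swallowed unless the notifier approves them.
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *
 *		return dev->critical_done;	0 blocks, non-zero delivers
 *	}
 *
 *	block_all_signals(my_notifier, dev, &mask);
 *	... critical section ...
 *	unblock_all_signals();
 */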

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path also
		 * reduces the timer noise on heavily loaded !highres
		 * systems.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
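
/*
 * Aside (added commentary): TASK_WAKEKILL is the state bit shared by
 * TASK_STOPPED, TASK_TRACED and TASK_KILLABLE, so the @resume flag
 * decides how far the wakeup reaches:
 *
 *	signal_wake_up(t, 0);	wakes TASK_INTERRUPTIBLE sleeps only
 *	signal_wake_up(t, 1);	also wakes stopped/traced/killable tasks,
 *				as needed for SIGKILL
 */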

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold at least the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	const struct cred *cred = current_cred(), *tcred;
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	tcred = __task_cred(t);
	if ((cred->euid ^ tcred->suid) &&
	    (cred->euid ^ tcred->uid) &&
	    (cred->uid  ^ tcred->suid) &&
	    (cred->uid  ^ tcred->uid) &&
	    !capable(CAP_KILL)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = __TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from finish_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
		}
	}

	return !sig_ignored(p, sig);
}
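
/*
 * Summary sketch (added commentary): the bookkeeping above is
 * deliberately symmetric, so the two intents can never be pending at
 * the same time:
 *
 *	stop signal sent  -> rm_from_queue(sigmask(SIGCONT), ...)
 *	SIGCONT sent      -> rm_from_queue(SIG_KERNEL_STOP_MASK, ...)
 *
 * i.e. generating either one cancels any queued instance of the other
 * before the new signal is made pending.
 */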

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL ||
	     !tracehook_consider_fatal_signal(t, sig, SIG_DFL))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
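
/*
 * Worked example (added commentary): legacy_queue() is the reason a
 * classic signal coalesces while a real-time one queues:
 *
 *	kill(pid, SIGCHLD);		first one becomes pending
 *	kill(pid, SIGCHLD);		dropped -- already pending
 *
 *	sigqueue(pid, SIGRTMIN, v1);	queued
 *	sigqueue(pid, SIGRTMIN, v2);	queued as a second entry
 *
 * (sigqueue() being the userspace API backed by rt_sigqueueinfo.)
 */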

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	struct sigpending *pending;
	struct sigqueue *q;

	trace_sched_signal_send(sig, t);

	assert_spin_locked(&t->sighand->siglock);
	if (!prepare_signal(sig, t))
		return 0;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	if (legacy_queue(pending, sig))
		return 0;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = current_uid();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	return 0;
}

int print_fatal_signals;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			__get_user(insn, (unsigned char *)(regs->ip + i));
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}
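
/*
 * Aside (added commentary): the only difference between the two
 * wrappers above is the @group flag handed to send_signal(), i.e.
 * which pending set the signal lands in:
 *
 *	__group_send_sig_info()   -> t->signal->shared_pending,
 *				     delivered by any thread in the group
 *	specific_send_sig_info()  -> t->pending,
 *				     delivered by exactly that thread
 */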

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}

int __fatal_signal_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL);
}
EXPORT_SYMBOL(__fatal_signal_pending);

struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
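
/*
 * Usage sketch (added; the caller shown is hypothetical): this is the
 * standard idiom for taking ->siglock on a task that may be exiting,
 * since ->sighand can be torn down under RCU:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		... p->sighand is pinned and siglock is held ...
 *		unlock_task_sighand(p, &flags);
 *	}
 *	else: the task is already dead and has no sighand to lock.
 */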

/*
 * send signal info to all the members of a group
 * - the caller must hold the RCU read lock at least
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	const struct cred *pcred;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	pcred = __task_cred(p);
	if ((info == SEND_SIG_NOINFO ||
	     (!is_si_special(info) && SI_FROMUSER(info))) &&
	    euid != pcred->suid && euid != pcred->uid &&
	    uid  != pcred->suid && uid  != pcred->uid) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
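
/*
 * Worked example (added commentary): the branches above implement the
 * usual kill(2) pid conventions:
 *
 *	pid >  0	signal the single process pid
 *	pid == 0	signal the caller's own process group
 *	pid <  -1	signal process group -pid
 *	pid == -1	signal every process the caller may signal,
 *			except init (vpid 1) and the caller's own group
 */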

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * The caller must ensure the task can't exit.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	return ret;
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure at timer_create() time.  If this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
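
/*
 * Lifecycle sketch (added commentary): for POSIX timers the sigqueue
 * entry is allocated exactly once and then recycled for every expiry,
 * so delivery can never fail under memory pressure:
 *
 *	timer_create()	-> sigqueue_alloc()	may fail, reported as EAGAIN
 *	timer expiry	-> send_sigqueue()	reuses the same entry
 *	timer_delete()	-> sigqueue_free()
 */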

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	if (!prepare_signal(sig, t))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
out:
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
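
/*
 * Aside (added commentary): the si_overrun path above is where
 * timer_getoverrun() data comes from -- if the timer fires again while
 * its single preallocated entry is still queued, no second entry is
 * added; the pending one's q->info.si_overrun is bumped instead.
 */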

/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns -1 if our parent ignored us and so we've switched to
 * self-reaping, or else @sig.
 */
int do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	int ret = sig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot exit and release its namespace.
	 *
	 * The only thing it can do is switch its nsproxy with sys_unshare,
	 * but unsharing pid namespaces is not allowed, so we will always
	 * see a relevant namespace.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	info.si_uid = __task_cred(tsk)->uid;
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
				tsk->signal->utime));
	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
				tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		ret = tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = -1;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return ret;
}
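
/*
 * Worked example (added commentary): the exit_code decoding above
 * mirrors the userspace wait-status encoding:
 *
 *	tsk->exit_code = 0x0009  (0x7f bits)  -> CLD_KILLED, si_status = 9
 *	tsk->exit_code = 0x008b  (0x80 bit)   -> CLD_DUMPED, si_status = 11
 *	tsk->exit_code = 0x0100  (high byte)  -> CLD_EXITED, si_status = 1
 */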

static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (tsk->ptrace & PT_PTRACED)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 3 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	info.si_uid = __task_cred(tsk)->uid;
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(tsk->utime);
	info.si_stime = cputime_to_clock_t(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace & PT_PTRACED))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so, and our tracer is also part of the coredump, stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead, so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return nonzero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
{
	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	__set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * While in TASK_TRACED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current_uid();

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 1, &info);
	spin_unlock_irq(&current->sighand->siglock);
}
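
/*
 * Usage sketch (added commentary): callers of ptrace_notify() encode a
 * ptrace event number in the high byte while keeping SIGTRAP in the
 * low byte, which is exactly what the BUG_ON above enforces, e.g.
 *
 *	ptrace_notify((PTRACE_EVENT_FORK << 8) | SIGTRAP);
 *
 * The tracer then recovers the event from the waitpid() status.
 */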

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	do {
		schedule();
	} while (try_to_freeze());
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int stop_count;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		stop_count = --sig->group_stop_count;
	} else {
		struct task_struct *t;

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return 0;
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		sig->group_exit_code = signr;

		stop_count = 0;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!(t->flags & PF_EXITING) &&
			    !task_is_stopped_or_traced(t)) {
				stop_count++;
				signal_wake_up(t, 0);
			}
		sig->group_stop_count = stop_count;
	}

	if (stop_count == 0)
		sig->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = sig->group_exit_code;
	__set_current_state(TASK_STOPPED);

	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}

static int ptrace_signal(int signr, siginfo_t *info,
			 struct pt_regs *regs, void *cookie)
{
	if (!(current->ptrace & PT_PTRACED))
		return signr;

	ptrace_signal_deliver(regs, cookie);

	/* Let the debugger run.  */
	ptrace_stop(signr, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/* Update the siginfo structure if the signal has
	   changed.  If the debugger wanted something
	   specific in the siginfo structure then it should
	   have updated *info via PTRACE_SETSIGINFO.  */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = task_uid(current->parent);
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
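
/*
 * Flow sketch (added commentary): for a traced task every dequeued
 * signal except SIGKILL makes a round trip through the debugger:
 *
 *	tracee:	ptrace_stop(signr, ...)		stops in TASK_TRACED
 *	tracer:	waitpid(), optionally PTRACE_SETSIGINFO, then
 *		ptrace(PTRACE_CONT, pid, 0, new_signr)
 *	tracee:	signr = current->exit_code	0 means "signal cancelled"
 */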

int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

relock:
	/*
	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
	 * While in TASK_STOPPED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
				? CLD_CONTINUED : CLD_STOPPED;
		signal->flags &= ~SIGNAL_CLD_MASK;
		spin_unlock_irq(&sighand->siglock);

		if (unlikely(!tracehook_notify_jctl(1, why)))
			goto relock;

		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader, why);
		read_unlock(&tasklist_lock);
		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(signal->group_stop_count > 0) &&
		    do_signal_stop(0))
			goto relock;

		/*
		 * Tracing can induce an artificial signal and choose sigaction.
		 * The return value in @signr determines the default action,
		 * but @info->si_signo is the signal number we will report.
		 */
		signr = tracehook_get_signal(current, regs, info, return_ka);
		if (unlikely(signr < 0))
			goto relock;
		if (unlikely(signr != 0))
			ka = return_ka;
		else {
			signr = dequeue_signal(current, &current->blocked,
					       info);

			if (!signr)
				break; /* will return 0 */

			if (signr != SIGKILL) {
				signr = ptrace_signal(signr, info,
						      regs, cookie);
				if (!signr)
					continue;
			}

			ka = &sighand->action[signr-1];
		}

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
		    !signal_group_exit(signal))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(info->si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(regs, info->si_signo);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(info->si_signo, info->si_signo, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(info->si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);
	return signr;
}

void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	struct task_struct *t;

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;
	if (!signal_pending(tsk))
		goto out;

	/* It could be that __group_complete_signal() chose us to
	 * notify about a group-wide signal. Another thread should be
	 * woken now to take the signal since we will not.
	 */
	for (t = tsk; (t = next_thread(t)) != tsk; )
		if (!signal_pending(t) && !(t->flags & PF_EXITING))
			recalc_sigpending_and_wake(t);

	if (unlikely(tsk->signal->group_stop_count) &&
			!--tsk->signal->group_stop_count) {
		tsk->signal->flags = SIGNAL_STOP_STOPPED;
		group_stop = 1;
	}
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}
}

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);


/*
 * System call entry points.
 */

SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
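
/*
 * Aside (added commentary; my_restart_fn is a hypothetical helper):
 * restart_block is the mechanism behind -ERESTART_RESTARTBLOCK.  A
 * syscall that must resume with recomputed arguments (e.g. a partially
 * elapsed sleep) stores a continuation before returning:
 *
 *	restart = &current_thread_info()->restart_block;
 *	restart->fn = my_restart_fn;	plus any state the fn needs
 *	return -ERESTART_RESTARTBLOCK;
 *
 * and sys_restart_syscall() above simply re-invokes restart->fn.
 */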

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
        int error;

        spin_lock_irq(&current->sighand->siglock);
        if (oldset)
                *oldset = current->blocked;

        error = 0;
        switch (how) {
        case SIG_BLOCK:
                sigorsets(&current->blocked, &current->blocked, set);
                break;
        case SIG_UNBLOCK:
                signandsets(&current->blocked, &current->blocked, set);
                break;
        case SIG_SETMASK:
                current->blocked = *set;
                break;
        default:
                error = -EINVAL;
        }
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        return error;
}

SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
                sigset_t __user *, oset, size_t, sigsetsize)
{
        int error = -EINVAL;
        sigset_t old_set, new_set;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
                goto out;

        if (set) {
                error = -EFAULT;
                if (copy_from_user(&new_set, set, sizeof(*set)))
                        goto out;
                sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

                error = sigprocmask(how, &new_set, &old_set);
                if (error)
                        goto out;
                if (oset)
                        goto set_old;
        } else if (oset) {
                spin_lock_irq(&current->sighand->siglock);
                old_set = current->blocked;
                spin_unlock_irq(&current->sighand->siglock);

set_old:
                error = -EFAULT;
                if (copy_to_user(oset, &old_set, sizeof(*oset)))
                        goto out;
        }
        error = 0;
out:
        return error;
}

long do_sigpending(void __user *set, unsigned long sigsetsize)
{
        long error = -EINVAL;
        sigset_t pending;

        if (sigsetsize > sizeof(sigset_t))
                goto out;

        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&pending, &current->pending.signal,
                  &current->signal->shared_pending.signal);
        spin_unlock_irq(&current->sighand->siglock);

        /* Outside the lock because only this thread touches it.  */
        sigandsets(&pending, &current->blocked, &pending);

        error = -EFAULT;
        if (!copy_to_user(set, &pending, sigsetsize))
                error = 0;

out:
        return error;
}

SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
{
        return do_sigpending(set, sigsetsize);
}
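
/*
 * [Editor's sketch -- not part of the original file.]  From userspace the
 * entry points above are reached through the POSIX sigprocmask() and
 * sigpending() wrappers.  Blocking a signal and raising it leaves it in
 * the pending set, which is exactly what do_sigpending() reports.
 * Hypothetical demo, compiled out:
 */
#if 0
#include <assert.h>
#include <signal.h>
#include <stdio.h>

int main(void)
{
        sigset_t block, pending;

        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);

        /* SIG_BLOCK ORs the set into current->blocked, per the switch above */
        sigprocmask(SIG_BLOCK, &block, NULL);

        raise(SIGUSR1);         /* stays pending: it is blocked */

        sigpending(&pending);   /* rt_sigpending: pending & blocked */
        assert(sigismember(&pending, SIGUSR1));
        puts("SIGUSR1 is pending while blocked");

        /* unblocking delivers it; the default action terminates us */
        sigprocmask(SIG_UNBLOCK, &block, NULL);
        puts("not reached");
        return 0;
}
#endif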

#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
        int err;

        if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
                return -EFAULT;
        if (from->si_code < 0)
                return __copy_to_user(to, from, sizeof(siginfo_t))
                        ? -EFAULT : 0;
        /*
         * If you change siginfo_t structure, please be sure
         * this code is fixed accordingly.
         * Please remember to update the signalfd_copyinfo() function
         * inside fs/signalfd.c too, in case siginfo_t changes.
         * It should never copy any pad contained in the structure
         * to avoid security leaks, but must copy the generic
         * 3 ints plus the relevant union member.
         */
        err = __put_user(from->si_signo, &to->si_signo);
        err |= __put_user(from->si_errno, &to->si_errno);
        err |= __put_user((short)from->si_code, &to->si_code);
        switch (from->si_code & __SI_MASK) {
        case __SI_KILL:
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                break;
        case __SI_TIMER:
                err |= __put_user(from->si_tid, &to->si_tid);
                err |= __put_user(from->si_overrun, &to->si_overrun);
                err |= __put_user(from->si_ptr, &to->si_ptr);
                break;
        case __SI_POLL:
                err |= __put_user(from->si_band, &to->si_band);
                err |= __put_user(from->si_fd, &to->si_fd);
                break;
        case __SI_FAULT:
                err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
                err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
                break;
        case __SI_CHLD:
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                err |= __put_user(from->si_status, &to->si_status);
                err |= __put_user(from->si_utime, &to->si_utime);
                err |= __put_user(from->si_stime, &to->si_stime);
                break;
        case __SI_RT: /* This is not generated by the kernel as of now.  */
        case __SI_MESGQ: /* But this is.  */
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                err |= __put_user(from->si_ptr, &to->si_ptr);
                break;
        default: /* this is just in case for now ... */
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                break;
        }
        return err;
}

#endif
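
/*
 * [Editor's sketch -- not part of the original file.]  The __SI_KILL case
 * above copies just si_pid and si_uid out of the union; with SA_SIGINFO a
 * userspace handler sees exactly those fields.  Hypothetical demo (show()
 * is an invented name; printf() in a handler is not async-signal-safe and
 * is used here for brevity only):
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void show(int sig, siginfo_t *si, void *ucontext)
{
        (void)ucontext;
        printf("sig=%d si_code=%d si_pid=%d si_uid=%d\n",
               sig, si->si_code, (int)si->si_pid, (int)si->si_uid);
}

int main(void)
{
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = show;
        sa.sa_flags = SA_SIGINFO;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        kill(getpid(), SIGUSR1);        /* SI_USER: si_pid/si_uid are ours */
        return 0;
}
#endif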

SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
                siginfo_t __user *, uinfo, const struct timespec __user *, uts,
                size_t, sigsetsize)
{
        int ret, sig;
        sigset_t these;
        struct timespec ts;
        siginfo_t info;
        long timeout = 0;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (copy_from_user(&these, uthese, sizeof(these)))
                return -EFAULT;

        /*
         * Invert the set of allowed signals to get those we
         * want to block.
         */
        sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
        signotset(&these);

        if (uts) {
                if (copy_from_user(&ts, uts, sizeof(ts)))
                        return -EFAULT;
                if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
                    || ts.tv_sec < 0)
                        return -EINVAL;
        }

        spin_lock_irq(&current->sighand->siglock);
        sig = dequeue_signal(current, &these, &info);
        if (!sig) {
                timeout = MAX_SCHEDULE_TIMEOUT;
                if (uts)
                        timeout = (timespec_to_jiffies(&ts)
                                   + (ts.tv_sec || ts.tv_nsec));

                if (timeout) {
                        /*
                         * None ready -- temporarily unblock those we're
                         * interested in while we are sleeping, so that
                         * we'll be awakened when they arrive.
                         */
                        current->real_blocked = current->blocked;
                        sigandsets(&current->blocked, &current->blocked, &these);
                        recalc_sigpending();
                        spin_unlock_irq(&current->sighand->siglock);

                        timeout = schedule_timeout_interruptible(timeout);

                        spin_lock_irq(&current->sighand->siglock);
                        sig = dequeue_signal(current, &these, &info);
                        current->blocked = current->real_blocked;
                        siginitset(&current->real_blocked, 0);
                        recalc_sigpending();
                }
        }
        spin_unlock_irq(&current->sighand->siglock);

        if (sig) {
                ret = sig;
                if (uinfo) {
                        if (copy_siginfo_to_user(uinfo, &info))
                                ret = -EFAULT;
                }
        } else {
                ret = -EAGAIN;
                if (timeout)
                        ret = -EINTR;
        }

        return ret;
}
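
/*
 * [Editor's sketch -- not part of the original file.]  The dequeue-or-sleep
 * logic above backs the POSIX sigtimedwait() wrapper.  Note the waited-for
 * signals must be blocked first, otherwise they may be delivered to a
 * handler instead of being dequeued synchronously.  Hypothetical demo:
 */
#if 0
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        sigset_t set;
        siginfo_t info;
        struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
        int sig;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);

        raise(SIGUSR1);         /* pends, ready to be dequeued */

        sig = sigtimedwait(&set, &info, &ts);
        if (sig == SIGUSR1)
                printf("dequeued SIGUSR1 sent by pid %d\n", (int)info.si_pid);
        else if (sig < 0 && errno == EAGAIN)
                puts("timed out");      /* the kernel's -EAGAIN path above */
        return 0;
}
#endif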

SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
        struct siginfo info;

        info.si_signo = sig;
        info.si_errno = 0;
        info.si_code = SI_USER;
        info.si_pid = task_tgid_vnr(current);
        info.si_uid = current_uid();

        return kill_something_info(sig, &info, pid);
}

static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
        int error;
        struct siginfo info;
        struct task_struct *p;
        unsigned long flags;

        error = -ESRCH;
        info.si_signo = sig;
        info.si_errno = 0;
        info.si_code = SI_TKILL;
        info.si_pid = task_tgid_vnr(current);
        info.si_uid = current_uid();

        rcu_read_lock();
        p = find_task_by_vpid(pid);
        if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
                error = check_kill_permission(sig, &info, p);
                /*
                 * The null signal is a permission and process existence
                 * probe.  No signal is actually delivered.
                 *
                 * If lock_task_sighand() fails we pretend the task dies
                 * after receiving the signal.  The window is tiny, and the
                 * signal is private anyway.
                 */
                if (!error && sig && lock_task_sighand(p, &flags)) {
                        error = specific_send_sig_info(sig, &info, p);
                        unlock_task_sighand(p, &flags);
                }
        }
        rcu_read_unlock();

        return error;
}

/**
 * sys_tgkill - send signal to one specific thread
 * @tgid: the thread group ID of the thread
 * @pid: the PID of the thread
 * @sig: signal to be sent
 *
 * This syscall also checks the @tgid and returns -ESRCH even if the PID
 * exists but no longer belongs to the target process.  This solves the
 * problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
        /* This is only valid for single tasks */
        if (pid <= 0 || tgid <= 0)
                return -EINVAL;

        return do_tkill(tgid, pid, sig);
}

/*
 * Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
        /* This is only valid for single tasks */
        if (pid <= 0)
                return -EINVAL;

        return do_tkill(0, pid, sig);
}
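
/*
 * [Editor's sketch -- not part of the original file.]  Two details above are
 * worth seeing from userspace: sig == 0 is a pure existence/permission probe
 * (nothing is delivered), and tgkill() pins the target to a thread group so
 * a recycled PID cannot be signalled by mistake.  Hypothetical demo; it
 * invokes tgkill via syscall(2) since glibc ships no wrapper for it here:
 */
#if 0
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        pid_t pid = getpid();

        /* the null signal: permission and existence check only */
        if (kill(pid, 0) == 0)
                printf("pid %d exists and we may signal it\n", (int)pid);

        /* single-threaded, so tgid == tid == pid */
        if (syscall(SYS_tgkill, pid, pid, 0) == 0)
                puts("tgkill probe succeeded");
        else if (errno == ESRCH)
                puts("thread gone, or its PID was reused elsewhere");
        return 0;
}
#endif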

SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
                siginfo_t __user *, uinfo)
{
        siginfo_t info;

        if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
                return -EFAULT;

        /*
         * Not even root can pretend to send signals from the kernel.
         * Nor can they impersonate a kill(), which adds source info.
         */
        if (info.si_code >= 0)
                return -EPERM;
        info.si_signo = sig;

        /* POSIX.1b doesn't mention process groups.  */
        return kill_proc_info(sig, &info, pid);
}

int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
        struct task_struct *t = current;
        struct k_sigaction *k;
        sigset_t mask;

        if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
                return -EINVAL;

        k = &t->sighand->action[sig-1];

        spin_lock_irq(&current->sighand->siglock);
        if (oact)
                *oact = *k;

        if (act) {
                sigdelsetmask(&act->sa.sa_mask,
                              sigmask(SIGKILL) | sigmask(SIGSTOP));
                *k = *act;
                /*
                 * POSIX 3.3.1.3:
                 *  "Setting a signal action to SIG_IGN for a signal that is
                 *   pending shall cause the pending signal to be discarded,
                 *   whether or not it is blocked."
                 *
                 *  "Setting a signal action to SIG_DFL for a signal that is
                 *   pending and whose default action is to ignore the signal
                 *   (for example, SIGCHLD), shall cause the pending signal to
                 *   be discarded, whether or not it is blocked."
                 */
                if (sig_handler_ignored(sig_handler(t, sig), sig)) {
                        sigemptyset(&mask);
                        sigaddset(&mask, sig);
                        rm_from_queue_full(&mask, &t->signal->shared_pending);
                        do {
                                rm_from_queue_full(&mask, &t->pending);
                                t = next_thread(t);
                        } while (t != current);
                }
        }

        spin_unlock_irq(&current->sighand->siglock);
        return 0;
}

int
do_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
        stack_t oss;
        int error;

        if (uoss) {
                oss.ss_sp = (void __user *) current->sas_ss_sp;
                oss.ss_size = current->sas_ss_size;
                oss.ss_flags = sas_ss_flags(sp);
        }

        if (uss) {
                void __user *ss_sp;
                size_t ss_size;
                int ss_flags;

                error = -EFAULT;
                if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
                    || __get_user(ss_sp, &uss->ss_sp)
                    || __get_user(ss_flags, &uss->ss_flags)
                    || __get_user(ss_size, &uss->ss_size))
                        goto out;

                error = -EPERM;
                if (on_sig_stack(sp))
                        goto out;

                error = -EINVAL;
                /*
                 * Note - this code used to test ss_flags incorrectly:
                 * old code may have been written using ss_flags==0
                 * to mean ss_flags==SS_ONSTACK (as this was the only
                 * way that worked) - this fix preserves that older
                 * mechanism.
                 */
                if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
                        goto out;

                if (ss_flags == SS_DISABLE) {
                        ss_size = 0;
                        ss_sp = NULL;
                } else {
                        error = -ENOMEM;
                        if (ss_size < MINSIGSTKSZ)
                                goto out;
                }

                current->sas_ss_sp = (unsigned long) ss_sp;
                current->sas_ss_size = ss_size;
        }

        if (uoss) {
                error = -EFAULT;
                if (copy_to_user(uoss, &oss, sizeof(oss)))
                        goto out;
        }

        error = 0;
out:
        return error;
}
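
/*
 * [Editor's sketch -- not part of the original file.]  do_sigaltstack()
 * above validates SS_DISABLE/SS_ONSTACK (accepting the legacy ss_flags == 0
 * form) and enforces MINSIGSTKSZ.  With SA_ONSTACK the handler then runs on
 * the registered stack.  Hypothetical demo (printf() in a handler is not
 * async-signal-safe; demo only):
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void on_usr1(int sig)
{
        int var;
        (void)sig;
        /* with SA_ONSTACK this frame lives on the alternate stack */
        printf("handler local at %p\n", (void *)&var);
}

int main(void)
{
        stack_t ss;
        struct sigaction sa;

        ss.ss_sp = malloc(SIGSTKSZ);    /* must be >= MINSIGSTKSZ */
        if (!ss.ss_sp)
                return 1;
        ss.ss_size = SIGSTKSZ;
        ss.ss_flags = 0;                /* the legacy "0 means enable" form */
        if (sigaltstack(&ss, NULL) < 0) {
                perror("sigaltstack");
                return 1;
        }

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = on_usr1;
        sa.sa_flags = SA_ONSTACK;       /* run the handler on that stack */
        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        raise(SIGUSR1);
        free(ss.ss_sp);
        return 0;
}
#endif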

#ifdef __ARCH_WANT_SYS_SIGPENDING

SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
        return do_sigpending(set, sizeof(*set));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/*
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
                old_sigset_t __user *, oset)
{
        int error;
        old_sigset_t old_set, new_set;

        if (set) {
                error = -EFAULT;
                if (copy_from_user(&new_set, set, sizeof(*set)))
                        goto out;
                new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

                spin_lock_irq(&current->sighand->siglock);
                old_set = current->blocked.sig[0];

                error = 0;
                switch (how) {
                default:
                        error = -EINVAL;
                        break;
                case SIG_BLOCK:
                        sigaddsetmask(&current->blocked, new_set);
                        break;
                case SIG_UNBLOCK:
                        sigdelsetmask(&current->blocked, new_set);
                        break;
                case SIG_SETMASK:
                        current->blocked.sig[0] = new_set;
                        break;
                }

                recalc_sigpending();
                spin_unlock_irq(&current->sighand->siglock);
                if (error)
                        goto out;
                if (oset)
                        goto set_old;
        } else if (oset) {
                old_set = current->blocked.sig[0];
set_old:
                error = -EFAULT;
                if (copy_to_user(oset, &old_set, sizeof(*oset)))
                        goto out;
        }
        error = 0;
out:
        return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifdef __ARCH_WANT_SYS_RT_SIGACTION
SYSCALL_DEFINE4(rt_sigaction, int, sig,
                const struct sigaction __user *, act,
                struct sigaction __user *, oact,
                size_t, sigsetsize)
{
        struct k_sigaction new_sa, old_sa;
        int ret = -EINVAL;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
                goto out;

        if (act) {
                if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
                        return -EFAULT;
        }

        ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

        if (!ret && oact) {
                if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
                        return -EFAULT;
        }
out:
        return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */

#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
        /* SMP safe */
        return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
        int old;

        spin_lock_irq(&current->sighand->siglock);
        old = current->blocked.sig[0];

        siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
                                                  sigmask(SIGSTOP)));
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
        struct k_sigaction new_sa, old_sa;
        int ret;

        new_sa.sa.sa_handler = handler;
        new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
        sigemptyset(&new_sa.sa.sa_mask);

        ret = do_sigaction(sig, &new_sa, &old_sa);

        return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
        current->state = TASK_INTERRUPTIBLE;
        schedule();
        return -ERESTARTNOHAND;
}

#endif

#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
        sigset_t newset;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (copy_from_user(&newset, unewset, sizeof(newset)))
                return -EFAULT;
        sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

        spin_lock_irq(&current->sighand->siglock);
        current->saved_sigmask = current->blocked;
        current->blocked = newset;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        current->state = TASK_INTERRUPTIBLE;
        schedule();
        set_restore_sigmask();
        return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */

__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
        return NULL;
}

void __init signals_init(void)
{
        sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
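
/*
 * [Editor's sketch -- not part of the original file.]  sys_rt_sigsuspend()
 * above swaps in the new mask and sleeps atomically, with the old mask
 * restored via saved_sigmask on return.  That atomicity is the point: a
 * sigprocmask()+pause() sequence can lose a signal that arrives between
 * the two calls.  Hypothetical userspace demo:
 */
#if 0
#include <signal.h>
#include <stdio.h>

static void on_usr1(int sig)
{
        (void)sig;      /* exists only to make sigsuspend() return */
}

int main(void)
{
        sigset_t block, waitmask;
        struct sigaction sa = { .sa_handler = on_usr1 };

        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        /* keep SIGUSR1 blocked during normal operation */
        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block, &waitmask);
        sigdelset(&waitmask, SIGUSR1);  /* mask to use while waiting */

        raise(SIGUSR1); /* pends; a pause() here could have missed it */

        /* atomically: blocked = waitmask, sleep, restore the old mask */
        sigsuspend(&waitmask);
        puts("woke up; the kernel restored the original mask");
        return 0;
}
#endif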