/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;


static int sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/*
	 * Tracers always want to know about signals.
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    freezing(t) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know it is safe
	 * may clear it themselves.
	 */
	return 0;
}
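
/*
 * Illustrative example (not from the original source): how the PENDING
 * check behaves on a one-word sigset (_NSIG_WORDS == 1).  With SIGTERM
 * (signal 15, bit 14) both pending and blocked, no bit survives the
 * "&~ blocked" step, so the task is not marked TIF_SIGPENDING:
 *
 *	signal->sig[0]  = 0x00004000;	// SIGTERM pending
 *	blocked->sig[0] = 0x00004000;	// SIGTERM blocked
 *	ready = 0x4000 &~ 0x4000 = 0;	// has_pending_signals() == 0
 *
 * Unblocking SIGTERM (blocked->sig[0] = 0) makes ready non-zero and
 * recalc_sigpending_tsk() then sets TIF_SIGPENDING.
 */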

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current))
		clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced. */

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
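
/*
 * Illustrative example (not from the original source): with SIGINT
 * (signal 2) and SIGRTMIN both pending, and SIGINT blocked in *mask,
 * next_signal() skips the masked-out low bit and returns SIGRTMIN.
 * With nothing blocked it returns SIGINT: the scan uses ffz(~x) on the
 * first word with surviving bits, so lower-numbered (legacy) signals
 * are found before realtime ones.
 */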

static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * In order to avoid problems with "switch_user()", we want to make
	 * sure that the compiler doesn't re-load "t->user".
	 */
	user = t->user;
	barrier();
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(user);
	}
	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}


/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not. */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
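
/*
 * Illustrative sketch (not part of this file): how a driver might use
 * the notifier hooks above.  The names my_notifier/my_dev are
 * hypothetical; only block_all_signals()/unblock_all_signals() are the
 * real entry points.
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		// Return non-zero to let the signal be acted upon after
 *		// all, 0 to keep it blocked while the device is busy.
 *		return !dev->busy;
 *	}
 *
 *	block_all_signals(my_notifier, dev, &mask);
 *	... critical device interaction by current ...
 *	unblock_all_signals();
 */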

static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = 0;

	/*
	 * We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them.
	 */
	if (tsk == current)
		signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case.  This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once.  Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}
	if (likely(tsk == current))
		recalc_sigpending();
	if (signr && unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if (signr &&
	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	    info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
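
/*
 * Illustrative sketch (not from the original source): the canonical
 * dequeue pattern, as used by get_signal_to_deliver() later in this
 * file.  Per the comment above, the siglock must be held across the
 * call:
 *
 *	siginfo_t info;
 *	int sig;
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	sig = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 *	// sig is 0 if nothing deliverable was pending;
 *	// otherwise info describes the dequeued signal.
 */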

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked.
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced case.
	 * We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped
	 * state.  By using wake_up_state, we ensure the process will
	 * wake up and handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED | TASK_TRACED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;
	if (!valid_signal(sig))
		return error;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	error = -EPERM;
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) ||
		(process_session(current) != process_session(t)))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;

	return security_task_kill(t, info, sig, 0);
}
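
/*
 * Illustrative note (not from the original source): the "^" terms above
 * are just inequality tests.  For an unprivileged sender with
 * uid == euid == 1000, signalling a target whose uid is 1000 succeeds
 * because "current->uid ^ t->uid" is 0 and the && chain short-circuits;
 * a target with uid 0 and suid 0 fails every identity test and, without
 * CAP_KILL, yields -EPERM.  SIGCONT sent within the sender's own
 * session is always allowed by the session term.
 */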

/* forward decl */
static void do_notify_parent_cldstop(struct task_struct *tsk, int why);

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING.
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}
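
/*
 * Illustrative example (not from the original source) of the policy
 * implemented above: a newly generated SIGCONT removes pending stop
 * signals from every queue and wakes the stopped threads, while a new
 * stop signal discards any pending SIGCONT.  So from userspace:
 *
 *	kill(pid, SIGSTOP);	// group stops (or the stop becomes pending)
 *	kill(pid, SIGCONT);	// stop state and pending stops are undone
 *
 * leaves the group running, with the parent told CLD_CONTINUED if the
 * group was actually stopped in between.
 */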

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue *q = NULL;
	int ret = 0;

	/*
	 * Deliver the signal to listening signalfds.  This must be called
	 * with the sighand lock held.
	 */
	signalfd_notify(t, sig);

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			return -EAGAIN;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}

#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))


static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	BUG_ON(!irqs_disabled());
	assert_spin_locked(&t->sighand->siglock);

	/* Short-circuit ignored signals. */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}
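
/*
 * Illustrative example (not from the original source): LEGACY_QUEUE
 * coalescing.  If SIGCHLD (a non-realtime signal) is already pending
 * for t, a second SIGCHLD sent before the first is dequeued is simply
 * dropped above; only one instance of each legacy signal is kept.
 * Realtime signals (>= SIGRTMIN) skip this test and queue once per
 * send, which is why sigqueue() callers can observe every event.
 */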
794 */ 795 static inline int wants_signal(int sig, struct task_struct *p) 796 { 797 if (sigismember(&p->blocked, sig)) 798 return 0; 799 if (p->flags & PF_EXITING) 800 return 0; 801 if (sig == SIGKILL) 802 return 1; 803 if (p->state & (TASK_STOPPED | TASK_TRACED)) 804 return 0; 805 return task_curr(p) || !signal_pending(p); 806 } 807 808 static void 809 __group_complete_signal(int sig, struct task_struct *p) 810 { 811 struct task_struct *t; 812 813 /* 814 * Now find a thread we can wake up to take the signal off the queue. 815 * 816 * If the main thread wants the signal, it gets first crack. 817 * Probably the least surprising to the average bear. 818 */ 819 if (wants_signal(sig, p)) 820 t = p; 821 else if (thread_group_empty(p)) 822 /* 823 * There is just one thread and it does not need to be woken. 824 * It will dequeue unblocked signals before it runs again. 825 */ 826 return; 827 else { 828 /* 829 * Otherwise try to find a suitable thread. 830 */ 831 t = p->signal->curr_target; 832 if (t == NULL) 833 /* restart balancing at this thread */ 834 t = p->signal->curr_target = p; 835 836 while (!wants_signal(sig, t)) { 837 t = next_thread(t); 838 if (t == p->signal->curr_target) 839 /* 840 * No thread needs to be woken. 841 * Any eligible threads will see 842 * the signal in the queue soon. 843 */ 844 return; 845 } 846 p->signal->curr_target = t; 847 } 848 849 /* 850 * Found a killable thread. If the signal will be fatal, 851 * then start taking the whole group down immediately. 852 */ 853 if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) && 854 !sigismember(&t->real_blocked, sig) && 855 (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) { 856 /* 857 * This signal will be fatal to the whole group. 858 */ 859 if (!sig_kernel_coredump(sig)) { 860 /* 861 * Start a group exit and wake everybody up. 862 * This way we don't have other threads 863 * running and doing things after a slower 864 * thread has the fatal signal pending. 865 */ 866 p->signal->flags = SIGNAL_GROUP_EXIT; 867 p->signal->group_exit_code = sig; 868 p->signal->group_stop_count = 0; 869 t = p; 870 do { 871 sigaddset(&t->pending.signal, SIGKILL); 872 signal_wake_up(t, 1); 873 t = next_thread(t); 874 } while (t != p); 875 return; 876 } 877 878 /* 879 * There will be a core dump. We make all threads other 880 * than the chosen one go into a group stop so that nothing 881 * happens until it gets scheduled, takes the signal off 882 * the shared queue, and does the core dump. This is a 883 * little more complicated than strictly necessary, but it 884 * keeps the signal state that winds up in the core dump 885 * unchanged from the death state, e.g. which thread had 886 * the core-dump signal unblocked. 887 */ 888 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); 889 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending); 890 p->signal->group_stop_count = 0; 891 p->signal->group_exit_task = t; 892 t = p; 893 do { 894 p->signal->group_stop_count++; 895 signal_wake_up(t, 0); 896 t = next_thread(t); 897 } while (t != p); 898 wake_up_process(p->signal->group_exit_task); 899 return; 900 } 901 902 /* 903 * The signal is already in the shared-pending queue. 904 * Tell the chosen thread to wake up and dequeue it. 905 */ 906 signal_wake_up(t, sig == SIGKILL); 907 return; 908 } 909 910 int 911 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) 912 { 913 int ret = 0; 914 915 assert_spin_locked(&p->sighand->siglock); 916 handle_stop_signal(sig, p); 917 918 /* Short-circuit ignored signals. 

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

	assert_spin_locked(&p->sighand->siglock);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals. */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued. */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->flags = SIGNAL_GROUP_EXIT;
	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}

	return sighand;
}

int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}

/*
 * kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pgrp_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error;
	struct task_struct *p;

	rcu_read_lock();
	if (unlikely(sig_needs_tasklist(sig)))
		read_lock(&tasklist_lock);

	p = pid_task(pid, PIDTYPE_PID);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);

	if (unlikely(sig_needs_tasklist(sig)))
		read_unlock(&tasklist_lock);
	rcu_read_unlock();
	return error;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_pid(pid));
	rcu_read_unlock();
	return error;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	int ret;
	rcu_read_lock();
	if (!pid) {
		ret = kill_pgrp_info(sig, info, task_pgrp(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		ret = count ? retval : -ESRCH;
	} else if (pid < 0) {
		ret = kill_pgrp_info(sig, info, find_pid(-pid));
	} else {
		ret = kill_pid_info(sig, info, find_pid(pid));
	}
	rcu_read_unlock();
	return ret;
}
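
/*
 * Illustrative summary (not from the original source) of the pid
 * conventions implemented above, matching kill(2):
 *
 *	kill(1234, sig)		sig to the process with pid 1234
 *	kill(0, sig)		sig to every process in the caller's
 *				process group
 *	kill(-1, sig)		sig to every process except pid 1 and
 *				the caller's own thread group
 *	kill(-567, sig)		sig to every process in process group 567
 */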

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV.  And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	return kill_pgrp_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, __si_special(priv), pid);
}

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure from timer_create().  If this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return q;
}
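
/*
 * Illustrative sketch (not from the original source) of the POSIX-timer
 * style lifecycle described above; error handling is elided:
 *
 *	struct sigqueue *q = sigqueue_alloc();	// at timer_create() time;
 *						// NULL here means -EAGAIN
 *	...
 *	send_sigqueue(SIGRTMIN, q, task);	// at each timer expiry;
 *						// re-sends bump si_overrun
 *	...
 *	sigqueue_free(q);			// at timer_delete() time
 */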

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		spinlock_t *lock = &current->sighand->siglock;
		read_lock(&tasklist_lock);
		spin_lock_irqsave(lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}

int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	/*
	 * The rcu based delayed sighand destroy makes it possible to
	 * run this without tasklist lock held.  The task struct itself
	 * cannot go away as create_timer did get_task_struct().
	 *
	 * We return -1, when the task is marked exiting, so
	 * posix_timer_event can redirect it to the group leader.
	 */
	rcu_read_lock();

	if (!likely(lock_task_sighand(p, &flags))) {
		ret = -1;
		goto out_err;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals. */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}
	/*
	 * Deliver the signal to listening signalfds.  This must be called
	 * with the sighand lock held.
	 */
	signalfd_notify(p, sig);

	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	unlock_task_sighand(p, &flags);
out_err:
	rcu_read_unlock();

	return ret;
}

int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	read_lock(&tasklist_lock);
	/* Since it_lock is held, p->sighand cannot be NULL. */
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals. */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	/*
	 * Deliver the signal to listening signalfds.  This must be called
	 * with the sighand lock held.
	 */
	signalfd_notify(p, sig);

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}
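
/*
 * Illustrative example (not from the original source) of the exit_code
 * decoding above, which follows the classic wait(2) status layout:
 *
 *	exit_code = 0x0200	-> CLD_EXITED,  si_status = 2  (exit(2))
 *	exit_code = 0x000f	-> CLD_KILLED,  si_status = 15 (SIGTERM)
 *	exit_code = 0x008b	-> CLD_DUMPED,  si_status = 11 (SIGSEGV,
 *				   core dumped: 0x80 | 0x0b)
 */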

static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (tsk->ptrace & PT_PTRACED)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace & PT_PTRACED))
		return 0;

	if (unlikely(current->parent == current->real_parent &&
		     (current->ptrace & PT_ATTACHED)))
		return 0;

	if (unlikely(current->signal == current->parent->signal) &&
	    unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
		return 0;

	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_waiters != 0.  Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_waiters) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer is gone,
 * we leave nostop_code in current->exit_code.
 */
static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
{
	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	try_to_freeze();
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't stop here.
		 */
		read_unlock(&tasklist_lock);
		set_current_state(TASK_RUNNING);
		current->exit_code = nostop_code;
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 0, &info);
	spin_unlock_irq(&current->sighand->siglock);
}

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	do {
		schedule();
	} while (try_to_freeze());
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int stop_count;

	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
		return 0;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		stop_count = --sig->group_stop_count;
	} else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		struct task_struct *t;

		sig->group_exit_code = signr;

		stop_count = 0;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!t->exit_state &&
			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
				stop_count++;
				signal_wake_up(t, 0);
			}
		sig->group_stop_count = stop_count;
	}

	if (stop_count == 0)
		sig->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = sig->group_exit_code;
	__set_current_state(TASK_STOPPED);

	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
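
/*
 * Illustrative example (not from the original source) of the group stop
 * bookkeeping above.  In a group of four threads, the thread that
 * dequeues SIGTSTP counts the three others that are not already stopped
 * or exiting, sets group_stop_count = 3, wakes them so they notice
 * TIF_SIGPENDING, and stops itself.  Each of the three decrements the
 * count in handle_group_stop() below; the one that drops it to zero
 * sets SIGNAL_STOP_STOPPED, and finish_stop() reports CLD_STOPPED to
 * the parent exactly once.
 */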
1710 */ 1711 current->signal->group_exit_task = NULL; 1712 return 0; 1713 } 1714 1715 if (current->signal->flags & SIGNAL_GROUP_EXIT) 1716 /* 1717 * Group stop is so another thread can do a core dump, 1718 * or else we are racing against a death signal. 1719 * Just punt the stop so we can get the next signal. 1720 */ 1721 return 0; 1722 1723 /* 1724 * There is a group stop in progress. We stop 1725 * without any associated signal being in our queue. 1726 */ 1727 stop_count = --current->signal->group_stop_count; 1728 if (stop_count == 0) 1729 current->signal->flags = SIGNAL_STOP_STOPPED; 1730 current->exit_code = current->signal->group_exit_code; 1731 set_current_state(TASK_STOPPED); 1732 spin_unlock_irq(¤t->sighand->siglock); 1733 finish_stop(stop_count); 1734 return 1; 1735 } 1736 1737 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, 1738 struct pt_regs *regs, void *cookie) 1739 { 1740 sigset_t *mask = ¤t->blocked; 1741 int signr = 0; 1742 1743 try_to_freeze(); 1744 1745 relock: 1746 spin_lock_irq(¤t->sighand->siglock); 1747 for (;;) { 1748 struct k_sigaction *ka; 1749 1750 if (unlikely(current->signal->group_stop_count > 0) && 1751 handle_group_stop()) 1752 goto relock; 1753 1754 signr = dequeue_signal(current, mask, info); 1755 1756 if (!signr) 1757 break; /* will return 0 */ 1758 1759 if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) { 1760 ptrace_signal_deliver(regs, cookie); 1761 1762 /* Let the debugger run. */ 1763 ptrace_stop(signr, signr, info); 1764 1765 /* We're back. Did the debugger cancel the sig? */ 1766 signr = current->exit_code; 1767 if (signr == 0) 1768 continue; 1769 1770 current->exit_code = 0; 1771 1772 /* Update the siginfo structure if the signal has 1773 changed. If the debugger wanted something 1774 specific in the siginfo structure then it should 1775 have updated *info via PTRACE_SETSIGINFO. */ 1776 if (signr != info->si_signo) { 1777 info->si_signo = signr; 1778 info->si_errno = 0; 1779 info->si_code = SI_USER; 1780 info->si_pid = current->parent->pid; 1781 info->si_uid = current->parent->uid; 1782 } 1783 1784 /* If the (new) signal is now blocked, requeue it. */ 1785 if (sigismember(¤t->blocked, signr)) { 1786 specific_send_sig_info(signr, info, current); 1787 continue; 1788 } 1789 } 1790 1791 ka = ¤t->sighand->action[signr-1]; 1792 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ 1793 continue; 1794 if (ka->sa.sa_handler != SIG_DFL) { 1795 /* Run the handler. */ 1796 *return_ka = *ka; 1797 1798 if (ka->sa.sa_flags & SA_ONESHOT) 1799 ka->sa.sa_handler = SIG_DFL; 1800 1801 break; /* will return non-zero "signr" value */ 1802 } 1803 1804 /* 1805 * Now we are doing the default action for this signal. 1806 */ 1807 if (sig_kernel_ignore(signr)) /* Default is nothing. */ 1808 continue; 1809 1810 /* 1811 * Init of a pid space gets no signals it doesn't want from 1812 * within that pid space. It can of course get signals from 1813 * its parent pid space. 1814 */ 1815 if (current == child_reaper(current)) 1816 continue; 1817 1818 if (sig_kernel_stop(signr)) { 1819 /* 1820 * The default action is to stop all threads in 1821 * the thread group. The job control signals 1822 * do nothing in an orphaned pgrp, but SIGSTOP 1823 * always works. Note that siglock needs to be 1824 * dropped during the call to is_orphaned_pgrp() 1825 * because of lock ordering with tasklist_lock. 1826 * This allows an intervening SIGCONT to be posted. 1827 * We need to check for that and bail out if necessary. 
1828 */ 1829 if (signr != SIGSTOP) { 1830 spin_unlock_irq(¤t->sighand->siglock); 1831 1832 /* signals can be posted during this window */ 1833 1834 if (is_current_pgrp_orphaned()) 1835 goto relock; 1836 1837 spin_lock_irq(¤t->sighand->siglock); 1838 } 1839 1840 if (likely(do_signal_stop(signr))) { 1841 /* It released the siglock. */ 1842 goto relock; 1843 } 1844 1845 /* 1846 * We didn't actually stop, due to a race 1847 * with SIGCONT or something like that. 1848 */ 1849 continue; 1850 } 1851 1852 spin_unlock_irq(¤t->sighand->siglock); 1853 1854 /* 1855 * Anything else is fatal, maybe with a core dump. 1856 */ 1857 current->flags |= PF_SIGNALED; 1858 if (sig_kernel_coredump(signr)) { 1859 /* 1860 * If it was able to dump core, this kills all 1861 * other threads in the group and synchronizes with 1862 * their demise. If we lost the race with another 1863 * thread getting here, it set group_exit_code 1864 * first and our do_group_exit call below will use 1865 * that value and ignore the one we pass it. 1866 */ 1867 do_coredump((long)signr, signr, regs); 1868 } 1869 1870 /* 1871 * Death signals, no core dump. 1872 */ 1873 do_group_exit(signr); 1874 /* NOTREACHED */ 1875 } 1876 spin_unlock_irq(¤t->sighand->siglock); 1877 return signr; 1878 } 1879 1880 EXPORT_SYMBOL(recalc_sigpending); 1881 EXPORT_SYMBOL_GPL(dequeue_signal); 1882 EXPORT_SYMBOL(flush_signals); 1883 EXPORT_SYMBOL(force_sig); 1884 EXPORT_SYMBOL(kill_proc); 1885 EXPORT_SYMBOL(ptrace_notify); 1886 EXPORT_SYMBOL(send_sig); 1887 EXPORT_SYMBOL(send_sig_info); 1888 EXPORT_SYMBOL(sigprocmask); 1889 EXPORT_SYMBOL(block_all_signals); 1890 EXPORT_SYMBOL(unblock_all_signals); 1891 1892 1893 /* 1894 * System call entry points. 1895 */ 1896 1897 asmlinkage long sys_restart_syscall(void) 1898 { 1899 struct restart_block *restart = ¤t_thread_info()->restart_block; 1900 return restart->fn(restart); 1901 } 1902 1903 long do_no_restart_syscall(struct restart_block *param) 1904 { 1905 return -EINTR; 1906 } 1907 1908 /* 1909 * We don't need to get the kernel lock - this is all local to this 1910 * particular thread.. (and that's good, because this is _heavily_ 1911 * used by various programs) 1912 */ 1913 1914 /* 1915 * This is also useful for kernel threads that want to temporarily 1916 * (or permanently) block certain signals. 1917 * 1918 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel 1919 * interface happily blocks "unblockable" signals like SIGKILL 1920 * and friends. 1921 */ 1922 int sigprocmask(int how, sigset_t *set, sigset_t *oldset) 1923 { 1924 int error; 1925 1926 spin_lock_irq(¤t->sighand->siglock); 1927 if (oldset) 1928 *oldset = current->blocked; 1929 1930 error = 0; 1931 switch (how) { 1932 case SIG_BLOCK: 1933 sigorsets(¤t->blocked, ¤t->blocked, set); 1934 break; 1935 case SIG_UNBLOCK: 1936 signandsets(¤t->blocked, ¤t->blocked, set); 1937 break; 1938 case SIG_SETMASK: 1939 current->blocked = *set; 1940 break; 1941 default: 1942 error = -EINVAL; 1943 } 1944 recalc_sigpending(); 1945 spin_unlock_irq(¤t->sighand->siglock); 1946 1947 return error; 1948 } 1949 1950 asmlinkage long 1951 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize) 1952 { 1953 int error = -EINVAL; 1954 sigset_t old_set, new_set; 1955 1956 /* XXX: Don't preclude handling different sized sigset_t's. 

asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}

long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}

#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now.  */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
2016 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2017 
2018 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2019 {
2020 	int err;
2021 
2022 	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2023 		return -EFAULT;
2024 	if (from->si_code < 0)
2025 		return __copy_to_user(to, from, sizeof(siginfo_t))
2026 			? -EFAULT : 0;
2027 	/*
2028 	 * If you change the siginfo_t structure, be sure this code
2029 	 * is fixed accordingly.
2030 	 * Remember to update the signalfd_copyinfo() function in
2031 	 * fs/signalfd.c too, in case siginfo_t changes.
2032 	 * It must never copy any padding contained in the structure
2033 	 * (to avoid security leaks), but must copy the generic
2034 	 * 3 ints plus the relevant union member.
2035 	 */
2036 	err = __put_user(from->si_signo, &to->si_signo);
2037 	err |= __put_user(from->si_errno, &to->si_errno);
2038 	err |= __put_user((short)from->si_code, &to->si_code);
2039 	switch (from->si_code & __SI_MASK) {
2040 	case __SI_KILL:
2041 		err |= __put_user(from->si_pid, &to->si_pid);
2042 		err |= __put_user(from->si_uid, &to->si_uid);
2043 		break;
2044 	case __SI_TIMER:
2045 		err |= __put_user(from->si_tid, &to->si_tid);
2046 		err |= __put_user(from->si_overrun, &to->si_overrun);
2047 		err |= __put_user(from->si_ptr, &to->si_ptr);
2048 		break;
2049 	case __SI_POLL:
2050 		err |= __put_user(from->si_band, &to->si_band);
2051 		err |= __put_user(from->si_fd, &to->si_fd);
2052 		break;
2053 	case __SI_FAULT:
2054 		err |= __put_user(from->si_addr, &to->si_addr);
2055 #ifdef __ARCH_SI_TRAPNO
2056 		err |= __put_user(from->si_trapno, &to->si_trapno);
2057 #endif
2058 		break;
2059 	case __SI_CHLD:
2060 		err |= __put_user(from->si_pid, &to->si_pid);
2061 		err |= __put_user(from->si_uid, &to->si_uid);
2062 		err |= __put_user(from->si_status, &to->si_status);
2063 		err |= __put_user(from->si_utime, &to->si_utime);
2064 		err |= __put_user(from->si_stime, &to->si_stime);
2065 		break;
2066 	case __SI_RT: /* This is not generated by the kernel as of now. */
2067 	case __SI_MESGQ: /* But this is. */
2068 		err |= __put_user(from->si_pid, &to->si_pid);
2069 		err |= __put_user(from->si_uid, &to->si_uid);
2070 		err |= __put_user(from->si_ptr, &to->si_ptr);
2071 		break;
2072 	default: /* this is just in case for now ... */
2073 		err |= __put_user(from->si_pid, &to->si_pid);
2074 		err |= __put_user(from->si_uid, &to->si_uid);
2075 		break;
2076 	}
2077 	return err;
2078 }
2079 
2080 #endif
2081 
2082 asmlinkage long
2083 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2084 		    siginfo_t __user *uinfo,
2085 		    const struct timespec __user *uts,
2086 		    size_t sigsetsize)
2087 {
2088 	int ret, sig;
2089 	sigset_t these;
2090 	struct timespec ts;
2091 	siginfo_t info;
2092 	long timeout = 0;
2093 
2094 	/* XXX: Don't preclude handling different sized sigset_t's. */
2095 	if (sigsetsize != sizeof(sigset_t))
2096 		return -EINVAL;
2097 
2098 	if (copy_from_user(&these, uthese, sizeof(these)))
2099 		return -EFAULT;
2100 
2101 	/*
2102 	 * Invert the set of allowed signals to get those we
2103 	 * want to block.
2104 	 */
2105 	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2106 	signotset(&these);
2107 
2108 	if (uts) {
2109 		if (copy_from_user(&ts, uts, sizeof(ts)))
2110 			return -EFAULT;
2111 		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2112 		    || ts.tv_sec < 0)
2113 			return -EINVAL;
2114 	}
2115 
2116 	spin_lock_irq(&current->sighand->siglock);
2117 	sig = dequeue_signal(current, &these, &info);
2118 	if (!sig) {
2119 		timeout = MAX_SCHEDULE_TIMEOUT;
2120 		if (uts)
2121 			timeout = (timespec_to_jiffies(&ts)
2122 				   + (ts.tv_sec || ts.tv_nsec));
2123 
2124 		if (timeout) {
2125 			/* None ready -- temporarily unblock the signals we're
2126 			 * interested in while we sleep, so that we'll be
2127 			 * awakened when they arrive. */
2128 			current->real_blocked = current->blocked;
2129 			sigandsets(&current->blocked, &current->blocked, &these);
2130 			recalc_sigpending();
2131 			spin_unlock_irq(&current->sighand->siglock);
2132 
2133 			timeout = schedule_timeout_interruptible(timeout);
2134 
2135 			spin_lock_irq(&current->sighand->siglock);
2136 			sig = dequeue_signal(current, &these, &info);
2137 			current->blocked = current->real_blocked;
2138 			siginitset(&current->real_blocked, 0);
2139 			recalc_sigpending();
2140 		}
2141 	}
2142 	spin_unlock_irq(&current->sighand->siglock);
2143 
2144 	if (sig) {
2145 		ret = sig;
2146 		if (uinfo) {
2147 			if (copy_siginfo_to_user(uinfo, &info))
2148 				ret = -EFAULT;
2149 		}
2150 	} else {
2151 		ret = -EAGAIN;
2152 		if (timeout)
2153 			ret = -EINTR;
2154 	}
2155 
2156 	return ret;
2157 }
2158 
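/*
 * Userspace view of sys_rt_sigtimedwait() above (reached via
 * sigtimedwait(2)): the caller normally blocks the signals first and
 * then waits for them synchronously.  A minimal sketch with a
 * half-second timeout; on timeout the kernel's -EAGAIN shows up as
 * -1 with errno == EAGAIN.
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	int wait_for_usr1(void)
 *	{
 *		sigset_t set;
 *		siginfo_t si;
 *		struct timespec ts = { .tv_sec = 0, .tv_nsec = 500000000 };
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		return sigtimedwait(&set, &si, &ts);	// signo, or -1/EAGAIN
 *	}
 */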
2159 asmlinkage long
2160 sys_kill(int pid, int sig)
2161 {
2162 	struct siginfo info;
2163 
2164 	info.si_signo = sig;
2165 	info.si_errno = 0;
2166 	info.si_code = SI_USER;
2167 	info.si_pid = current->tgid;
2168 	info.si_uid = current->uid;
2169 
2170 	return kill_something_info(sig, &info, pid);
2171 }
2172 
2173 static int do_tkill(int tgid, int pid, int sig)
2174 {
2175 	int error;
2176 	struct siginfo info;
2177 	struct task_struct *p;
2178 
2179 	error = -ESRCH;
2180 	info.si_signo = sig;
2181 	info.si_errno = 0;
2182 	info.si_code = SI_TKILL;
2183 	info.si_pid = current->tgid;
2184 	info.si_uid = current->uid;
2185 
2186 	read_lock(&tasklist_lock);
2187 	p = find_task_by_pid(pid);
2188 	if (p && (tgid <= 0 || p->tgid == tgid)) {
2189 		error = check_kill_permission(sig, &info, p);
2190 		/*
2191 		 * The null signal is a permissions and process existence
2192 		 * probe.  No signal is actually delivered.
2193 		 */
2194 		if (!error && sig && p->sighand) {
2195 			spin_lock_irq(&p->sighand->siglock);
2196 			handle_stop_signal(sig, p);
2197 			error = specific_send_sig_info(sig, &info, p);
2198 			spin_unlock_irq(&p->sighand->siglock);
2199 		}
2200 	}
2201 	read_unlock(&tasklist_lock);
2202 
2203 	return error;
2204 }
2205 
2206 /**
2207  * sys_tgkill - send signal to one specific thread
2208  * @tgid: the thread group ID of the thread
2209  * @pid: the PID of the thread
2210  * @sig: signal to be sent
2211  *
2212  * This syscall also checks the @tgid and returns -ESRCH even if the PID
2213  * exists but no longer belongs to the target thread group.  This
2214  * solves the race of threads exiting and PIDs getting reused.
2215  */
2216 asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2217 {
2218 	/* This is only valid for single tasks */
2219 	if (pid <= 0 || tgid <= 0)
2220 		return -EINVAL;
2221 
2222 	return do_tkill(tgid, pid, sig);
2223 }
2224 
2225 /*
2226  * Send a signal to only one task, even if it's a CLONE_THREAD task.
2227  */
2228 asmlinkage long
2229 sys_tkill(int pid, int sig)
2230 {
2231 	/* This is only valid for single tasks */
2232 	if (pid <= 0)
2233 		return -EINVAL;
2234 
2235 	return do_tkill(0, pid, sig);
2236 }
2237 
2238 asmlinkage long
2239 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2240 {
2241 	siginfo_t info;
2242 
2243 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2244 		return -EFAULT;
2245 
2246 	/* Not even root can pretend to send signals from the kernel.
2247 	   Nor can they impersonate a kill(), which adds source info. */
2248 	if (info.si_code >= 0)
2249 		return -EPERM;
2250 	info.si_signo = sig;
2251 
2252 	/* POSIX.1b doesn't mention process groups. */
2253 	return kill_proc_info(sig, &info, pid);
2254 }
2255 
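/*
 * Userspace view of sys_rt_sigqueueinfo() above: glibc's sigqueue(3)
 * fills in a siginfo with si_code = SI_QUEUE, which is negative; that
 * is exactly why the si_code >= 0 check rejects forged kernel or
 * kill()-style source info.  A minimal sketch that also uses the null
 * signal as a pure existence/permission probe (see do_tkill's comment):
 *
 *	#include <signal.h>
 *	#include <errno.h>
 *
 *	int notify(pid_t pid)
 *	{
 *		union sigval v = { .sival_int = 42 };
 *
 *		if (kill(pid, 0) < 0 && errno == ESRCH)
 *			return -1;			// no such process
 *		return sigqueue(pid, SIGUSR1, v);	// SI_QUEUE + payload
 *	}
 */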
2256 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2257 {
2258 	struct k_sigaction *k;
2259 	sigset_t mask;
2260 
2261 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2262 		return -EINVAL;
2263 
2264 	k = &current->sighand->action[sig-1];
2265 
2266 	spin_lock_irq(&current->sighand->siglock);
2267 	if (signal_pending(current)) {
2268 		/*
2269 		 * If there might be a fatal signal pending on multiple
2270 		 * threads, make sure we take it before changing the action.
2271 		 */
2272 		spin_unlock_irq(&current->sighand->siglock);
2273 		return -ERESTARTNOINTR;
2274 	}
2275 
2276 	if (oact)
2277 		*oact = *k;
2278 
2279 	if (act) {
2280 		sigdelsetmask(&act->sa.sa_mask,
2281 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
2282 		*k = *act;
2283 		/*
2284 		 * POSIX 3.3.1.3:
2285 		 *  "Setting a signal action to SIG_IGN for a signal that is
2286 		 *   pending shall cause the pending signal to be discarded,
2287 		 *   whether or not it is blocked."
2288 		 *
2289 		 *  "Setting a signal action to SIG_DFL for a signal that is
2290 		 *   pending and whose default action is to ignore the signal
2291 		 *   (for example, SIGCHLD), shall cause the pending signal to
2292 		 *   be discarded, whether or not it is blocked"
2293 		 */
2294 		if (act->sa.sa_handler == SIG_IGN ||
2295 		    (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
2296 			struct task_struct *t = current;
2297 			sigemptyset(&mask);
2298 			sigaddset(&mask, sig);
2299 			rm_from_queue_full(&mask, &t->signal->shared_pending);
2300 			do {
2301 				rm_from_queue_full(&mask, &t->pending);
2302 				recalc_sigpending_and_wake(t);
2303 				t = next_thread(t);
2304 			} while (t != current);
2305 		}
2306 	}
2307 
2308 	spin_unlock_irq(&current->sighand->siglock);
2309 	return 0;
2310 }
2311 
2312 int
2313 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2314 {
2315 	stack_t oss;
2316 	int error;
2317 
2318 	if (uoss) {
2319 		oss.ss_sp = (void __user *) current->sas_ss_sp;
2320 		oss.ss_size = current->sas_ss_size;
2321 		oss.ss_flags = sas_ss_flags(sp);
2322 	}
2323 
2324 	if (uss) {
2325 		void __user *ss_sp;
2326 		size_t ss_size;
2327 		int ss_flags;
2328 
2329 		error = -EFAULT;
2330 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2331 		    || __get_user(ss_sp, &uss->ss_sp)
2332 		    || __get_user(ss_flags, &uss->ss_flags)
2333 		    || __get_user(ss_size, &uss->ss_size))
2334 			goto out;
2335 
2336 		error = -EPERM;
2337 		if (on_sig_stack(sp))
2338 			goto out;
2339 
2340 		error = -EINVAL;
2341 		/*
2342 		 *
2343 		 * Note - this code used to test ss_flags incorrectly:
2344 		 * old code may have been written using ss_flags==0
2345 		 * to mean ss_flags==SS_ONSTACK (as this was the only
2346 		 * way that worked), so this fix preserves that older
2347 		 * mechanism.
2348 		 */
2349 		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2350 			goto out;
2351 
2352 		if (ss_flags == SS_DISABLE) {
2353 			ss_size = 0;
2354 			ss_sp = NULL;
2355 		} else {
2356 			error = -ENOMEM;
2357 			if (ss_size < MINSIGSTKSZ)
2358 				goto out;
2359 		}
2360 
2361 		current->sas_ss_sp = (unsigned long) ss_sp;
2362 		current->sas_ss_size = ss_size;
2363 	}
2364 
2365 	if (uoss) {
2366 		error = -EFAULT;
2367 		if (copy_to_user(uoss, &oss, sizeof(oss)))
2368 			goto out;
2369 	}
2370 
2371 	error = 0;
2372 out:
2373 	return error;
2374 }
2375 
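/*
 * Userspace view of do_sigaltstack() above (reached via sigaltstack(2)):
 * install an alternate stack so a SIGSEGV handler can still run after
 * the main stack overflows.  The handler must be registered with
 * SA_ONSTACK.  A minimal sketch; note ss_flags == 0 means "enable",
 * per the compatibility note in the code above.
 *
 *	#include <signal.h>
 *
 *	static char altstack[SIGSTKSZ];		// SIGSTKSZ >= MINSIGSTKSZ
 *
 *	void install_altstack(void (*handler)(int))
 *	{
 *		stack_t ss = {
 *			.ss_sp = altstack,
 *			.ss_size = sizeof(altstack),
 *			.ss_flags = 0,
 *		};
 *		struct sigaction sa = { .sa_handler = handler,
 *					.sa_flags = SA_ONSTACK };
 *
 *		sigaltstack(&ss, NULL);
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGSEGV, &sa, NULL);
 *	}
 */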
2376 #ifdef __ARCH_WANT_SYS_SIGPENDING
2377 
2378 asmlinkage long
2379 sys_sigpending(old_sigset_t __user *set)
2380 {
2381 	return do_sigpending(set, sizeof(*set));
2382 }
2383 
2384 #endif
2385 
2386 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2387 /* Some platforms have their own version with special arguments;
2388    others support only sys_rt_sigprocmask. */
2389 
2390 asmlinkage long
2391 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2392 {
2393 	int error;
2394 	old_sigset_t old_set, new_set;
2395 
2396 	if (set) {
2397 		error = -EFAULT;
2398 		if (copy_from_user(&new_set, set, sizeof(*set)))
2399 			goto out;
2400 		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2401 
2402 		spin_lock_irq(&current->sighand->siglock);
2403 		old_set = current->blocked.sig[0];
2404 
2405 		error = 0;
2406 		switch (how) {
2407 		default:
2408 			error = -EINVAL;
2409 			break;
2410 		case SIG_BLOCK:
2411 			sigaddsetmask(&current->blocked, new_set);
2412 			break;
2413 		case SIG_UNBLOCK:
2414 			sigdelsetmask(&current->blocked, new_set);
2415 			break;
2416 		case SIG_SETMASK:
2417 			current->blocked.sig[0] = new_set;
2418 			break;
2419 		}
2420 
2421 		recalc_sigpending();
2422 		spin_unlock_irq(&current->sighand->siglock);
2423 		if (error)
2424 			goto out;
2425 		if (oset)
2426 			goto set_old;
2427 	} else if (oset) {
2428 		old_set = current->blocked.sig[0];
2429 	set_old:
2430 		error = -EFAULT;
2431 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2432 			goto out;
2433 	}
2434 	error = 0;
2435 out:
2436 	return error;
2437 }
2438 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2439 
2440 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2441 asmlinkage long
2442 sys_rt_sigaction(int sig,
2443 		 const struct sigaction __user *act,
2444 		 struct sigaction __user *oact,
2445 		 size_t sigsetsize)
2446 {
2447 	struct k_sigaction new_sa, old_sa;
2448 	int ret = -EINVAL;
2449 
2450 	/* XXX: Don't preclude handling different sized sigset_t's. */
2451 	if (sigsetsize != sizeof(sigset_t))
2452 		goto out;
2453 
2454 	if (act) {
2455 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2456 			return -EFAULT;
2457 	}
2458 
2459 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2460 
2461 	if (!ret && oact) {
2462 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2463 			return -EFAULT;
2464 	}
2465 out:
2466 	return ret;
2467 }
2468 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2469 
2470 #ifdef __ARCH_WANT_SYS_SGETMASK
2471 
2472 /*
2473  * For backwards compatibility.  Functionality superseded by sigprocmask.
2474  */
2475 asmlinkage long
2476 sys_sgetmask(void)
2477 {
2478 	/* SMP safe */
2479 	return current->blocked.sig[0];
2480 }
2481 
2482 asmlinkage long
2483 sys_ssetmask(int newmask)
2484 {
2485 	int old;
2486 
2487 	spin_lock_irq(&current->sighand->siglock);
2488 	old = current->blocked.sig[0];
2489 
2490 	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2491 						  sigmask(SIGSTOP)));
2492 	recalc_sigpending();
2493 	spin_unlock_irq(&current->sighand->siglock);
2494 
2495 	return old;
2496 }
2497 #endif /* __ARCH_WANT_SYS_SGETMASK */
2498 
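/*
 * A worked example of the legacy single-word interface above: the old
 * calls operate on one mask word (old_sigset_t), so they only cover
 * signals 1..BITS_PER_LONG.  The mapping is sigmask(sig) == 1UL << (sig - 1),
 * so with the usual x86 numbering (SIGINT == 2, SIGKILL == 9),
 * ssetmask(sigmask(SIGINT)) asks to install mask word 0x00000002, and
 * the SIGKILL bit 0x00000100 is always stripped by the
 * ~(sigmask(SIGKILL)|sigmask(SIGSTOP)) filter before the mask takes
 * effect.  Signal numbers vary by architecture; the values here are
 * only illustrative.
 */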
2499 #ifdef __ARCH_WANT_SYS_SIGNAL
2500 /*
2501  * For backwards compatibility.  Functionality superseded by sigaction.
2502  */
2503 asmlinkage unsigned long
2504 sys_signal(int sig, __sighandler_t handler)
2505 {
2506 	struct k_sigaction new_sa, old_sa;
2507 	int ret;
2508 
2509 	new_sa.sa.sa_handler = handler;
2510 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2511 	sigemptyset(&new_sa.sa.sa_mask);
2512 
2513 	ret = do_sigaction(sig, &new_sa, &old_sa);
2514 
2515 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2516 }
2517 #endif /* __ARCH_WANT_SYS_SIGNAL */
2518 
2519 #ifdef __ARCH_WANT_SYS_PAUSE
2520 
2521 asmlinkage long
2522 sys_pause(void)
2523 {
2524 	current->state = TASK_INTERRUPTIBLE;
2525 	schedule();
2526 	return -ERESTARTNOHAND;
2527 }
2528 
2529 #endif
2530 
2531 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2532 asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2533 {
2534 	sigset_t newset;
2535 
2536 	/* XXX: Don't preclude handling different sized sigset_t's. */
2537 	if (sigsetsize != sizeof(sigset_t))
2538 		return -EINVAL;
2539 
2540 	if (copy_from_user(&newset, unewset, sizeof(newset)))
2541 		return -EFAULT;
2542 	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2543 
2544 	spin_lock_irq(&current->sighand->siglock);
2545 	current->saved_sigmask = current->blocked;
2546 	current->blocked = newset;
2547 	recalc_sigpending();
2548 	spin_unlock_irq(&current->sighand->siglock);
2549 
2550 	current->state = TASK_INTERRUPTIBLE;
2551 	schedule();
2552 	set_thread_flag(TIF_RESTORE_SIGMASK);
2553 	return -ERESTARTNOHAND;
2554 }
2555 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
2556 
2557 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2558 {
2559 	return NULL;
2560 }
2561 
2562 void __init signals_init(void)
2563 {
2564 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2565 }
2566 
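/*
 * Userspace view of sys_rt_sigsuspend() above: unlike pause(2), which
 * can lose a signal that arrives between a check and the sleep,
 * sigsuspend(2) swaps the blocked mask and sleeps atomically (the
 * kernel restores the saved mask on return via TIF_RESTORE_SIGMASK).
 * A minimal sketch that waits for SIGUSR1 without that race:
 *
 *	#include <signal.h>
 *
 *	void wait_for_usr1(void)
 *	{
 *		sigset_t block, waitmask;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &waitmask);
 *		sigdelset(&waitmask, SIGUSR1);
 *		// ... test the wake-up condition here; SIGUSR1 cannot
 *		// slip through because it is still blocked ...
 *		sigsuspend(&waitmask);	// returns -1/EINTR after the handler
 *		sigprocmask(SIG_UNBLOCK, &block, NULL);
 *	}
 */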