1 /* 2 * linux/kernel/signal.c 3 * 4 * Copyright (C) 1991, 1992 Linus Torvalds 5 * 6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson 7 * 8 * 2003-06-02 Jim Houston - Concurrent Computer Corp. 9 * Changes to use preallocated sigqueue structures 10 * to allow signals to be sent reliably. 11 */ 12 13 #include <linux/slab.h> 14 #include <linux/module.h> 15 #include <linux/smp_lock.h> 16 #include <linux/init.h> 17 #include <linux/sched.h> 18 #include <linux/fs.h> 19 #include <linux/tty.h> 20 #include <linux/binfmts.h> 21 #include <linux/security.h> 22 #include <linux/syscalls.h> 23 #include <linux/ptrace.h> 24 #include <linux/signal.h> 25 #include <linux/capability.h> 26 #include <linux/freezer.h> 27 #include <linux/pid_namespace.h> 28 #include <linux/nsproxy.h> 29 30 #include <asm/param.h> 31 #include <asm/uaccess.h> 32 #include <asm/unistd.h> 33 #include <asm/siginfo.h> 34 #include "audit.h" /* audit_signal_info() */ 35 36 /* 37 * SLAB caches for signal bits. 38 */ 39 40 static struct kmem_cache *sigqueue_cachep; 41 42 /* 43 * In POSIX a signal is sent either to a specific thread (Linux task) 44 * or to the process as a whole (Linux thread group). How the signal 45 * is sent determines whether it's to one thread or the whole group, 46 * which determines which signal mask(s) are involved in blocking it 47 * from being delivered until later. When the signal is delivered, 48 * either it's caught or ignored by a user handler or it has a default 49 * effect that applies to the whole thread group (POSIX process). 50 * 51 * The possible effects an unblocked signal set to SIG_DFL can have are: 52 * ignore - Nothing Happens 53 * terminate - kill the process, i.e. all threads in the group, 54 * similar to exit_group. The group leader (only) reports 55 * WIFSIGNALED status to its parent. 56 * coredump - write a core dump file describing all threads using 57 * the same mm and then kill all those threads 58 * stop - stop all the threads in the group, i.e. TASK_STOPPED state 59 * 60 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored. 61 * Other signals when not blocked and set to SIG_DFL behaves as follows. 62 * The job control signals also have other special effects. 63 * 64 * +--------------------+------------------+ 65 * | POSIX signal | default action | 66 * +--------------------+------------------+ 67 * | SIGHUP | terminate | 68 * | SIGINT | terminate | 69 * | SIGQUIT | coredump | 70 * | SIGILL | coredump | 71 * | SIGTRAP | coredump | 72 * | SIGABRT/SIGIOT | coredump | 73 * | SIGBUS | coredump | 74 * | SIGFPE | coredump | 75 * | SIGKILL | terminate(+) | 76 * | SIGUSR1 | terminate | 77 * | SIGSEGV | coredump | 78 * | SIGUSR2 | terminate | 79 * | SIGPIPE | terminate | 80 * | SIGALRM | terminate | 81 * | SIGTERM | terminate | 82 * | SIGCHLD | ignore | 83 * | SIGCONT | ignore(*) | 84 * | SIGSTOP | stop(*)(+) | 85 * | SIGTSTP | stop(*) | 86 * | SIGTTIN | stop(*) | 87 * | SIGTTOU | stop(*) | 88 * | SIGURG | ignore | 89 * | SIGXCPU | coredump | 90 * | SIGXFSZ | coredump | 91 * | SIGVTALRM | terminate | 92 * | SIGPROF | terminate | 93 * | SIGPOLL/SIGIO | terminate | 94 * | SIGSYS/SIGUNUSED | coredump | 95 * | SIGSTKFLT | terminate | 96 * | SIGWINCH | ignore | 97 * | SIGPWR | terminate | 98 * | SIGRTMIN-SIGRTMAX | terminate | 99 * +--------------------+------------------+ 100 * | non-POSIX signal | default action | 101 * +--------------------+------------------+ 102 * | SIGEMT | coredump | 103 * +--------------------+------------------+ 104 * 105 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default". 106 * (*) Special job control effects: 107 * When SIGCONT is sent, it resumes the process (all threads in the group) 108 * from TASK_STOPPED state and also clears any pending/queued stop signals 109 * (any of those marked with "stop(*)"). This happens regardless of blocking, 110 * catching, or ignoring SIGCONT. When any stop signal is sent, it clears 111 * any pending/queued SIGCONT signals; this happens regardless of blocking, 112 * catching, or ignored the stop signal, though (except for SIGSTOP) the 113 * default action of stopping the process may happen later or never. 114 */ 115 116 #ifdef SIGEMT 117 #define M_SIGEMT M(SIGEMT) 118 #else 119 #define M_SIGEMT 0 120 #endif 121 122 #if SIGRTMIN > BITS_PER_LONG 123 #define M(sig) (1ULL << ((sig)-1)) 124 #else 125 #define M(sig) (1UL << ((sig)-1)) 126 #endif 127 #define T(sig, mask) (M(sig) & (mask)) 128 129 #define SIG_KERNEL_ONLY_MASK (\ 130 M(SIGKILL) | M(SIGSTOP) ) 131 132 #define SIG_KERNEL_STOP_MASK (\ 133 M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) ) 134 135 #define SIG_KERNEL_COREDUMP_MASK (\ 136 M(SIGQUIT) | M(SIGILL) | M(SIGTRAP) | M(SIGABRT) | \ 137 M(SIGFPE) | M(SIGSEGV) | M(SIGBUS) | M(SIGSYS) | \ 138 M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT ) 139 140 #define SIG_KERNEL_IGNORE_MASK (\ 141 M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) ) 142 143 #define sig_kernel_only(sig) \ 144 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK)) 145 #define sig_kernel_coredump(sig) \ 146 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK)) 147 #define sig_kernel_ignore(sig) \ 148 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK)) 149 #define sig_kernel_stop(sig) \ 150 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK)) 151 152 #define sig_needs_tasklist(sig) ((sig) == SIGCONT) 153 154 #define sig_user_defined(t, signr) \ 155 (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \ 156 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN)) 157 158 #define sig_fatal(t, signr) \ 159 (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \ 160 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL) 161 162 static int sig_ignored(struct task_struct *t, int sig) 163 { 164 void __user * handler; 165 166 /* 167 * Tracers always want to know about signals.. 168 */ 169 if (t->ptrace & PT_PTRACED) 170 return 0; 171 172 /* 173 * Blocked signals are never ignored, since the 174 * signal handler may change by the time it is 175 * unblocked. 176 */ 177 if (sigismember(&t->blocked, sig)) 178 return 0; 179 180 /* Is it explicitly or implicitly ignored? */ 181 handler = t->sighand->action[sig-1].sa.sa_handler; 182 return handler == SIG_IGN || 183 (handler == SIG_DFL && sig_kernel_ignore(sig)); 184 } 185 186 /* 187 * Re-calculate pending state from the set of locally pending 188 * signals, globally pending signals, and blocked signals. 189 */ 190 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) 191 { 192 unsigned long ready; 193 long i; 194 195 switch (_NSIG_WORDS) { 196 default: 197 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;) 198 ready |= signal->sig[i] &~ blocked->sig[i]; 199 break; 200 201 case 4: ready = signal->sig[3] &~ blocked->sig[3]; 202 ready |= signal->sig[2] &~ blocked->sig[2]; 203 ready |= signal->sig[1] &~ blocked->sig[1]; 204 ready |= signal->sig[0] &~ blocked->sig[0]; 205 break; 206 207 case 2: ready = signal->sig[1] &~ blocked->sig[1]; 208 ready |= signal->sig[0] &~ blocked->sig[0]; 209 break; 210 211 case 1: ready = signal->sig[0] &~ blocked->sig[0]; 212 } 213 return ready != 0; 214 } 215 216 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b)) 217 218 fastcall void recalc_sigpending_tsk(struct task_struct *t) 219 { 220 if (t->signal->group_stop_count > 0 || 221 (freezing(t)) || 222 PENDING(&t->pending, &t->blocked) || 223 PENDING(&t->signal->shared_pending, &t->blocked)) 224 set_tsk_thread_flag(t, TIF_SIGPENDING); 225 else 226 clear_tsk_thread_flag(t, TIF_SIGPENDING); 227 } 228 229 void recalc_sigpending(void) 230 { 231 recalc_sigpending_tsk(current); 232 } 233 234 /* Given the mask, find the first available signal that should be serviced. */ 235 236 static int 237 next_signal(struct sigpending *pending, sigset_t *mask) 238 { 239 unsigned long i, *s, *m, x; 240 int sig = 0; 241 242 s = pending->signal.sig; 243 m = mask->sig; 244 switch (_NSIG_WORDS) { 245 default: 246 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m) 247 if ((x = *s &~ *m) != 0) { 248 sig = ffz(~x) + i*_NSIG_BPW + 1; 249 break; 250 } 251 break; 252 253 case 2: if ((x = s[0] &~ m[0]) != 0) 254 sig = 1; 255 else if ((x = s[1] &~ m[1]) != 0) 256 sig = _NSIG_BPW + 1; 257 else 258 break; 259 sig += ffz(~x); 260 break; 261 262 case 1: if ((x = *s &~ *m) != 0) 263 sig = ffz(~x) + 1; 264 break; 265 } 266 267 return sig; 268 } 269 270 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, 271 int override_rlimit) 272 { 273 struct sigqueue *q = NULL; 274 struct user_struct *user; 275 276 /* 277 * In order to avoid problems with "switch_user()", we want to make 278 * sure that the compiler doesn't re-load "t->user" 279 */ 280 user = t->user; 281 barrier(); 282 atomic_inc(&user->sigpending); 283 if (override_rlimit || 284 atomic_read(&user->sigpending) <= 285 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) 286 q = kmem_cache_alloc(sigqueue_cachep, flags); 287 if (unlikely(q == NULL)) { 288 atomic_dec(&user->sigpending); 289 } else { 290 INIT_LIST_HEAD(&q->list); 291 q->flags = 0; 292 q->user = get_uid(user); 293 } 294 return(q); 295 } 296 297 static void __sigqueue_free(struct sigqueue *q) 298 { 299 if (q->flags & SIGQUEUE_PREALLOC) 300 return; 301 atomic_dec(&q->user->sigpending); 302 free_uid(q->user); 303 kmem_cache_free(sigqueue_cachep, q); 304 } 305 306 void flush_sigqueue(struct sigpending *queue) 307 { 308 struct sigqueue *q; 309 310 sigemptyset(&queue->signal); 311 while (!list_empty(&queue->list)) { 312 q = list_entry(queue->list.next, struct sigqueue , list); 313 list_del_init(&q->list); 314 __sigqueue_free(q); 315 } 316 } 317 318 /* 319 * Flush all pending signals for a task. 320 */ 321 void flush_signals(struct task_struct *t) 322 { 323 unsigned long flags; 324 325 spin_lock_irqsave(&t->sighand->siglock, flags); 326 clear_tsk_thread_flag(t,TIF_SIGPENDING); 327 flush_sigqueue(&t->pending); 328 flush_sigqueue(&t->signal->shared_pending); 329 spin_unlock_irqrestore(&t->sighand->siglock, flags); 330 } 331 332 /* 333 * Flush all handlers for a task. 334 */ 335 336 void 337 flush_signal_handlers(struct task_struct *t, int force_default) 338 { 339 int i; 340 struct k_sigaction *ka = &t->sighand->action[0]; 341 for (i = _NSIG ; i != 0 ; i--) { 342 if (force_default || ka->sa.sa_handler != SIG_IGN) 343 ka->sa.sa_handler = SIG_DFL; 344 ka->sa.sa_flags = 0; 345 sigemptyset(&ka->sa.sa_mask); 346 ka++; 347 } 348 } 349 350 351 /* Notify the system that a driver wants to block all signals for this 352 * process, and wants to be notified if any signals at all were to be 353 * sent/acted upon. If the notifier routine returns non-zero, then the 354 * signal will be acted upon after all. If the notifier routine returns 0, 355 * then then signal will be blocked. Only one block per process is 356 * allowed. priv is a pointer to private data that the notifier routine 357 * can use to determine if the signal should be blocked or not. */ 358 359 void 360 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask) 361 { 362 unsigned long flags; 363 364 spin_lock_irqsave(¤t->sighand->siglock, flags); 365 current->notifier_mask = mask; 366 current->notifier_data = priv; 367 current->notifier = notifier; 368 spin_unlock_irqrestore(¤t->sighand->siglock, flags); 369 } 370 371 /* Notify the system that blocking has ended. */ 372 373 void 374 unblock_all_signals(void) 375 { 376 unsigned long flags; 377 378 spin_lock_irqsave(¤t->sighand->siglock, flags); 379 current->notifier = NULL; 380 current->notifier_data = NULL; 381 recalc_sigpending(); 382 spin_unlock_irqrestore(¤t->sighand->siglock, flags); 383 } 384 385 static int collect_signal(int sig, struct sigpending *list, siginfo_t *info) 386 { 387 struct sigqueue *q, *first = NULL; 388 int still_pending = 0; 389 390 if (unlikely(!sigismember(&list->signal, sig))) 391 return 0; 392 393 /* 394 * Collect the siginfo appropriate to this signal. Check if 395 * there is another siginfo for the same signal. 396 */ 397 list_for_each_entry(q, &list->list, list) { 398 if (q->info.si_signo == sig) { 399 if (first) { 400 still_pending = 1; 401 break; 402 } 403 first = q; 404 } 405 } 406 if (first) { 407 list_del_init(&first->list); 408 copy_siginfo(info, &first->info); 409 __sigqueue_free(first); 410 if (!still_pending) 411 sigdelset(&list->signal, sig); 412 } else { 413 414 /* Ok, it wasn't in the queue. This must be 415 a fast-pathed signal or we must have been 416 out of queue space. So zero out the info. 417 */ 418 sigdelset(&list->signal, sig); 419 info->si_signo = sig; 420 info->si_errno = 0; 421 info->si_code = 0; 422 info->si_pid = 0; 423 info->si_uid = 0; 424 } 425 return 1; 426 } 427 428 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, 429 siginfo_t *info) 430 { 431 int sig = next_signal(pending, mask); 432 433 if (sig) { 434 if (current->notifier) { 435 if (sigismember(current->notifier_mask, sig)) { 436 if (!(current->notifier)(current->notifier_data)) { 437 clear_thread_flag(TIF_SIGPENDING); 438 return 0; 439 } 440 } 441 } 442 443 if (!collect_signal(sig, pending, info)) 444 sig = 0; 445 } 446 447 return sig; 448 } 449 450 /* 451 * Dequeue a signal and return the element to the caller, which is 452 * expected to free it. 453 * 454 * All callers have to hold the siglock. 455 */ 456 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) 457 { 458 int signr = __dequeue_signal(&tsk->pending, mask, info); 459 if (!signr) { 460 signr = __dequeue_signal(&tsk->signal->shared_pending, 461 mask, info); 462 /* 463 * itimer signal ? 464 * 465 * itimers are process shared and we restart periodic 466 * itimers in the signal delivery path to prevent DoS 467 * attacks in the high resolution timer case. This is 468 * compliant with the old way of self restarting 469 * itimers, as the SIGALRM is a legacy signal and only 470 * queued once. Changing the restart behaviour to 471 * restart the timer in the signal dequeue path is 472 * reducing the timer noise on heavy loaded !highres 473 * systems too. 474 */ 475 if (unlikely(signr == SIGALRM)) { 476 struct hrtimer *tmr = &tsk->signal->real_timer; 477 478 if (!hrtimer_is_queued(tmr) && 479 tsk->signal->it_real_incr.tv64 != 0) { 480 hrtimer_forward(tmr, tmr->base->get_time(), 481 tsk->signal->it_real_incr); 482 hrtimer_restart(tmr); 483 } 484 } 485 } 486 recalc_sigpending_tsk(tsk); 487 if (signr && unlikely(sig_kernel_stop(signr))) { 488 /* 489 * Set a marker that we have dequeued a stop signal. Our 490 * caller might release the siglock and then the pending 491 * stop signal it is about to process is no longer in the 492 * pending bitmasks, but must still be cleared by a SIGCONT 493 * (and overruled by a SIGKILL). So those cases clear this 494 * shared flag after we've set it. Note that this flag may 495 * remain set after the signal we return is ignored or 496 * handled. That doesn't matter because its only purpose 497 * is to alert stop-signal processing code when another 498 * processor has come along and cleared the flag. 499 */ 500 if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) 501 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; 502 } 503 if ( signr && 504 ((info->si_code & __SI_MASK) == __SI_TIMER) && 505 info->si_sys_private){ 506 /* 507 * Release the siglock to ensure proper locking order 508 * of timer locks outside of siglocks. Note, we leave 509 * irqs disabled here, since the posix-timers code is 510 * about to disable them again anyway. 511 */ 512 spin_unlock(&tsk->sighand->siglock); 513 do_schedule_next_timer(info); 514 spin_lock(&tsk->sighand->siglock); 515 } 516 return signr; 517 } 518 519 /* 520 * Tell a process that it has a new active signal.. 521 * 522 * NOTE! we rely on the previous spin_lock to 523 * lock interrupts for us! We can only be called with 524 * "siglock" held, and the local interrupt must 525 * have been disabled when that got acquired! 526 * 527 * No need to set need_resched since signal event passing 528 * goes through ->blocked 529 */ 530 void signal_wake_up(struct task_struct *t, int resume) 531 { 532 unsigned int mask; 533 534 set_tsk_thread_flag(t, TIF_SIGPENDING); 535 536 /* 537 * For SIGKILL, we want to wake it up in the stopped/traced case. 538 * We don't check t->state here because there is a race with it 539 * executing another processor and just now entering stopped state. 540 * By using wake_up_state, we ensure the process will wake up and 541 * handle its death signal. 542 */ 543 mask = TASK_INTERRUPTIBLE; 544 if (resume) 545 mask |= TASK_STOPPED | TASK_TRACED; 546 if (!wake_up_state(t, mask)) 547 kick_process(t); 548 } 549 550 /* 551 * Remove signals in mask from the pending set and queue. 552 * Returns 1 if any signals were found. 553 * 554 * All callers must be holding the siglock. 555 * 556 * This version takes a sigset mask and looks at all signals, 557 * not just those in the first mask word. 558 */ 559 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s) 560 { 561 struct sigqueue *q, *n; 562 sigset_t m; 563 564 sigandsets(&m, mask, &s->signal); 565 if (sigisemptyset(&m)) 566 return 0; 567 568 signandsets(&s->signal, &s->signal, mask); 569 list_for_each_entry_safe(q, n, &s->list, list) { 570 if (sigismember(mask, q->info.si_signo)) { 571 list_del_init(&q->list); 572 __sigqueue_free(q); 573 } 574 } 575 return 1; 576 } 577 /* 578 * Remove signals in mask from the pending set and queue. 579 * Returns 1 if any signals were found. 580 * 581 * All callers must be holding the siglock. 582 */ 583 static int rm_from_queue(unsigned long mask, struct sigpending *s) 584 { 585 struct sigqueue *q, *n; 586 587 if (!sigtestsetmask(&s->signal, mask)) 588 return 0; 589 590 sigdelsetmask(&s->signal, mask); 591 list_for_each_entry_safe(q, n, &s->list, list) { 592 if (q->info.si_signo < SIGRTMIN && 593 (mask & sigmask(q->info.si_signo))) { 594 list_del_init(&q->list); 595 __sigqueue_free(q); 596 } 597 } 598 return 1; 599 } 600 601 /* 602 * Bad permissions for sending the signal 603 */ 604 static int check_kill_permission(int sig, struct siginfo *info, 605 struct task_struct *t) 606 { 607 int error = -EINVAL; 608 if (!valid_signal(sig)) 609 return error; 610 error = -EPERM; 611 if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) 612 && ((sig != SIGCONT) || 613 (process_session(current) != process_session(t))) 614 && (current->euid ^ t->suid) && (current->euid ^ t->uid) 615 && (current->uid ^ t->suid) && (current->uid ^ t->uid) 616 && !capable(CAP_KILL)) 617 return error; 618 619 error = security_task_kill(t, info, sig, 0); 620 if (!error) 621 audit_signal_info(sig, t); /* Let audit system see the signal */ 622 return error; 623 } 624 625 /* forward decl */ 626 static void do_notify_parent_cldstop(struct task_struct *tsk, int why); 627 628 /* 629 * Handle magic process-wide effects of stop/continue signals. 630 * Unlike the signal actions, these happen immediately at signal-generation 631 * time regardless of blocking, ignoring, or handling. This does the 632 * actual continuing for SIGCONT, but not the actual stopping for stop 633 * signals. The process stop is done as a signal action for SIG_DFL. 634 */ 635 static void handle_stop_signal(int sig, struct task_struct *p) 636 { 637 struct task_struct *t; 638 639 if (p->signal->flags & SIGNAL_GROUP_EXIT) 640 /* 641 * The process is in the middle of dying already. 642 */ 643 return; 644 645 if (sig_kernel_stop(sig)) { 646 /* 647 * This is a stop signal. Remove SIGCONT from all queues. 648 */ 649 rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending); 650 t = p; 651 do { 652 rm_from_queue(sigmask(SIGCONT), &t->pending); 653 t = next_thread(t); 654 } while (t != p); 655 } else if (sig == SIGCONT) { 656 /* 657 * Remove all stop signals from all queues, 658 * and wake all threads. 659 */ 660 if (unlikely(p->signal->group_stop_count > 0)) { 661 /* 662 * There was a group stop in progress. We'll 663 * pretend it finished before we got here. We are 664 * obliged to report it to the parent: if the 665 * SIGSTOP happened "after" this SIGCONT, then it 666 * would have cleared this pending SIGCONT. If it 667 * happened "before" this SIGCONT, then the parent 668 * got the SIGCHLD about the stop finishing before 669 * the continue happened. We do the notification 670 * now, and it's as if the stop had finished and 671 * the SIGCHLD was pending on entry to this kill. 672 */ 673 p->signal->group_stop_count = 0; 674 p->signal->flags = SIGNAL_STOP_CONTINUED; 675 spin_unlock(&p->sighand->siglock); 676 do_notify_parent_cldstop(p, CLD_STOPPED); 677 spin_lock(&p->sighand->siglock); 678 } 679 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending); 680 t = p; 681 do { 682 unsigned int state; 683 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); 684 685 /* 686 * If there is a handler for SIGCONT, we must make 687 * sure that no thread returns to user mode before 688 * we post the signal, in case it was the only 689 * thread eligible to run the signal handler--then 690 * it must not do anything between resuming and 691 * running the handler. With the TIF_SIGPENDING 692 * flag set, the thread will pause and acquire the 693 * siglock that we hold now and until we've queued 694 * the pending signal. 695 * 696 * Wake up the stopped thread _after_ setting 697 * TIF_SIGPENDING 698 */ 699 state = TASK_STOPPED; 700 if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) { 701 set_tsk_thread_flag(t, TIF_SIGPENDING); 702 state |= TASK_INTERRUPTIBLE; 703 } 704 wake_up_state(t, state); 705 706 t = next_thread(t); 707 } while (t != p); 708 709 if (p->signal->flags & SIGNAL_STOP_STOPPED) { 710 /* 711 * We were in fact stopped, and are now continued. 712 * Notify the parent with CLD_CONTINUED. 713 */ 714 p->signal->flags = SIGNAL_STOP_CONTINUED; 715 p->signal->group_exit_code = 0; 716 spin_unlock(&p->sighand->siglock); 717 do_notify_parent_cldstop(p, CLD_CONTINUED); 718 spin_lock(&p->sighand->siglock); 719 } else { 720 /* 721 * We are not stopped, but there could be a stop 722 * signal in the middle of being processed after 723 * being removed from the queue. Clear that too. 724 */ 725 p->signal->flags = 0; 726 } 727 } else if (sig == SIGKILL) { 728 /* 729 * Make sure that any pending stop signal already dequeued 730 * is undone by the wakeup for SIGKILL. 731 */ 732 p->signal->flags = 0; 733 } 734 } 735 736 static int send_signal(int sig, struct siginfo *info, struct task_struct *t, 737 struct sigpending *signals) 738 { 739 struct sigqueue * q = NULL; 740 int ret = 0; 741 742 /* 743 * fast-pathed signals for kernel-internal things like SIGSTOP 744 * or SIGKILL. 745 */ 746 if (info == SEND_SIG_FORCED) 747 goto out_set; 748 749 /* Real-time signals must be queued if sent by sigqueue, or 750 some other real-time mechanism. It is implementation 751 defined whether kill() does so. We attempt to do so, on 752 the principle of least surprise, but since kill is not 753 allowed to fail with EAGAIN when low on memory we just 754 make sure at least one signal gets delivered and don't 755 pass on the info struct. */ 756 757 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN && 758 (is_si_special(info) || 759 info->si_code >= 0))); 760 if (q) { 761 list_add_tail(&q->list, &signals->list); 762 switch ((unsigned long) info) { 763 case (unsigned long) SEND_SIG_NOINFO: 764 q->info.si_signo = sig; 765 q->info.si_errno = 0; 766 q->info.si_code = SI_USER; 767 q->info.si_pid = current->pid; 768 q->info.si_uid = current->uid; 769 break; 770 case (unsigned long) SEND_SIG_PRIV: 771 q->info.si_signo = sig; 772 q->info.si_errno = 0; 773 q->info.si_code = SI_KERNEL; 774 q->info.si_pid = 0; 775 q->info.si_uid = 0; 776 break; 777 default: 778 copy_siginfo(&q->info, info); 779 break; 780 } 781 } else if (!is_si_special(info)) { 782 if (sig >= SIGRTMIN && info->si_code != SI_USER) 783 /* 784 * Queue overflow, abort. We may abort if the signal was rt 785 * and sent by user using something other than kill(). 786 */ 787 return -EAGAIN; 788 } 789 790 out_set: 791 sigaddset(&signals->signal, sig); 792 return ret; 793 } 794 795 #define LEGACY_QUEUE(sigptr, sig) \ 796 (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig))) 797 798 799 static int 800 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) 801 { 802 int ret = 0; 803 804 BUG_ON(!irqs_disabled()); 805 assert_spin_locked(&t->sighand->siglock); 806 807 /* Short-circuit ignored signals. */ 808 if (sig_ignored(t, sig)) 809 goto out; 810 811 /* Support queueing exactly one non-rt signal, so that we 812 can get more detailed information about the cause of 813 the signal. */ 814 if (LEGACY_QUEUE(&t->pending, sig)) 815 goto out; 816 817 ret = send_signal(sig, info, t, &t->pending); 818 if (!ret && !sigismember(&t->blocked, sig)) 819 signal_wake_up(t, sig == SIGKILL); 820 out: 821 return ret; 822 } 823 824 /* 825 * Force a signal that the process can't ignore: if necessary 826 * we unblock the signal and change any SIG_IGN to SIG_DFL. 827 * 828 * Note: If we unblock the signal, we always reset it to SIG_DFL, 829 * since we do not want to have a signal handler that was blocked 830 * be invoked when user space had explicitly blocked it. 831 * 832 * We don't want to have recursive SIGSEGV's etc, for example. 833 */ 834 int 835 force_sig_info(int sig, struct siginfo *info, struct task_struct *t) 836 { 837 unsigned long int flags; 838 int ret, blocked, ignored; 839 struct k_sigaction *action; 840 841 spin_lock_irqsave(&t->sighand->siglock, flags); 842 action = &t->sighand->action[sig-1]; 843 ignored = action->sa.sa_handler == SIG_IGN; 844 blocked = sigismember(&t->blocked, sig); 845 if (blocked || ignored) { 846 action->sa.sa_handler = SIG_DFL; 847 if (blocked) { 848 sigdelset(&t->blocked, sig); 849 recalc_sigpending_tsk(t); 850 } 851 } 852 ret = specific_send_sig_info(sig, info, t); 853 spin_unlock_irqrestore(&t->sighand->siglock, flags); 854 855 return ret; 856 } 857 858 void 859 force_sig_specific(int sig, struct task_struct *t) 860 { 861 force_sig_info(sig, SEND_SIG_FORCED, t); 862 } 863 864 /* 865 * Test if P wants to take SIG. After we've checked all threads with this, 866 * it's equivalent to finding no threads not blocking SIG. Any threads not 867 * blocking SIG were ruled out because they are not running and already 868 * have pending signals. Such threads will dequeue from the shared queue 869 * as soon as they're available, so putting the signal on the shared queue 870 * will be equivalent to sending it to one such thread. 871 */ 872 static inline int wants_signal(int sig, struct task_struct *p) 873 { 874 if (sigismember(&p->blocked, sig)) 875 return 0; 876 if (p->flags & PF_EXITING) 877 return 0; 878 if (sig == SIGKILL) 879 return 1; 880 if (p->state & (TASK_STOPPED | TASK_TRACED)) 881 return 0; 882 return task_curr(p) || !signal_pending(p); 883 } 884 885 static void 886 __group_complete_signal(int sig, struct task_struct *p) 887 { 888 struct task_struct *t; 889 890 /* 891 * Now find a thread we can wake up to take the signal off the queue. 892 * 893 * If the main thread wants the signal, it gets first crack. 894 * Probably the least surprising to the average bear. 895 */ 896 if (wants_signal(sig, p)) 897 t = p; 898 else if (thread_group_empty(p)) 899 /* 900 * There is just one thread and it does not need to be woken. 901 * It will dequeue unblocked signals before it runs again. 902 */ 903 return; 904 else { 905 /* 906 * Otherwise try to find a suitable thread. 907 */ 908 t = p->signal->curr_target; 909 if (t == NULL) 910 /* restart balancing at this thread */ 911 t = p->signal->curr_target = p; 912 913 while (!wants_signal(sig, t)) { 914 t = next_thread(t); 915 if (t == p->signal->curr_target) 916 /* 917 * No thread needs to be woken. 918 * Any eligible threads will see 919 * the signal in the queue soon. 920 */ 921 return; 922 } 923 p->signal->curr_target = t; 924 } 925 926 /* 927 * Found a killable thread. If the signal will be fatal, 928 * then start taking the whole group down immediately. 929 */ 930 if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) && 931 !sigismember(&t->real_blocked, sig) && 932 (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) { 933 /* 934 * This signal will be fatal to the whole group. 935 */ 936 if (!sig_kernel_coredump(sig)) { 937 /* 938 * Start a group exit and wake everybody up. 939 * This way we don't have other threads 940 * running and doing things after a slower 941 * thread has the fatal signal pending. 942 */ 943 p->signal->flags = SIGNAL_GROUP_EXIT; 944 p->signal->group_exit_code = sig; 945 p->signal->group_stop_count = 0; 946 t = p; 947 do { 948 sigaddset(&t->pending.signal, SIGKILL); 949 signal_wake_up(t, 1); 950 t = next_thread(t); 951 } while (t != p); 952 return; 953 } 954 955 /* 956 * There will be a core dump. We make all threads other 957 * than the chosen one go into a group stop so that nothing 958 * happens until it gets scheduled, takes the signal off 959 * the shared queue, and does the core dump. This is a 960 * little more complicated than strictly necessary, but it 961 * keeps the signal state that winds up in the core dump 962 * unchanged from the death state, e.g. which thread had 963 * the core-dump signal unblocked. 964 */ 965 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); 966 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending); 967 p->signal->group_stop_count = 0; 968 p->signal->group_exit_task = t; 969 t = p; 970 do { 971 p->signal->group_stop_count++; 972 signal_wake_up(t, 0); 973 t = next_thread(t); 974 } while (t != p); 975 wake_up_process(p->signal->group_exit_task); 976 return; 977 } 978 979 /* 980 * The signal is already in the shared-pending queue. 981 * Tell the chosen thread to wake up and dequeue it. 982 */ 983 signal_wake_up(t, sig == SIGKILL); 984 return; 985 } 986 987 int 988 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) 989 { 990 int ret = 0; 991 992 assert_spin_locked(&p->sighand->siglock); 993 handle_stop_signal(sig, p); 994 995 /* Short-circuit ignored signals. */ 996 if (sig_ignored(p, sig)) 997 return ret; 998 999 if (LEGACY_QUEUE(&p->signal->shared_pending, sig)) 1000 /* This is a non-RT signal and we already have one queued. */ 1001 return ret; 1002 1003 /* 1004 * Put this signal on the shared-pending queue, or fail with EAGAIN. 1005 * We always use the shared queue for process-wide signals, 1006 * to avoid several races. 1007 */ 1008 ret = send_signal(sig, info, p, &p->signal->shared_pending); 1009 if (unlikely(ret)) 1010 return ret; 1011 1012 __group_complete_signal(sig, p); 1013 return 0; 1014 } 1015 1016 /* 1017 * Nuke all other threads in the group. 1018 */ 1019 void zap_other_threads(struct task_struct *p) 1020 { 1021 struct task_struct *t; 1022 1023 p->signal->flags = SIGNAL_GROUP_EXIT; 1024 p->signal->group_stop_count = 0; 1025 1026 if (thread_group_empty(p)) 1027 return; 1028 1029 for (t = next_thread(p); t != p; t = next_thread(t)) { 1030 /* 1031 * Don't bother with already dead threads 1032 */ 1033 if (t->exit_state) 1034 continue; 1035 1036 /* 1037 * We don't want to notify the parent, since we are 1038 * killed as part of a thread group due to another 1039 * thread doing an execve() or similar. So set the 1040 * exit signal to -1 to allow immediate reaping of 1041 * the process. But don't detach the thread group 1042 * leader. 1043 */ 1044 if (t != p->group_leader) 1045 t->exit_signal = -1; 1046 1047 /* SIGKILL will be handled before any pending SIGSTOP */ 1048 sigaddset(&t->pending.signal, SIGKILL); 1049 signal_wake_up(t, 1); 1050 } 1051 } 1052 1053 /* 1054 * Must be called under rcu_read_lock() or with tasklist_lock read-held. 1055 */ 1056 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags) 1057 { 1058 struct sighand_struct *sighand; 1059 1060 for (;;) { 1061 sighand = rcu_dereference(tsk->sighand); 1062 if (unlikely(sighand == NULL)) 1063 break; 1064 1065 spin_lock_irqsave(&sighand->siglock, *flags); 1066 if (likely(sighand == tsk->sighand)) 1067 break; 1068 spin_unlock_irqrestore(&sighand->siglock, *flags); 1069 } 1070 1071 return sighand; 1072 } 1073 1074 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) 1075 { 1076 unsigned long flags; 1077 int ret; 1078 1079 ret = check_kill_permission(sig, info, p); 1080 1081 if (!ret && sig) { 1082 ret = -ESRCH; 1083 if (lock_task_sighand(p, &flags)) { 1084 ret = __group_send_sig_info(sig, info, p); 1085 unlock_task_sighand(p, &flags); 1086 } 1087 } 1088 1089 return ret; 1090 } 1091 1092 /* 1093 * kill_pgrp_info() sends a signal to a process group: this is what the tty 1094 * control characters do (^C, ^Z etc) 1095 */ 1096 1097 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) 1098 { 1099 struct task_struct *p = NULL; 1100 int retval, success; 1101 1102 success = 0; 1103 retval = -ESRCH; 1104 do_each_pid_task(pgrp, PIDTYPE_PGID, p) { 1105 int err = group_send_sig_info(sig, info, p); 1106 success |= !err; 1107 retval = err; 1108 } while_each_pid_task(pgrp, PIDTYPE_PGID, p); 1109 return success ? 0 : retval; 1110 } 1111 1112 int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) 1113 { 1114 int retval; 1115 1116 read_lock(&tasklist_lock); 1117 retval = __kill_pgrp_info(sig, info, pgrp); 1118 read_unlock(&tasklist_lock); 1119 1120 return retval; 1121 } 1122 1123 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid) 1124 { 1125 int error; 1126 struct task_struct *p; 1127 1128 rcu_read_lock(); 1129 if (unlikely(sig_needs_tasklist(sig))) 1130 read_lock(&tasklist_lock); 1131 1132 p = pid_task(pid, PIDTYPE_PID); 1133 error = -ESRCH; 1134 if (p) 1135 error = group_send_sig_info(sig, info, p); 1136 1137 if (unlikely(sig_needs_tasklist(sig))) 1138 read_unlock(&tasklist_lock); 1139 rcu_read_unlock(); 1140 return error; 1141 } 1142 1143 int 1144 kill_proc_info(int sig, struct siginfo *info, pid_t pid) 1145 { 1146 int error; 1147 rcu_read_lock(); 1148 error = kill_pid_info(sig, info, find_pid(pid)); 1149 rcu_read_unlock(); 1150 return error; 1151 } 1152 1153 /* like kill_pid_info(), but doesn't use uid/euid of "current" */ 1154 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid, 1155 uid_t uid, uid_t euid, u32 secid) 1156 { 1157 int ret = -EINVAL; 1158 struct task_struct *p; 1159 1160 if (!valid_signal(sig)) 1161 return ret; 1162 1163 read_lock(&tasklist_lock); 1164 p = pid_task(pid, PIDTYPE_PID); 1165 if (!p) { 1166 ret = -ESRCH; 1167 goto out_unlock; 1168 } 1169 if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) 1170 && (euid != p->suid) && (euid != p->uid) 1171 && (uid != p->suid) && (uid != p->uid)) { 1172 ret = -EPERM; 1173 goto out_unlock; 1174 } 1175 ret = security_task_kill(p, info, sig, secid); 1176 if (ret) 1177 goto out_unlock; 1178 if (sig && p->sighand) { 1179 unsigned long flags; 1180 spin_lock_irqsave(&p->sighand->siglock, flags); 1181 ret = __group_send_sig_info(sig, info, p); 1182 spin_unlock_irqrestore(&p->sighand->siglock, flags); 1183 } 1184 out_unlock: 1185 read_unlock(&tasklist_lock); 1186 return ret; 1187 } 1188 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid); 1189 1190 /* 1191 * kill_something_info() interprets pid in interesting ways just like kill(2). 1192 * 1193 * POSIX specifies that kill(-1,sig) is unspecified, but what we have 1194 * is probably wrong. Should make it like BSD or SYSV. 1195 */ 1196 1197 static int kill_something_info(int sig, struct siginfo *info, int pid) 1198 { 1199 int ret; 1200 rcu_read_lock(); 1201 if (!pid) { 1202 ret = kill_pgrp_info(sig, info, task_pgrp(current)); 1203 } else if (pid == -1) { 1204 int retval = 0, count = 0; 1205 struct task_struct * p; 1206 1207 read_lock(&tasklist_lock); 1208 for_each_process(p) { 1209 if (p->pid > 1 && p->tgid != current->tgid) { 1210 int err = group_send_sig_info(sig, info, p); 1211 ++count; 1212 if (err != -EPERM) 1213 retval = err; 1214 } 1215 } 1216 read_unlock(&tasklist_lock); 1217 ret = count ? retval : -ESRCH; 1218 } else if (pid < 0) { 1219 ret = kill_pgrp_info(sig, info, find_pid(-pid)); 1220 } else { 1221 ret = kill_pid_info(sig, info, find_pid(pid)); 1222 } 1223 rcu_read_unlock(); 1224 return ret; 1225 } 1226 1227 /* 1228 * These are for backward compatibility with the rest of the kernel source. 1229 */ 1230 1231 /* 1232 * These two are the most common entry points. They send a signal 1233 * just to the specific thread. 1234 */ 1235 int 1236 send_sig_info(int sig, struct siginfo *info, struct task_struct *p) 1237 { 1238 int ret; 1239 unsigned long flags; 1240 1241 /* 1242 * Make sure legacy kernel users don't send in bad values 1243 * (normal paths check this in check_kill_permission). 1244 */ 1245 if (!valid_signal(sig)) 1246 return -EINVAL; 1247 1248 /* 1249 * We need the tasklist lock even for the specific 1250 * thread case (when we don't need to follow the group 1251 * lists) in order to avoid races with "p->sighand" 1252 * going away or changing from under us. 1253 */ 1254 read_lock(&tasklist_lock); 1255 spin_lock_irqsave(&p->sighand->siglock, flags); 1256 ret = specific_send_sig_info(sig, info, p); 1257 spin_unlock_irqrestore(&p->sighand->siglock, flags); 1258 read_unlock(&tasklist_lock); 1259 return ret; 1260 } 1261 1262 #define __si_special(priv) \ 1263 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO) 1264 1265 int 1266 send_sig(int sig, struct task_struct *p, int priv) 1267 { 1268 return send_sig_info(sig, __si_special(priv), p); 1269 } 1270 1271 /* 1272 * This is the entry point for "process-wide" signals. 1273 * They will go to an appropriate thread in the thread group. 1274 */ 1275 int 1276 send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p) 1277 { 1278 int ret; 1279 read_lock(&tasklist_lock); 1280 ret = group_send_sig_info(sig, info, p); 1281 read_unlock(&tasklist_lock); 1282 return ret; 1283 } 1284 1285 void 1286 force_sig(int sig, struct task_struct *p) 1287 { 1288 force_sig_info(sig, SEND_SIG_PRIV, p); 1289 } 1290 1291 /* 1292 * When things go south during signal handling, we 1293 * will force a SIGSEGV. And if the signal that caused 1294 * the problem was already a SIGSEGV, we'll want to 1295 * make sure we don't even try to deliver the signal.. 1296 */ 1297 int 1298 force_sigsegv(int sig, struct task_struct *p) 1299 { 1300 if (sig == SIGSEGV) { 1301 unsigned long flags; 1302 spin_lock_irqsave(&p->sighand->siglock, flags); 1303 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL; 1304 spin_unlock_irqrestore(&p->sighand->siglock, flags); 1305 } 1306 force_sig(SIGSEGV, p); 1307 return 0; 1308 } 1309 1310 int kill_pgrp(struct pid *pid, int sig, int priv) 1311 { 1312 return kill_pgrp_info(sig, __si_special(priv), pid); 1313 } 1314 EXPORT_SYMBOL(kill_pgrp); 1315 1316 int kill_pid(struct pid *pid, int sig, int priv) 1317 { 1318 return kill_pid_info(sig, __si_special(priv), pid); 1319 } 1320 EXPORT_SYMBOL(kill_pid); 1321 1322 int 1323 kill_proc(pid_t pid, int sig, int priv) 1324 { 1325 return kill_proc_info(sig, __si_special(priv), pid); 1326 } 1327 1328 /* 1329 * These functions support sending signals using preallocated sigqueue 1330 * structures. This is needed "because realtime applications cannot 1331 * afford to lose notifications of asynchronous events, like timer 1332 * expirations or I/O completions". In the case of Posix Timers 1333 * we allocate the sigqueue structure from the timer_create. If this 1334 * allocation fails we are able to report the failure to the application 1335 * with an EAGAIN error. 1336 */ 1337 1338 struct sigqueue *sigqueue_alloc(void) 1339 { 1340 struct sigqueue *q; 1341 1342 if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0))) 1343 q->flags |= SIGQUEUE_PREALLOC; 1344 return(q); 1345 } 1346 1347 void sigqueue_free(struct sigqueue *q) 1348 { 1349 unsigned long flags; 1350 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); 1351 /* 1352 * If the signal is still pending remove it from the 1353 * pending queue. 1354 */ 1355 if (unlikely(!list_empty(&q->list))) { 1356 spinlock_t *lock = ¤t->sighand->siglock; 1357 read_lock(&tasklist_lock); 1358 spin_lock_irqsave(lock, flags); 1359 if (!list_empty(&q->list)) 1360 list_del_init(&q->list); 1361 spin_unlock_irqrestore(lock, flags); 1362 read_unlock(&tasklist_lock); 1363 } 1364 q->flags &= ~SIGQUEUE_PREALLOC; 1365 __sigqueue_free(q); 1366 } 1367 1368 int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p) 1369 { 1370 unsigned long flags; 1371 int ret = 0; 1372 1373 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); 1374 1375 /* 1376 * The rcu based delayed sighand destroy makes it possible to 1377 * run this without tasklist lock held. The task struct itself 1378 * cannot go away as create_timer did get_task_struct(). 1379 * 1380 * We return -1, when the task is marked exiting, so 1381 * posix_timer_event can redirect it to the group leader 1382 */ 1383 rcu_read_lock(); 1384 1385 if (!likely(lock_task_sighand(p, &flags))) { 1386 ret = -1; 1387 goto out_err; 1388 } 1389 1390 if (unlikely(!list_empty(&q->list))) { 1391 /* 1392 * If an SI_TIMER entry is already queue just increment 1393 * the overrun count. 1394 */ 1395 BUG_ON(q->info.si_code != SI_TIMER); 1396 q->info.si_overrun++; 1397 goto out; 1398 } 1399 /* Short-circuit ignored signals. */ 1400 if (sig_ignored(p, sig)) { 1401 ret = 1; 1402 goto out; 1403 } 1404 1405 list_add_tail(&q->list, &p->pending.list); 1406 sigaddset(&p->pending.signal, sig); 1407 if (!sigismember(&p->blocked, sig)) 1408 signal_wake_up(p, sig == SIGKILL); 1409 1410 out: 1411 unlock_task_sighand(p, &flags); 1412 out_err: 1413 rcu_read_unlock(); 1414 1415 return ret; 1416 } 1417 1418 int 1419 send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p) 1420 { 1421 unsigned long flags; 1422 int ret = 0; 1423 1424 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); 1425 1426 read_lock(&tasklist_lock); 1427 /* Since it_lock is held, p->sighand cannot be NULL. */ 1428 spin_lock_irqsave(&p->sighand->siglock, flags); 1429 handle_stop_signal(sig, p); 1430 1431 /* Short-circuit ignored signals. */ 1432 if (sig_ignored(p, sig)) { 1433 ret = 1; 1434 goto out; 1435 } 1436 1437 if (unlikely(!list_empty(&q->list))) { 1438 /* 1439 * If an SI_TIMER entry is already queue just increment 1440 * the overrun count. Other uses should not try to 1441 * send the signal multiple times. 1442 */ 1443 BUG_ON(q->info.si_code != SI_TIMER); 1444 q->info.si_overrun++; 1445 goto out; 1446 } 1447 1448 /* 1449 * Put this signal on the shared-pending queue. 1450 * We always use the shared queue for process-wide signals, 1451 * to avoid several races. 1452 */ 1453 list_add_tail(&q->list, &p->signal->shared_pending.list); 1454 sigaddset(&p->signal->shared_pending.signal, sig); 1455 1456 __group_complete_signal(sig, p); 1457 out: 1458 spin_unlock_irqrestore(&p->sighand->siglock, flags); 1459 read_unlock(&tasklist_lock); 1460 return ret; 1461 } 1462 1463 /* 1464 * Wake up any threads in the parent blocked in wait* syscalls. 1465 */ 1466 static inline void __wake_up_parent(struct task_struct *p, 1467 struct task_struct *parent) 1468 { 1469 wake_up_interruptible_sync(&parent->signal->wait_chldexit); 1470 } 1471 1472 /* 1473 * Let a parent know about the death of a child. 1474 * For a stopped/continued status change, use do_notify_parent_cldstop instead. 1475 */ 1476 1477 void do_notify_parent(struct task_struct *tsk, int sig) 1478 { 1479 struct siginfo info; 1480 unsigned long flags; 1481 struct sighand_struct *psig; 1482 1483 BUG_ON(sig == -1); 1484 1485 /* do_notify_parent_cldstop should have been called instead. */ 1486 BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED)); 1487 1488 BUG_ON(!tsk->ptrace && 1489 (tsk->group_leader != tsk || !thread_group_empty(tsk))); 1490 1491 info.si_signo = sig; 1492 info.si_errno = 0; 1493 info.si_pid = tsk->pid; 1494 info.si_uid = tsk->uid; 1495 1496 /* FIXME: find out whether or not this is supposed to be c*time. */ 1497 info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime, 1498 tsk->signal->utime)); 1499 info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime, 1500 tsk->signal->stime)); 1501 1502 info.si_status = tsk->exit_code & 0x7f; 1503 if (tsk->exit_code & 0x80) 1504 info.si_code = CLD_DUMPED; 1505 else if (tsk->exit_code & 0x7f) 1506 info.si_code = CLD_KILLED; 1507 else { 1508 info.si_code = CLD_EXITED; 1509 info.si_status = tsk->exit_code >> 8; 1510 } 1511 1512 psig = tsk->parent->sighand; 1513 spin_lock_irqsave(&psig->siglock, flags); 1514 if (!tsk->ptrace && sig == SIGCHLD && 1515 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || 1516 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { 1517 /* 1518 * We are exiting and our parent doesn't care. POSIX.1 1519 * defines special semantics for setting SIGCHLD to SIG_IGN 1520 * or setting the SA_NOCLDWAIT flag: we should be reaped 1521 * automatically and not left for our parent's wait4 call. 1522 * Rather than having the parent do it as a magic kind of 1523 * signal handler, we just set this to tell do_exit that we 1524 * can be cleaned up without becoming a zombie. Note that 1525 * we still call __wake_up_parent in this case, because a 1526 * blocked sys_wait4 might now return -ECHILD. 1527 * 1528 * Whether we send SIGCHLD or not for SA_NOCLDWAIT 1529 * is implementation-defined: we do (if you don't want 1530 * it, just use SIG_IGN instead). 1531 */ 1532 tsk->exit_signal = -1; 1533 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) 1534 sig = 0; 1535 } 1536 if (valid_signal(sig) && sig > 0) 1537 __group_send_sig_info(sig, &info, tsk->parent); 1538 __wake_up_parent(tsk, tsk->parent); 1539 spin_unlock_irqrestore(&psig->siglock, flags); 1540 } 1541 1542 static void do_notify_parent_cldstop(struct task_struct *tsk, int why) 1543 { 1544 struct siginfo info; 1545 unsigned long flags; 1546 struct task_struct *parent; 1547 struct sighand_struct *sighand; 1548 1549 if (tsk->ptrace & PT_PTRACED) 1550 parent = tsk->parent; 1551 else { 1552 tsk = tsk->group_leader; 1553 parent = tsk->real_parent; 1554 } 1555 1556 info.si_signo = SIGCHLD; 1557 info.si_errno = 0; 1558 info.si_pid = tsk->pid; 1559 info.si_uid = tsk->uid; 1560 1561 /* FIXME: find out whether or not this is supposed to be c*time. */ 1562 info.si_utime = cputime_to_jiffies(tsk->utime); 1563 info.si_stime = cputime_to_jiffies(tsk->stime); 1564 1565 info.si_code = why; 1566 switch (why) { 1567 case CLD_CONTINUED: 1568 info.si_status = SIGCONT; 1569 break; 1570 case CLD_STOPPED: 1571 info.si_status = tsk->signal->group_exit_code & 0x7f; 1572 break; 1573 case CLD_TRAPPED: 1574 info.si_status = tsk->exit_code & 0x7f; 1575 break; 1576 default: 1577 BUG(); 1578 } 1579 1580 sighand = parent->sighand; 1581 spin_lock_irqsave(&sighand->siglock, flags); 1582 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN && 1583 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) 1584 __group_send_sig_info(SIGCHLD, &info, parent); 1585 /* 1586 * Even if SIGCHLD is not generated, we must wake up wait4 calls. 1587 */ 1588 __wake_up_parent(tsk, parent); 1589 spin_unlock_irqrestore(&sighand->siglock, flags); 1590 } 1591 1592 static inline int may_ptrace_stop(void) 1593 { 1594 if (!likely(current->ptrace & PT_PTRACED)) 1595 return 0; 1596 1597 if (unlikely(current->parent == current->real_parent && 1598 (current->ptrace & PT_ATTACHED))) 1599 return 0; 1600 1601 if (unlikely(current->signal == current->parent->signal) && 1602 unlikely(current->signal->flags & SIGNAL_GROUP_EXIT)) 1603 return 0; 1604 1605 /* 1606 * Are we in the middle of do_coredump? 1607 * If so and our tracer is also part of the coredump stopping 1608 * is a deadlock situation, and pointless because our tracer 1609 * is dead so don't allow us to stop. 1610 * If SIGKILL was already sent before the caller unlocked 1611 * ->siglock we must see ->core_waiters != 0. Otherwise it 1612 * is safe to enter schedule(). 1613 */ 1614 if (unlikely(current->mm->core_waiters) && 1615 unlikely(current->mm == current->parent->mm)) 1616 return 0; 1617 1618 return 1; 1619 } 1620 1621 /* 1622 * This must be called with current->sighand->siglock held. 1623 * 1624 * This should be the path for all ptrace stops. 1625 * We always set current->last_siginfo while stopped here. 1626 * That makes it a way to test a stopped process for 1627 * being ptrace-stopped vs being job-control-stopped. 1628 * 1629 * If we actually decide not to stop at all because the tracer is gone, 1630 * we leave nostop_code in current->exit_code. 1631 */ 1632 static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info) 1633 { 1634 /* 1635 * If there is a group stop in progress, 1636 * we must participate in the bookkeeping. 1637 */ 1638 if (current->signal->group_stop_count > 0) 1639 --current->signal->group_stop_count; 1640 1641 current->last_siginfo = info; 1642 current->exit_code = exit_code; 1643 1644 /* Let the debugger run. */ 1645 set_current_state(TASK_TRACED); 1646 spin_unlock_irq(¤t->sighand->siglock); 1647 try_to_freeze(); 1648 read_lock(&tasklist_lock); 1649 if (may_ptrace_stop()) { 1650 do_notify_parent_cldstop(current, CLD_TRAPPED); 1651 read_unlock(&tasklist_lock); 1652 schedule(); 1653 } else { 1654 /* 1655 * By the time we got the lock, our tracer went away. 1656 * Don't stop here. 1657 */ 1658 read_unlock(&tasklist_lock); 1659 set_current_state(TASK_RUNNING); 1660 current->exit_code = nostop_code; 1661 } 1662 1663 /* 1664 * We are back. Now reacquire the siglock before touching 1665 * last_siginfo, so that we are sure to have synchronized with 1666 * any signal-sending on another CPU that wants to examine it. 1667 */ 1668 spin_lock_irq(¤t->sighand->siglock); 1669 current->last_siginfo = NULL; 1670 1671 /* 1672 * Queued signals ignored us while we were stopped for tracing. 1673 * So check for any that we should take before resuming user mode. 1674 */ 1675 recalc_sigpending(); 1676 } 1677 1678 void ptrace_notify(int exit_code) 1679 { 1680 siginfo_t info; 1681 1682 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); 1683 1684 memset(&info, 0, sizeof info); 1685 info.si_signo = SIGTRAP; 1686 info.si_code = exit_code; 1687 info.si_pid = current->pid; 1688 info.si_uid = current->uid; 1689 1690 /* Let the debugger run. */ 1691 spin_lock_irq(¤t->sighand->siglock); 1692 ptrace_stop(exit_code, 0, &info); 1693 spin_unlock_irq(¤t->sighand->siglock); 1694 } 1695 1696 static void 1697 finish_stop(int stop_count) 1698 { 1699 /* 1700 * If there are no other threads in the group, or if there is 1701 * a group stop in progress and we are the last to stop, 1702 * report to the parent. When ptraced, every thread reports itself. 1703 */ 1704 if (stop_count == 0 || (current->ptrace & PT_PTRACED)) { 1705 read_lock(&tasklist_lock); 1706 do_notify_parent_cldstop(current, CLD_STOPPED); 1707 read_unlock(&tasklist_lock); 1708 } 1709 1710 do { 1711 schedule(); 1712 } while (try_to_freeze()); 1713 /* 1714 * Now we don't run again until continued. 1715 */ 1716 current->exit_code = 0; 1717 } 1718 1719 /* 1720 * This performs the stopping for SIGSTOP and other stop signals. 1721 * We have to stop all threads in the thread group. 1722 * Returns nonzero if we've actually stopped and released the siglock. 1723 * Returns zero if we didn't stop and still hold the siglock. 1724 */ 1725 static int do_signal_stop(int signr) 1726 { 1727 struct signal_struct *sig = current->signal; 1728 int stop_count; 1729 1730 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) 1731 return 0; 1732 1733 if (sig->group_stop_count > 0) { 1734 /* 1735 * There is a group stop in progress. We don't need to 1736 * start another one. 1737 */ 1738 stop_count = --sig->group_stop_count; 1739 } else { 1740 /* 1741 * There is no group stop already in progress. 1742 * We must initiate one now. 1743 */ 1744 struct task_struct *t; 1745 1746 sig->group_exit_code = signr; 1747 1748 stop_count = 0; 1749 for (t = next_thread(current); t != current; t = next_thread(t)) 1750 /* 1751 * Setting state to TASK_STOPPED for a group 1752 * stop is always done with the siglock held, 1753 * so this check has no races. 1754 */ 1755 if (!t->exit_state && 1756 !(t->state & (TASK_STOPPED|TASK_TRACED))) { 1757 stop_count++; 1758 signal_wake_up(t, 0); 1759 } 1760 sig->group_stop_count = stop_count; 1761 } 1762 1763 if (stop_count == 0) 1764 sig->flags = SIGNAL_STOP_STOPPED; 1765 current->exit_code = sig->group_exit_code; 1766 __set_current_state(TASK_STOPPED); 1767 1768 spin_unlock_irq(¤t->sighand->siglock); 1769 finish_stop(stop_count); 1770 return 1; 1771 } 1772 1773 /* 1774 * Do appropriate magic when group_stop_count > 0. 1775 * We return nonzero if we stopped, after releasing the siglock. 1776 * We return zero if we still hold the siglock and should look 1777 * for another signal without checking group_stop_count again. 1778 */ 1779 static int handle_group_stop(void) 1780 { 1781 int stop_count; 1782 1783 if (current->signal->group_exit_task == current) { 1784 /* 1785 * Group stop is so we can do a core dump, 1786 * We are the initiating thread, so get on with it. 1787 */ 1788 current->signal->group_exit_task = NULL; 1789 return 0; 1790 } 1791 1792 if (current->signal->flags & SIGNAL_GROUP_EXIT) 1793 /* 1794 * Group stop is so another thread can do a core dump, 1795 * or else we are racing against a death signal. 1796 * Just punt the stop so we can get the next signal. 1797 */ 1798 return 0; 1799 1800 /* 1801 * There is a group stop in progress. We stop 1802 * without any associated signal being in our queue. 1803 */ 1804 stop_count = --current->signal->group_stop_count; 1805 if (stop_count == 0) 1806 current->signal->flags = SIGNAL_STOP_STOPPED; 1807 current->exit_code = current->signal->group_exit_code; 1808 set_current_state(TASK_STOPPED); 1809 spin_unlock_irq(¤t->sighand->siglock); 1810 finish_stop(stop_count); 1811 return 1; 1812 } 1813 1814 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, 1815 struct pt_regs *regs, void *cookie) 1816 { 1817 sigset_t *mask = ¤t->blocked; 1818 int signr = 0; 1819 1820 try_to_freeze(); 1821 1822 relock: 1823 spin_lock_irq(¤t->sighand->siglock); 1824 for (;;) { 1825 struct k_sigaction *ka; 1826 1827 if (unlikely(current->signal->group_stop_count > 0) && 1828 handle_group_stop()) 1829 goto relock; 1830 1831 signr = dequeue_signal(current, mask, info); 1832 1833 if (!signr) 1834 break; /* will return 0 */ 1835 1836 if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) { 1837 ptrace_signal_deliver(regs, cookie); 1838 1839 /* Let the debugger run. */ 1840 ptrace_stop(signr, signr, info); 1841 1842 /* We're back. Did the debugger cancel the sig? */ 1843 signr = current->exit_code; 1844 if (signr == 0) 1845 continue; 1846 1847 current->exit_code = 0; 1848 1849 /* Update the siginfo structure if the signal has 1850 changed. If the debugger wanted something 1851 specific in the siginfo structure then it should 1852 have updated *info via PTRACE_SETSIGINFO. */ 1853 if (signr != info->si_signo) { 1854 info->si_signo = signr; 1855 info->si_errno = 0; 1856 info->si_code = SI_USER; 1857 info->si_pid = current->parent->pid; 1858 info->si_uid = current->parent->uid; 1859 } 1860 1861 /* If the (new) signal is now blocked, requeue it. */ 1862 if (sigismember(¤t->blocked, signr)) { 1863 specific_send_sig_info(signr, info, current); 1864 continue; 1865 } 1866 } 1867 1868 ka = ¤t->sighand->action[signr-1]; 1869 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ 1870 continue; 1871 if (ka->sa.sa_handler != SIG_DFL) { 1872 /* Run the handler. */ 1873 *return_ka = *ka; 1874 1875 if (ka->sa.sa_flags & SA_ONESHOT) 1876 ka->sa.sa_handler = SIG_DFL; 1877 1878 break; /* will return non-zero "signr" value */ 1879 } 1880 1881 /* 1882 * Now we are doing the default action for this signal. 1883 */ 1884 if (sig_kernel_ignore(signr)) /* Default is nothing. */ 1885 continue; 1886 1887 /* 1888 * Init of a pid space gets no signals it doesn't want from 1889 * within that pid space. It can of course get signals from 1890 * its parent pid space. 1891 */ 1892 if (current == child_reaper(current)) 1893 continue; 1894 1895 if (sig_kernel_stop(signr)) { 1896 /* 1897 * The default action is to stop all threads in 1898 * the thread group. The job control signals 1899 * do nothing in an orphaned pgrp, but SIGSTOP 1900 * always works. Note that siglock needs to be 1901 * dropped during the call to is_orphaned_pgrp() 1902 * because of lock ordering with tasklist_lock. 1903 * This allows an intervening SIGCONT to be posted. 1904 * We need to check for that and bail out if necessary. 1905 */ 1906 if (signr != SIGSTOP) { 1907 spin_unlock_irq(¤t->sighand->siglock); 1908 1909 /* signals can be posted during this window */ 1910 1911 if (is_current_pgrp_orphaned()) 1912 goto relock; 1913 1914 spin_lock_irq(¤t->sighand->siglock); 1915 } 1916 1917 if (likely(do_signal_stop(signr))) { 1918 /* It released the siglock. */ 1919 goto relock; 1920 } 1921 1922 /* 1923 * We didn't actually stop, due to a race 1924 * with SIGCONT or something like that. 1925 */ 1926 continue; 1927 } 1928 1929 spin_unlock_irq(¤t->sighand->siglock); 1930 1931 /* 1932 * Anything else is fatal, maybe with a core dump. 1933 */ 1934 current->flags |= PF_SIGNALED; 1935 if (sig_kernel_coredump(signr)) { 1936 /* 1937 * If it was able to dump core, this kills all 1938 * other threads in the group and synchronizes with 1939 * their demise. If we lost the race with another 1940 * thread getting here, it set group_exit_code 1941 * first and our do_group_exit call below will use 1942 * that value and ignore the one we pass it. 1943 */ 1944 do_coredump((long)signr, signr, regs); 1945 } 1946 1947 /* 1948 * Death signals, no core dump. 1949 */ 1950 do_group_exit(signr); 1951 /* NOTREACHED */ 1952 } 1953 spin_unlock_irq(¤t->sighand->siglock); 1954 return signr; 1955 } 1956 1957 EXPORT_SYMBOL(recalc_sigpending); 1958 EXPORT_SYMBOL_GPL(dequeue_signal); 1959 EXPORT_SYMBOL(flush_signals); 1960 EXPORT_SYMBOL(force_sig); 1961 EXPORT_SYMBOL(kill_proc); 1962 EXPORT_SYMBOL(ptrace_notify); 1963 EXPORT_SYMBOL(send_sig); 1964 EXPORT_SYMBOL(send_sig_info); 1965 EXPORT_SYMBOL(sigprocmask); 1966 EXPORT_SYMBOL(block_all_signals); 1967 EXPORT_SYMBOL(unblock_all_signals); 1968 1969 1970 /* 1971 * System call entry points. 1972 */ 1973 1974 asmlinkage long sys_restart_syscall(void) 1975 { 1976 struct restart_block *restart = ¤t_thread_info()->restart_block; 1977 return restart->fn(restart); 1978 } 1979 1980 long do_no_restart_syscall(struct restart_block *param) 1981 { 1982 return -EINTR; 1983 } 1984 1985 /* 1986 * We don't need to get the kernel lock - this is all local to this 1987 * particular thread.. (and that's good, because this is _heavily_ 1988 * used by various programs) 1989 */ 1990 1991 /* 1992 * This is also useful for kernel threads that want to temporarily 1993 * (or permanently) block certain signals. 1994 * 1995 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel 1996 * interface happily blocks "unblockable" signals like SIGKILL 1997 * and friends. 1998 */ 1999 int sigprocmask(int how, sigset_t *set, sigset_t *oldset) 2000 { 2001 int error; 2002 2003 spin_lock_irq(¤t->sighand->siglock); 2004 if (oldset) 2005 *oldset = current->blocked; 2006 2007 error = 0; 2008 switch (how) { 2009 case SIG_BLOCK: 2010 sigorsets(¤t->blocked, ¤t->blocked, set); 2011 break; 2012 case SIG_UNBLOCK: 2013 signandsets(¤t->blocked, ¤t->blocked, set); 2014 break; 2015 case SIG_SETMASK: 2016 current->blocked = *set; 2017 break; 2018 default: 2019 error = -EINVAL; 2020 } 2021 recalc_sigpending(); 2022 spin_unlock_irq(¤t->sighand->siglock); 2023 2024 return error; 2025 } 2026 2027 asmlinkage long 2028 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize) 2029 { 2030 int error = -EINVAL; 2031 sigset_t old_set, new_set; 2032 2033 /* XXX: Don't preclude handling different sized sigset_t's. */ 2034 if (sigsetsize != sizeof(sigset_t)) 2035 goto out; 2036 2037 if (set) { 2038 error = -EFAULT; 2039 if (copy_from_user(&new_set, set, sizeof(*set))) 2040 goto out; 2041 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); 2042 2043 error = sigprocmask(how, &new_set, &old_set); 2044 if (error) 2045 goto out; 2046 if (oset) 2047 goto set_old; 2048 } else if (oset) { 2049 spin_lock_irq(¤t->sighand->siglock); 2050 old_set = current->blocked; 2051 spin_unlock_irq(¤t->sighand->siglock); 2052 2053 set_old: 2054 error = -EFAULT; 2055 if (copy_to_user(oset, &old_set, sizeof(*oset))) 2056 goto out; 2057 } 2058 error = 0; 2059 out: 2060 return error; 2061 } 2062 2063 long do_sigpending(void __user *set, unsigned long sigsetsize) 2064 { 2065 long error = -EINVAL; 2066 sigset_t pending; 2067 2068 if (sigsetsize > sizeof(sigset_t)) 2069 goto out; 2070 2071 spin_lock_irq(¤t->sighand->siglock); 2072 sigorsets(&pending, ¤t->pending.signal, 2073 ¤t->signal->shared_pending.signal); 2074 spin_unlock_irq(¤t->sighand->siglock); 2075 2076 /* Outside the lock because only this thread touches it. */ 2077 sigandsets(&pending, ¤t->blocked, &pending); 2078 2079 error = -EFAULT; 2080 if (!copy_to_user(set, &pending, sigsetsize)) 2081 error = 0; 2082 2083 out: 2084 return error; 2085 } 2086 2087 asmlinkage long 2088 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize) 2089 { 2090 return do_sigpending(set, sigsetsize); 2091 } 2092 2093 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER 2094 2095 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) 2096 { 2097 int err; 2098 2099 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t))) 2100 return -EFAULT; 2101 if (from->si_code < 0) 2102 return __copy_to_user(to, from, sizeof(siginfo_t)) 2103 ? -EFAULT : 0; 2104 /* 2105 * If you change siginfo_t structure, please be sure 2106 * this code is fixed accordingly. 2107 * It should never copy any pad contained in the structure 2108 * to avoid security leaks, but must copy the generic 2109 * 3 ints plus the relevant union member. 2110 */ 2111 err = __put_user(from->si_signo, &to->si_signo); 2112 err |= __put_user(from->si_errno, &to->si_errno); 2113 err |= __put_user((short)from->si_code, &to->si_code); 2114 switch (from->si_code & __SI_MASK) { 2115 case __SI_KILL: 2116 err |= __put_user(from->si_pid, &to->si_pid); 2117 err |= __put_user(from->si_uid, &to->si_uid); 2118 break; 2119 case __SI_TIMER: 2120 err |= __put_user(from->si_tid, &to->si_tid); 2121 err |= __put_user(from->si_overrun, &to->si_overrun); 2122 err |= __put_user(from->si_ptr, &to->si_ptr); 2123 break; 2124 case __SI_POLL: 2125 err |= __put_user(from->si_band, &to->si_band); 2126 err |= __put_user(from->si_fd, &to->si_fd); 2127 break; 2128 case __SI_FAULT: 2129 err |= __put_user(from->si_addr, &to->si_addr); 2130 #ifdef __ARCH_SI_TRAPNO 2131 err |= __put_user(from->si_trapno, &to->si_trapno); 2132 #endif 2133 break; 2134 case __SI_CHLD: 2135 err |= __put_user(from->si_pid, &to->si_pid); 2136 err |= __put_user(from->si_uid, &to->si_uid); 2137 err |= __put_user(from->si_status, &to->si_status); 2138 err |= __put_user(from->si_utime, &to->si_utime); 2139 err |= __put_user(from->si_stime, &to->si_stime); 2140 break; 2141 case __SI_RT: /* This is not generated by the kernel as of now. */ 2142 case __SI_MESGQ: /* But this is */ 2143 err |= __put_user(from->si_pid, &to->si_pid); 2144 err |= __put_user(from->si_uid, &to->si_uid); 2145 err |= __put_user(from->si_ptr, &to->si_ptr); 2146 break; 2147 default: /* this is just in case for now ... */ 2148 err |= __put_user(from->si_pid, &to->si_pid); 2149 err |= __put_user(from->si_uid, &to->si_uid); 2150 break; 2151 } 2152 return err; 2153 } 2154 2155 #endif 2156 2157 asmlinkage long 2158 sys_rt_sigtimedwait(const sigset_t __user *uthese, 2159 siginfo_t __user *uinfo, 2160 const struct timespec __user *uts, 2161 size_t sigsetsize) 2162 { 2163 int ret, sig; 2164 sigset_t these; 2165 struct timespec ts; 2166 siginfo_t info; 2167 long timeout = 0; 2168 2169 /* XXX: Don't preclude handling different sized sigset_t's. */ 2170 if (sigsetsize != sizeof(sigset_t)) 2171 return -EINVAL; 2172 2173 if (copy_from_user(&these, uthese, sizeof(these))) 2174 return -EFAULT; 2175 2176 /* 2177 * Invert the set of allowed signals to get those we 2178 * want to block. 2179 */ 2180 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP)); 2181 signotset(&these); 2182 2183 if (uts) { 2184 if (copy_from_user(&ts, uts, sizeof(ts))) 2185 return -EFAULT; 2186 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0 2187 || ts.tv_sec < 0) 2188 return -EINVAL; 2189 } 2190 2191 spin_lock_irq(¤t->sighand->siglock); 2192 sig = dequeue_signal(current, &these, &info); 2193 if (!sig) { 2194 timeout = MAX_SCHEDULE_TIMEOUT; 2195 if (uts) 2196 timeout = (timespec_to_jiffies(&ts) 2197 + (ts.tv_sec || ts.tv_nsec)); 2198 2199 if (timeout) { 2200 /* None ready -- temporarily unblock those we're 2201 * interested while we are sleeping in so that we'll 2202 * be awakened when they arrive. */ 2203 current->real_blocked = current->blocked; 2204 sigandsets(¤t->blocked, ¤t->blocked, &these); 2205 recalc_sigpending(); 2206 spin_unlock_irq(¤t->sighand->siglock); 2207 2208 timeout = schedule_timeout_interruptible(timeout); 2209 2210 spin_lock_irq(¤t->sighand->siglock); 2211 sig = dequeue_signal(current, &these, &info); 2212 current->blocked = current->real_blocked; 2213 siginitset(¤t->real_blocked, 0); 2214 recalc_sigpending(); 2215 } 2216 } 2217 spin_unlock_irq(¤t->sighand->siglock); 2218 2219 if (sig) { 2220 ret = sig; 2221 if (uinfo) { 2222 if (copy_siginfo_to_user(uinfo, &info)) 2223 ret = -EFAULT; 2224 } 2225 } else { 2226 ret = -EAGAIN; 2227 if (timeout) 2228 ret = -EINTR; 2229 } 2230 2231 return ret; 2232 } 2233 2234 asmlinkage long 2235 sys_kill(int pid, int sig) 2236 { 2237 struct siginfo info; 2238 2239 info.si_signo = sig; 2240 info.si_errno = 0; 2241 info.si_code = SI_USER; 2242 info.si_pid = current->tgid; 2243 info.si_uid = current->uid; 2244 2245 return kill_something_info(sig, &info, pid); 2246 } 2247 2248 static int do_tkill(int tgid, int pid, int sig) 2249 { 2250 int error; 2251 struct siginfo info; 2252 struct task_struct *p; 2253 2254 error = -ESRCH; 2255 info.si_signo = sig; 2256 info.si_errno = 0; 2257 info.si_code = SI_TKILL; 2258 info.si_pid = current->tgid; 2259 info.si_uid = current->uid; 2260 2261 read_lock(&tasklist_lock); 2262 p = find_task_by_pid(pid); 2263 if (p && (tgid <= 0 || p->tgid == tgid)) { 2264 error = check_kill_permission(sig, &info, p); 2265 /* 2266 * The null signal is a permissions and process existence 2267 * probe. No signal is actually delivered. 2268 */ 2269 if (!error && sig && p->sighand) { 2270 spin_lock_irq(&p->sighand->siglock); 2271 handle_stop_signal(sig, p); 2272 error = specific_send_sig_info(sig, &info, p); 2273 spin_unlock_irq(&p->sighand->siglock); 2274 } 2275 } 2276 read_unlock(&tasklist_lock); 2277 2278 return error; 2279 } 2280 2281 /** 2282 * sys_tgkill - send signal to one specific thread 2283 * @tgid: the thread group ID of the thread 2284 * @pid: the PID of the thread 2285 * @sig: signal to be sent 2286 * 2287 * This syscall also checks the @tgid and returns -ESRCH even if the PID 2288 * exists but it's not belonging to the target process anymore. This 2289 * method solves the problem of threads exiting and PIDs getting reused. 2290 */ 2291 asmlinkage long sys_tgkill(int tgid, int pid, int sig) 2292 { 2293 /* This is only valid for single tasks */ 2294 if (pid <= 0 || tgid <= 0) 2295 return -EINVAL; 2296 2297 return do_tkill(tgid, pid, sig); 2298 } 2299 2300 /* 2301 * Send a signal to only one task, even if it's a CLONE_THREAD task. 2302 */ 2303 asmlinkage long 2304 sys_tkill(int pid, int sig) 2305 { 2306 /* This is only valid for single tasks */ 2307 if (pid <= 0) 2308 return -EINVAL; 2309 2310 return do_tkill(0, pid, sig); 2311 } 2312 2313 asmlinkage long 2314 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo) 2315 { 2316 siginfo_t info; 2317 2318 if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) 2319 return -EFAULT; 2320 2321 /* Not even root can pretend to send signals from the kernel. 2322 Nor can they impersonate a kill(), which adds source info. */ 2323 if (info.si_code >= 0) 2324 return -EPERM; 2325 info.si_signo = sig; 2326 2327 /* POSIX.1b doesn't mention process groups. */ 2328 return kill_proc_info(sig, &info, pid); 2329 } 2330 2331 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) 2332 { 2333 struct k_sigaction *k; 2334 sigset_t mask; 2335 2336 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) 2337 return -EINVAL; 2338 2339 k = ¤t->sighand->action[sig-1]; 2340 2341 spin_lock_irq(¤t->sighand->siglock); 2342 if (signal_pending(current)) { 2343 /* 2344 * If there might be a fatal signal pending on multiple 2345 * threads, make sure we take it before changing the action. 2346 */ 2347 spin_unlock_irq(¤t->sighand->siglock); 2348 return -ERESTARTNOINTR; 2349 } 2350 2351 if (oact) 2352 *oact = *k; 2353 2354 if (act) { 2355 sigdelsetmask(&act->sa.sa_mask, 2356 sigmask(SIGKILL) | sigmask(SIGSTOP)); 2357 *k = *act; 2358 /* 2359 * POSIX 3.3.1.3: 2360 * "Setting a signal action to SIG_IGN for a signal that is 2361 * pending shall cause the pending signal to be discarded, 2362 * whether or not it is blocked." 2363 * 2364 * "Setting a signal action to SIG_DFL for a signal that is 2365 * pending and whose default action is to ignore the signal 2366 * (for example, SIGCHLD), shall cause the pending signal to 2367 * be discarded, whether or not it is blocked" 2368 */ 2369 if (act->sa.sa_handler == SIG_IGN || 2370 (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) { 2371 struct task_struct *t = current; 2372 sigemptyset(&mask); 2373 sigaddset(&mask, sig); 2374 rm_from_queue_full(&mask, &t->signal->shared_pending); 2375 do { 2376 rm_from_queue_full(&mask, &t->pending); 2377 recalc_sigpending_tsk(t); 2378 t = next_thread(t); 2379 } while (t != current); 2380 } 2381 } 2382 2383 spin_unlock_irq(¤t->sighand->siglock); 2384 return 0; 2385 } 2386 2387 int 2388 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp) 2389 { 2390 stack_t oss; 2391 int error; 2392 2393 if (uoss) { 2394 oss.ss_sp = (void __user *) current->sas_ss_sp; 2395 oss.ss_size = current->sas_ss_size; 2396 oss.ss_flags = sas_ss_flags(sp); 2397 } 2398 2399 if (uss) { 2400 void __user *ss_sp; 2401 size_t ss_size; 2402 int ss_flags; 2403 2404 error = -EFAULT; 2405 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)) 2406 || __get_user(ss_sp, &uss->ss_sp) 2407 || __get_user(ss_flags, &uss->ss_flags) 2408 || __get_user(ss_size, &uss->ss_size)) 2409 goto out; 2410 2411 error = -EPERM; 2412 if (on_sig_stack(sp)) 2413 goto out; 2414 2415 error = -EINVAL; 2416 /* 2417 * 2418 * Note - this code used to test ss_flags incorrectly 2419 * old code may have been written using ss_flags==0 2420 * to mean ss_flags==SS_ONSTACK (as this was the only 2421 * way that worked) - this fix preserves that older 2422 * mechanism 2423 */ 2424 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0) 2425 goto out; 2426 2427 if (ss_flags == SS_DISABLE) { 2428 ss_size = 0; 2429 ss_sp = NULL; 2430 } else { 2431 error = -ENOMEM; 2432 if (ss_size < MINSIGSTKSZ) 2433 goto out; 2434 } 2435 2436 current->sas_ss_sp = (unsigned long) ss_sp; 2437 current->sas_ss_size = ss_size; 2438 } 2439 2440 if (uoss) { 2441 error = -EFAULT; 2442 if (copy_to_user(uoss, &oss, sizeof(oss))) 2443 goto out; 2444 } 2445 2446 error = 0; 2447 out: 2448 return error; 2449 } 2450 2451 #ifdef __ARCH_WANT_SYS_SIGPENDING 2452 2453 asmlinkage long 2454 sys_sigpending(old_sigset_t __user *set) 2455 { 2456 return do_sigpending(set, sizeof(*set)); 2457 } 2458 2459 #endif 2460 2461 #ifdef __ARCH_WANT_SYS_SIGPROCMASK 2462 /* Some platforms have their own version with special arguments others 2463 support only sys_rt_sigprocmask. */ 2464 2465 asmlinkage long 2466 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset) 2467 { 2468 int error; 2469 old_sigset_t old_set, new_set; 2470 2471 if (set) { 2472 error = -EFAULT; 2473 if (copy_from_user(&new_set, set, sizeof(*set))) 2474 goto out; 2475 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP)); 2476 2477 spin_lock_irq(¤t->sighand->siglock); 2478 old_set = current->blocked.sig[0]; 2479 2480 error = 0; 2481 switch (how) { 2482 default: 2483 error = -EINVAL; 2484 break; 2485 case SIG_BLOCK: 2486 sigaddsetmask(¤t->blocked, new_set); 2487 break; 2488 case SIG_UNBLOCK: 2489 sigdelsetmask(¤t->blocked, new_set); 2490 break; 2491 case SIG_SETMASK: 2492 current->blocked.sig[0] = new_set; 2493 break; 2494 } 2495 2496 recalc_sigpending(); 2497 spin_unlock_irq(¤t->sighand->siglock); 2498 if (error) 2499 goto out; 2500 if (oset) 2501 goto set_old; 2502 } else if (oset) { 2503 old_set = current->blocked.sig[0]; 2504 set_old: 2505 error = -EFAULT; 2506 if (copy_to_user(oset, &old_set, sizeof(*oset))) 2507 goto out; 2508 } 2509 error = 0; 2510 out: 2511 return error; 2512 } 2513 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ 2514 2515 #ifdef __ARCH_WANT_SYS_RT_SIGACTION 2516 asmlinkage long 2517 sys_rt_sigaction(int sig, 2518 const struct sigaction __user *act, 2519 struct sigaction __user *oact, 2520 size_t sigsetsize) 2521 { 2522 struct k_sigaction new_sa, old_sa; 2523 int ret = -EINVAL; 2524 2525 /* XXX: Don't preclude handling different sized sigset_t's. */ 2526 if (sigsetsize != sizeof(sigset_t)) 2527 goto out; 2528 2529 if (act) { 2530 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa))) 2531 return -EFAULT; 2532 } 2533 2534 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL); 2535 2536 if (!ret && oact) { 2537 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa))) 2538 return -EFAULT; 2539 } 2540 out: 2541 return ret; 2542 } 2543 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */ 2544 2545 #ifdef __ARCH_WANT_SYS_SGETMASK 2546 2547 /* 2548 * For backwards compatibility. Functionality superseded by sigprocmask. 2549 */ 2550 asmlinkage long 2551 sys_sgetmask(void) 2552 { 2553 /* SMP safe */ 2554 return current->blocked.sig[0]; 2555 } 2556 2557 asmlinkage long 2558 sys_ssetmask(int newmask) 2559 { 2560 int old; 2561 2562 spin_lock_irq(¤t->sighand->siglock); 2563 old = current->blocked.sig[0]; 2564 2565 siginitset(¤t->blocked, newmask & ~(sigmask(SIGKILL)| 2566 sigmask(SIGSTOP))); 2567 recalc_sigpending(); 2568 spin_unlock_irq(¤t->sighand->siglock); 2569 2570 return old; 2571 } 2572 #endif /* __ARCH_WANT_SGETMASK */ 2573 2574 #ifdef __ARCH_WANT_SYS_SIGNAL 2575 /* 2576 * For backwards compatibility. Functionality superseded by sigaction. 2577 */ 2578 asmlinkage unsigned long 2579 sys_signal(int sig, __sighandler_t handler) 2580 { 2581 struct k_sigaction new_sa, old_sa; 2582 int ret; 2583 2584 new_sa.sa.sa_handler = handler; 2585 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK; 2586 sigemptyset(&new_sa.sa.sa_mask); 2587 2588 ret = do_sigaction(sig, &new_sa, &old_sa); 2589 2590 return ret ? ret : (unsigned long)old_sa.sa.sa_handler; 2591 } 2592 #endif /* __ARCH_WANT_SYS_SIGNAL */ 2593 2594 #ifdef __ARCH_WANT_SYS_PAUSE 2595 2596 asmlinkage long 2597 sys_pause(void) 2598 { 2599 current->state = TASK_INTERRUPTIBLE; 2600 schedule(); 2601 return -ERESTARTNOHAND; 2602 } 2603 2604 #endif 2605 2606 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND 2607 asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize) 2608 { 2609 sigset_t newset; 2610 2611 /* XXX: Don't preclude handling different sized sigset_t's. */ 2612 if (sigsetsize != sizeof(sigset_t)) 2613 return -EINVAL; 2614 2615 if (copy_from_user(&newset, unewset, sizeof(newset))) 2616 return -EFAULT; 2617 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP)); 2618 2619 spin_lock_irq(¤t->sighand->siglock); 2620 current->saved_sigmask = current->blocked; 2621 current->blocked = newset; 2622 recalc_sigpending(); 2623 spin_unlock_irq(¤t->sighand->siglock); 2624 2625 current->state = TASK_INTERRUPTIBLE; 2626 schedule(); 2627 set_thread_flag(TIF_RESTORE_SIGMASK); 2628 return -ERESTARTNOHAND; 2629 } 2630 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */ 2631 2632 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma) 2633 { 2634 return NULL; 2635 } 2636 2637 void __init signals_init(void) 2638 { 2639 sigqueue_cachep = 2640 kmem_cache_create("sigqueue", 2641 sizeof(struct sigqueue), 2642 __alignof__(struct sigqueue), 2643 SLAB_PANIC, NULL, NULL); 2644 } 2645