/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore	- Nothing Happens
 *   terminate	- kill the process, i.e. all threads in the group,
 *		  similar to exit_group.  The group leader (only) reports
 *		  WIFSIGNALED status to its parent.
 *   coredump	- write a core dump file describing all threads using
 *		  the same mm and then kill all those threads
 *   stop	- stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals when not blocked and set to SIG_DFL behave as follows.
 * The job control signals also have other special effects.
 *
 *	+--------------------+------------------+
 *	|  POSIX signal      |  default action  |
 *	+--------------------+------------------+
 *	|  SIGHUP            |  terminate       |
 *	|  SIGINT            |  terminate       |
 *	|  SIGQUIT           |  coredump        |
 *	|  SIGILL            |  coredump        |
 *	|  SIGTRAP           |  coredump        |
 *	|  SIGABRT/SIGIOT    |  coredump        |
 *	|  SIGBUS            |  coredump        |
 *	|  SIGFPE            |  coredump        |
 *	|  SIGKILL           |  terminate(+)    |
 *	|  SIGUSR1           |  terminate       |
 *	|  SIGSEGV           |  coredump        |
 *	|  SIGUSR2           |  terminate       |
 *	|  SIGPIPE           |  terminate       |
 *	|  SIGALRM           |  terminate       |
 *	|  SIGTERM           |  terminate       |
 *	|  SIGCHLD           |  ignore          |
 *	|  SIGCONT           |  ignore(*)       |
 *	|  SIGSTOP           |  stop(*)(+)      |
 *	|  SIGTSTP           |  stop(*)         |
 *	|  SIGTTIN           |  stop(*)         |
 *	|  SIGTTOU           |  stop(*)         |
 *	|  SIGURG            |  ignore          |
 *	|  SIGXCPU           |  coredump        |
 *	|  SIGXFSZ           |  coredump        |
 *	|  SIGVTALRM         |  terminate       |
 *	|  SIGPROF           |  terminate       |
 *	|  SIGPOLL/SIGIO     |  terminate       |
 *	|  SIGSYS/SIGUNUSED  |  coredump        |
 *	|  SIGSTKFLT         |  terminate       |
 *	|  SIGWINCH          |  ignore          |
 *	|  SIGPWR            |  terminate       |
 *	|  SIGRTMIN-SIGRTMAX |  terminate       |
 *	+--------------------+------------------+
 *	|  non-POSIX signal  |  default action  |
 *	+--------------------+------------------+
 *	|  SIGEMT            |  coredump        |
 *	+--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */
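/*
 * The SIGCONT behaviour described above is easy to observe from
 * userspace.  A minimal sketch (illustrative only, not part of this
 * file's build; assumes a POSIX environment and omits error handling
 * and synchronization):
 */
#if 0
#include <signal.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0)
		for (;;)
			pause();	/* child just waits for signals */

	kill(child, SIGSTOP);	/* stops all threads in the child */
	kill(child, SIGCONT);	/* resumes it and, per the note above,
				 * clears any pending stop signals */
	kill(child, SIGKILL);	/* "always" fatal: cannot be caught,
				 * blocked, or ignored */
	return 0;
}
#endif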
#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))

#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL)   |  M(SIGSTOP)                                   )

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP)   |  M(SIGTSTP)   |  M(SIGTTIN)   |  M(SIGTTOU)   )

#define SIG_KERNEL_COREDUMP_MASK (\
	M(SIGQUIT)   |  M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)   | \
	M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGBUS)    |  M(SIGSYS)    | \
	M(SIGXCPU)   |  M(SIGXFSZ)   |  M_SIGEMT                     )

#define SIG_KERNEL_IGNORE_MASK (\
	M(SIGCONT)   |  M(SIGCHLD)   |  M(SIGWINCH)  |  M(SIGURG)    )

#define sig_kernel_only(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK))

#define sig_needs_tasklist(sig)	((sig) == SIGCONT)

#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)

static int sig_ignored(struct task_struct *t, int sig)
{
	void __user * handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return   handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    (freezing(t)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}
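/*
 * A worked example of the classification macros above (illustrative
 * arithmetic only): on i386, SIGTSTP is 20, so M(SIGTSTP) is 1UL << 19.
 * sig_kernel_stop(SIGTSTP) therefore tests bit 19 of
 * SIG_KERNEL_STOP_MASK and evaluates to nonzero, while
 * sig_kernel_stop(SIGRTMIN) is always 0 because the (sig) < SIGRTMIN
 * check filters out the real-time range.
 */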
/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}

static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * In order to avoid problems with "switch_user()", we want to make
	 * sure that the compiler doesn't re-load "t->user"
	 */
	user = t->user;
	barrier();
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(user);
	}
	return(q);
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}


/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
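/*
 * How a driver might use the notifier interface above (the DRM lock
 * code is the classic in-tree user).  A minimal sketch, not built;
 * struct my_wait and the my_* names are hypothetical:
 */
#if 0
struct my_wait {
	int done;
};

static sigset_t my_blocked_set;

static int my_notifier(void *priv)
{
	struct my_wait *w = priv;

	/* Returning 0 keeps the signal blocked while we still wait. */
	return w->done;
}

static void my_wait_for_event(struct my_wait *w)
{
	sigfillset(&my_blocked_set);
	block_all_signals(my_notifier, w, &my_blocked_set);
	/* ... sleep until the hardware event sets w->done ... */
	unblock_all_signals();
}
#endif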
static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {

		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	recalc_sigpending_tsk(tsk);
	if (signr && unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if (signr &&
	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	    info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
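/*
 * The usual calling convention for dequeue_signal() from a kernel
 * thread that handles its own signals.  A minimal sketch, not built;
 * the siglock must be held across the call, as noted above:
 */
#if 0
static int my_kthread_next_signal(void)
{
	siginfo_t info;
	int signr;

	spin_lock_irq(&current->sighand->siglock);
	/* Dequeue the next signal that is not in current->blocked. */
	signr = dequeue_signal(current, &current->blocked, &info);
	spin_unlock_irq(&current->sighand->siglock);

	return signr;		/* 0 if nothing was pending */
}
#endif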
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced case.
	 * We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED | TASK_TRACED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;
	if (!valid_signal(sig))
		return error;
	error = -EPERM;
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) ||
		(process_session(current) != process_session(t)))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;

	error = security_task_kill(t, info, sig, 0);
	if (!error)
		audit_signal_info(sig, t); /* Let audit system see the signal */
	return error;
}
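/*
 * A note on the XOR idiom in check_kill_permission() above: for
 * scalars a and b, (a ^ b) is zero exactly when a == b, so the chain
 * of "&& (x ^ y)" terms falls through (permitting the signal) as soon
 * as any of the sender's uid/euid matches the target's uid/suid.  It
 * is equivalent to writing (current->euid != t->suid) and so on, just
 * in a branch-free style.
 */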
/* forward decl */
static void do_notify_parent_cldstop(struct task_struct *tsk, int why);

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}

#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))


static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	BUG_ON(!irqs_disabled());
	assert_spin_locked(&t->sighand->siglock);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}
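/*
 * A worked example of the LEGACY_QUEUE rule above (illustrative): if
 * SIGUSR1 is already pending for a task, a second SIGUSR1 is silently
 * coalesced and the sender still sees success -- classic POSIX
 * semantics for non-real-time signals.  Two queued SIGRTMIN signals,
 * by contrast, are both kept and delivered in order.
 */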
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_tsk(t);
		}
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (p->state & (TASK_STOPPED | TASK_TRACED))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void
__group_complete_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;

		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

	assert_spin_locked(&p->sighand->siglock);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}
/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->flags = SIGNAL_GROUP_EXIT;
	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar. So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}

	return sighand;
}

int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}
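/*
 * The canonical calling pattern for lock_task_sighand() when signalling
 * another task, mirroring group_send_sig_info() above.  A minimal
 * sketch, not built; my_send_example is a hypothetical caller:
 */
#if 0
static int my_send_example(int sig, struct siginfo *info,
			   struct task_struct *p)
{
	unsigned long flags;
	int ret = -ESRCH;

	rcu_read_lock();	/* keeps p->sighand from being freed */
	if (lock_task_sighand(p, &flags)) {
		ret = __group_send_sig_info(sig, info, p);
		unlock_task_sighand(p, &flags);
	}
	rcu_read_unlock();
	return ret;
}
#endif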
/*
 * kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pgrp_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	if (pgrp <= 0)
		return -EINVAL;

	return __kill_pgrp_info(sig, info, find_pid(pgrp));
}

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error;
	int acquired_tasklist_lock = 0;
	struct task_struct *p;

	rcu_read_lock();
	if (unlikely(sig_needs_tasklist(sig))) {
		read_lock(&tasklist_lock);
		acquired_tasklist_lock = 1;
	}
	p = pid_task(pid, PIDTYPE_PID);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);
	if (unlikely(acquired_tasklist_lock))
		read_unlock(&tasklist_lock);
	rcu_read_unlock();
	return error;
}

static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_pid(pid));
	rcu_read_unlock();
	return error;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, process_group(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	return kill_pgrp_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, __si_special(priv), pgrp);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, __si_special(priv), pid);
}
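/*
 * Typical in-kernel use of these compatibility entry points.  A minimal
 * sketch, not built; "priv" selects SI_KERNEL-style siginfo via
 * __si_special(), and my_notify_task is a hypothetical caller:
 */
#if 0
static void my_notify_task(struct task_struct *p)
{
	/* Deliver SIGIO to one specific thread, marked kernel-generated. */
	send_sig(SIGIO, p, 1);

	/* Or address a process by pid, as older drivers do. */
	kill_proc(p->tgid, SIGTERM, 1);
}
#endif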
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		spinlock_t *lock = &current->sighand->siglock;
		read_lock(&tasklist_lock);
		spin_lock_irqsave(lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}

int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	/*
	 * The rcu based delayed sighand destroy makes it possible to
	 * run this without tasklist lock held. The task struct itself
	 * cannot go away as create_timer did get_task_struct().
	 *
	 * We return -1, when the task is marked exiting, so
	 * posix_timer_event can redirect it to the group leader
	 */
	rcu_read_lock();

	if (!likely(lock_task_sighand(p, &flags))) {
		ret = -1;
		goto out_err;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	unlock_task_sighand(p, &flags);
out_err:
	rcu_read_unlock();

	return ret;
}

int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	read_lock(&tasklist_lock);
	/* Since it_lock is held, p->sighand cannot be NULL. */
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
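/*
 * The lifecycle of a preallocated sigqueue as the POSIX timer code
 * uses it: allocate at timer-creation time so that delivery can never
 * fail for lack of memory later.  A minimal sketch, not built; the
 * my_* names are hypothetical:
 */
#if 0
static struct sigqueue *my_timer_setup(void)
{
	struct sigqueue *q = sigqueue_alloc();

	if (!q)
		return NULL;	/* report EAGAIN to the application */
	q->info.si_signo = SIGRTMIN;
	q->info.si_code = SI_TIMER;
	q->info.si_overrun = 0;
	return q;
}

static void my_timer_fire(struct sigqueue *q, struct task_struct *p)
{
	/* Re-sending while still queued just bumps si_overrun. */
	send_sigqueue(SIGRTMIN, q, p);
}
#endif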
/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}
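/*
 * A worked example of the exit_code decoding above (illustrative): a
 * child that called exit(3) has exit_code 0x300, giving CLD_EXITED
 * with si_status 3; one killed by SIGKILL has exit_code 9, giving
 * CLD_KILLED; one that dumped core on SIGQUIT has exit_code 0x83
 * (3 | 0x80), giving CLD_DUMPED.  This mirrors the wait(2) status
 * encoding.
 */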
static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (tsk->ptrace & PT_PTRACED)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace & PT_PTRACED))
		return 0;

	if (unlikely(current->parent == current->real_parent &&
		    (current->ptrace & PT_ATTACHED)))
		return 0;

	if (unlikely(current->signal == current->parent->signal) &&
	    unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
		return 0;

	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_waiters != 0. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_waiters) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer is gone,
 * we leave nostop_code in current->exit_code.
 */
static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
{
	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	try_to_freeze();
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't stop here.
		 */
		read_unlock(&tasklist_lock);
		set_current_state(TASK_RUNNING);
		current->exit_code = nostop_code;
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 */
	recalc_sigpending();
}

void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 0, &info);
	spin_unlock_irq(&current->sighand->siglock);
}

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	schedule();
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int stop_count;

	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
		return 0;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		stop_count = --sig->group_stop_count;
	} else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		struct task_struct *t;

		sig->group_exit_code = signr;

		stop_count = 0;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!t->exit_state &&
			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
				stop_count++;
				signal_wake_up(t, 0);
			}
		sig->group_stop_count = stop_count;
	}

	if (stop_count == 0)
		sig->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = sig->group_exit_code;
	__set_current_state(TASK_STOPPED);

	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	if (stop_count == 0)
		current->signal->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

	try_to_freeze();

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/* Let the debugger run.  */
			ptrace_stop(signr, signr, info);

			/* We're back.  Did the debugger cancel the sig?  */
			signr = current->exit_code;
			if (signr == 0)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Init of a pid space gets no signals it doesn't want from
		 * within that pid space. It can of course get signals from
		 * its parent pid space.
		 */
		if (current == child_reaper(current))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&current->sighand->siglock);

				/* signals can be posted during this window */

				if (is_orphaned_pgrp(process_group(current)))
					goto relock;

				spin_lock_irq(&current->sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);


/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}
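/*
 * A kernel thread using the in-kernel sigprocmask() to accept only
 * SIGTERM, as the comment above describes; unlike the user-mode
 * interface, this could block SIGKILL too.  A minimal sketch, not
 * built:
 */
#if 0
static void my_kthread_setup_signals(void)
{
	sigset_t mask;

	siginitset(&mask, sigmask(SIGTERM));
	signotset(&mask);		/* everything except SIGTERM */
	sigprocmask(SIG_SETMASK, &mask, NULL);
}
#endif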

asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
        int error = -EINVAL;
        sigset_t old_set, new_set;

        /* XXX: Don't preclude handling different sized sigset_t's. */
        if (sigsetsize != sizeof(sigset_t))
                goto out;

        if (set) {
                error = -EFAULT;
                if (copy_from_user(&new_set, set, sizeof(*set)))
                        goto out;
                sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

                error = sigprocmask(how, &new_set, &old_set);
                if (error)
                        goto out;
                if (oset)
                        goto set_old;
        } else if (oset) {
                spin_lock_irq(&current->sighand->siglock);
                old_set = current->blocked;
                spin_unlock_irq(&current->sighand->siglock);

set_old:
                error = -EFAULT;
                if (copy_to_user(oset, &old_set, sizeof(*oset)))
                        goto out;
        }
        error = 0;
out:
        return error;
}

long do_sigpending(void __user *set, unsigned long sigsetsize)
{
        long error = -EINVAL;
        sigset_t pending;

        if (sigsetsize > sizeof(sigset_t))
                goto out;

        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&pending, &current->pending.signal,
                  &current->signal->shared_pending.signal);
        spin_unlock_irq(&current->sighand->siglock);

        /* Outside the lock because only this thread touches it. */
        sigandsets(&pending, &current->blocked, &pending);

        error = -EFAULT;
        if (!copy_to_user(set, &pending, sigsetsize))
                error = 0;

out:
        return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
        return do_sigpending(set, sigsetsize);
}
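
/*
 * Illustrative sketch: user space asks which blocked signals are already
 * pending via sigpending(2), which lands in do_sigpending() above. The
 * helper name here is made up:
 *
 *      #include <signal.h>
 *      #include <stdio.h>
 *
 *      void report_pending_sigint(void)
 *      {
 *              sigset_t pending;
 *
 *              if (sigpending(&pending) == 0 &&
 *                  sigismember(&pending, SIGINT))
 *                      printf("SIGINT is pending but blocked\n");
 *      }
 */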

#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
        int err;

        if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
                return -EFAULT;
        if (from->si_code < 0)
                return __copy_to_user(to, from, sizeof(siginfo_t))
                        ? -EFAULT : 0;
        /*
         * If you change the siginfo_t structure, please be sure
         * this code is fixed accordingly.
         * It should never copy any pad contained in the structure
         * to avoid security leaks, but must copy the generic
         * 3 ints plus the relevant union member.
         */
        err = __put_user(from->si_signo, &to->si_signo);
        err |= __put_user(from->si_errno, &to->si_errno);
        err |= __put_user((short)from->si_code, &to->si_code);
        switch (from->si_code & __SI_MASK) {
        case __SI_KILL:
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                break;
        case __SI_TIMER:
                err |= __put_user(from->si_tid, &to->si_tid);
                err |= __put_user(from->si_overrun, &to->si_overrun);
                err |= __put_user(from->si_ptr, &to->si_ptr);
                break;
        case __SI_POLL:
                err |= __put_user(from->si_band, &to->si_band);
                err |= __put_user(from->si_fd, &to->si_fd);
                break;
        case __SI_FAULT:
                err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
                err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
                break;
        case __SI_CHLD:
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                err |= __put_user(from->si_status, &to->si_status);
                err |= __put_user(from->si_utime, &to->si_utime);
                err |= __put_user(from->si_stime, &to->si_stime);
                break;
        case __SI_RT: /* This is not generated by the kernel as of now. */
        case __SI_MESGQ: /* But this is. */
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                err |= __put_user(from->si_ptr, &to->si_ptr);
                break;
        default: /* This is just in case for now ... */
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                break;
        }
        return err;
}

#endif

asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
                    siginfo_t __user *uinfo,
                    const struct timespec __user *uts,
                    size_t sigsetsize)
{
        int ret, sig;
        sigset_t these;
        struct timespec ts;
        siginfo_t info;
        long timeout = 0;

        /* XXX: Don't preclude handling different sized sigset_t's. */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (copy_from_user(&these, uthese, sizeof(these)))
                return -EFAULT;

        /*
         * Invert the set of allowed signals to get those we
         * want to block.
         */
        sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
        signotset(&these);

        if (uts) {
                if (copy_from_user(&ts, uts, sizeof(ts)))
                        return -EFAULT;
                if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
                    || ts.tv_sec < 0)
                        return -EINVAL;
        }

        spin_lock_irq(&current->sighand->siglock);
        sig = dequeue_signal(current, &these, &info);
        if (!sig) {
                timeout = MAX_SCHEDULE_TIMEOUT;
                if (uts)
                        timeout = (timespec_to_jiffies(&ts)
                                   + (ts.tv_sec || ts.tv_nsec));

                if (timeout) {
                        /* None ready -- temporarily unblock the ones we are
                         * interested in while we sleep, so that we'll be
                         * awakened when they arrive. */
                        current->real_blocked = current->blocked;
                        sigandsets(&current->blocked, &current->blocked, &these);
                        recalc_sigpending();
                        spin_unlock_irq(&current->sighand->siglock);

                        timeout = schedule_timeout_interruptible(timeout);

                        spin_lock_irq(&current->sighand->siglock);
                        sig = dequeue_signal(current, &these, &info);
                        current->blocked = current->real_blocked;
                        siginitset(&current->real_blocked, 0);
                        recalc_sigpending();
                }
        }
        spin_unlock_irq(&current->sighand->siglock);

        if (sig) {
                ret = sig;
                if (uinfo) {
                        if (copy_siginfo_to_user(uinfo, &info))
                                ret = -EFAULT;
                }
        } else {
                ret = -EAGAIN;
                if (timeout)
                        ret = -EINTR;
        }

        return ret;
}
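
/*
 * Illustrative sketch: the usual user-space pattern is to block the signal
 * first (so it stays pending instead of being delivered asynchronously) and
 * then pull it synchronously with sigtimedwait(2). The helper name is made
 * up; the glibc wrapper returns the signal number, or -1 with errno set to
 * EAGAIN on timeout.
 *
 *      #include <signal.h>
 *      #include <time.h>
 *
 *      int wait_for_sigusr1(void)
 *      {
 *              sigset_t set;
 *              siginfo_t info;
 *              struct timespec ts = { 5, 0 };  // 5 second timeout
 *
 *              sigemptyset(&set);
 *              sigaddset(&set, SIGUSR1);
 *              sigprocmask(SIG_BLOCK, &set, NULL);
 *              return sigtimedwait(&set, &info, &ts);
 *      }
 */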

asmlinkage long
sys_kill(int pid, int sig)
{
        struct siginfo info;

        info.si_signo = sig;
        info.si_errno = 0;
        info.si_code = SI_USER;
        info.si_pid = current->tgid;
        info.si_uid = current->uid;

        return kill_something_info(sig, &info, pid);
}

static int do_tkill(int tgid, int pid, int sig)
{
        int error;
        struct siginfo info;
        struct task_struct *p;

        error = -ESRCH;
        info.si_signo = sig;
        info.si_errno = 0;
        info.si_code = SI_TKILL;
        info.si_pid = current->tgid;
        info.si_uid = current->uid;

        read_lock(&tasklist_lock);
        p = find_task_by_pid(pid);
        if (p && (tgid <= 0 || p->tgid == tgid)) {
                error = check_kill_permission(sig, &info, p);
                /*
                 * The null signal is a permissions and process existence
                 * probe. No signal is actually delivered.
                 */
                if (!error && sig && p->sighand) {
                        spin_lock_irq(&p->sighand->siglock);
                        handle_stop_signal(sig, p);
                        error = specific_send_sig_info(sig, &info, p);
                        spin_unlock_irq(&p->sighand->siglock);
                }
        }
        read_unlock(&tasklist_lock);

        return error;
}

/**
 * sys_tgkill - send signal to one specific thread
 * @tgid: the thread group ID of the thread
 * @pid: the PID of the thread
 * @sig: signal to be sent
 *
 * This syscall also checks the tgid and returns -ESRCH even if the PID
 * exists but no longer belongs to the target process. This solves the
 * problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
        /* This is only valid for single tasks */
        if (pid <= 0 || tgid <= 0)
                return -EINVAL;

        return do_tkill(tgid, pid, sig);
}

/*
 * Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
        /* This is only valid for single tasks */
        if (pid <= 0)
                return -EINVAL;

        return do_tkill(0, pid, sig);
}
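
/*
 * Illustrative sketch: glibc of this era does not wrap tgkill, so user
 * space reaches it via syscall(2), assuming the headers expose SYS_tgkill;
 * pthread_kill(3) is the portable route to the same functionality. The
 * helper name is made up.
 *
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *      #include <signal.h>
 *
 *      int signal_one_thread(pid_t tgid, pid_t tid)
 *      {
 *              // Fails with ESRCH if tid no longer belongs to tgid.
 *              return syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 *      }
 */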

asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
        siginfo_t info;

        if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
                return -EFAULT;

        /* Not even root can pretend to send signals from the kernel.
           Nor can they impersonate a kill(), which adds source info. */
        if (info.si_code >= 0)
                return -EPERM;
        info.si_signo = sig;

        /* POSIX.1b doesn't mention process groups. */
        return kill_proc_info(sig, &info, pid);
}

int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
        struct k_sigaction *k;
        sigset_t mask;

        if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
                return -EINVAL;

        k = &current->sighand->action[sig-1];

        spin_lock_irq(&current->sighand->siglock);
        if (signal_pending(current)) {
                /*
                 * If there might be a fatal signal pending on multiple
                 * threads, make sure we take it before changing the action.
                 */
                spin_unlock_irq(&current->sighand->siglock);
                return -ERESTARTNOINTR;
        }

        if (oact)
                *oact = *k;

        if (act) {
                sigdelsetmask(&act->sa.sa_mask,
                              sigmask(SIGKILL) | sigmask(SIGSTOP));
                *k = *act;
                /*
                 * POSIX 3.3.1.3:
                 *  "Setting a signal action to SIG_IGN for a signal that is
                 *   pending shall cause the pending signal to be discarded,
                 *   whether or not it is blocked."
                 *
                 *  "Setting a signal action to SIG_DFL for a signal that is
                 *   pending and whose default action is to ignore the signal
                 *   (for example, SIGCHLD), shall cause the pending signal to
                 *   be discarded, whether or not it is blocked."
                 */
                if (act->sa.sa_handler == SIG_IGN ||
                    (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
                        struct task_struct *t = current;
                        sigemptyset(&mask);
                        sigaddset(&mask, sig);
                        rm_from_queue_full(&mask, &t->signal->shared_pending);
                        do {
                                rm_from_queue_full(&mask, &t->pending);
                                recalc_sigpending_tsk(t);
                                t = next_thread(t);
                        } while (t != current);
                }
        }

        spin_unlock_irq(&current->sighand->siglock);
        return 0;
}
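
/*
 * Illustrative sketch: user space reaches do_sigaction() through
 * sigaction(2). A minimal handler installation, additionally masking
 * SIGQUIT while the handler runs, might look like this (the names on_term
 * and install_handler are made up):
 *
 *      #include <signal.h>
 *
 *      static void on_term(int sig)
 *      {
 *              (void)sig;      // async-signal-safe work only
 *      }
 *
 *      int install_handler(void)
 *      {
 *              struct sigaction sa;
 *
 *              sa.sa_handler = on_term;
 *              sigemptyset(&sa.sa_mask);
 *              sigaddset(&sa.sa_mask, SIGQUIT);
 *              sa.sa_flags = SA_RESTART;
 *              return sigaction(SIGTERM, &sa, NULL);
 *      }
 */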

int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
        stack_t oss;
        int error;

        if (uoss) {
                oss.ss_sp = (void __user *) current->sas_ss_sp;
                oss.ss_size = current->sas_ss_size;
                oss.ss_flags = sas_ss_flags(sp);
        }

        if (uss) {
                void __user *ss_sp;
                size_t ss_size;
                int ss_flags;

                error = -EFAULT;
                if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
                    || __get_user(ss_sp, &uss->ss_sp)
                    || __get_user(ss_flags, &uss->ss_flags)
                    || __get_user(ss_size, &uss->ss_size))
                        goto out;

                error = -EPERM;
                if (on_sig_stack(sp))
                        goto out;

                error = -EINVAL;
                /*
                 * Note - this code used to test ss_flags incorrectly:
                 * old code may have been written using ss_flags==0
                 * to mean ss_flags==SS_ONSTACK (as this was the only
                 * way that worked), so this fix preserves that older
                 * mechanism.
                 */
                if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
                        goto out;

                if (ss_flags == SS_DISABLE) {
                        ss_size = 0;
                        ss_sp = NULL;
                } else {
                        error = -ENOMEM;
                        if (ss_size < MINSIGSTKSZ)
                                goto out;
                }

                current->sas_ss_sp = (unsigned long) ss_sp;
                current->sas_ss_size = ss_size;
        }

        if (uoss) {
                error = -EFAULT;
                if (copy_to_user(uoss, &oss, sizeof(oss)))
                        goto out;
        }

        error = 0;
out:
        return error;
}
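
/*
 * Illustrative sketch: the classic user of do_sigaltstack() is code that
 * wants to catch a SIGSEGV caused by stack overflow, which only works from
 * an alternate stack installed with sigaltstack(2) and a handler registered
 * with SA_ONSTACK. The helper name is made up.
 *
 *      #include <signal.h>
 *      #include <stdlib.h>
 *
 *      int use_alt_stack(void (*handler)(int))
 *      {
 *              stack_t ss;
 *              struct sigaction sa;
 *
 *              ss.ss_sp = malloc(SIGSTKSZ);
 *              ss.ss_size = SIGSTKSZ;
 *              ss.ss_flags = 0;
 *              if (!ss.ss_sp || sigaltstack(&ss, NULL) < 0)
 *                      return -1;
 *
 *              sa.sa_handler = handler;
 *              sigemptyset(&sa.sa_mask);
 *              sa.sa_flags = SA_ONSTACK;
 *              return sigaction(SIGSEGV, &sa, NULL);
 *      }
 */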

#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
        return do_sigpending(set, sizeof(*set));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask. */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
        int error;
        old_sigset_t old_set, new_set;

        if (set) {
                error = -EFAULT;
                if (copy_from_user(&new_set, set, sizeof(*set)))
                        goto out;
                new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

                spin_lock_irq(&current->sighand->siglock);
                old_set = current->blocked.sig[0];

                error = 0;
                switch (how) {
                default:
                        error = -EINVAL;
                        break;
                case SIG_BLOCK:
                        sigaddsetmask(&current->blocked, new_set);
                        break;
                case SIG_UNBLOCK:
                        sigdelsetmask(&current->blocked, new_set);
                        break;
                case SIG_SETMASK:
                        current->blocked.sig[0] = new_set;
                        break;
                }

                recalc_sigpending();
                spin_unlock_irq(&current->sighand->siglock);
                if (error)
                        goto out;
                if (oset)
                        goto set_old;
        } else if (oset) {
                old_set = current->blocked.sig[0];
set_old:
                error = -EFAULT;
                if (copy_to_user(oset, &old_set, sizeof(*oset)))
                        goto out;
        }
        error = 0;
out:
        return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
                 const struct sigaction __user *act,
                 struct sigaction __user *oact,
                 size_t sigsetsize)
{
        struct k_sigaction new_sa, old_sa;
        int ret = -EINVAL;

        /* XXX: Don't preclude handling different sized sigset_t's. */
        if (sigsetsize != sizeof(sigset_t))
                goto out;

        if (act) {
                if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
                        return -EFAULT;
        }

        ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

        if (!ret && oact) {
                if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
                        return -EFAULT;
        }
out:
        return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */

#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility. Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
        /* SMP safe */
        return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
        int old;

        spin_lock_irq(&current->sighand->siglock);
        old = current->blocked.sig[0];

        siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
                                                  sigmask(SIGSTOP)));
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility. Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
        struct k_sigaction new_sa, old_sa;
        int ret;

        new_sa.sa.sa_handler = handler;
        new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
        sigemptyset(&new_sa.sa.sa_mask);

        ret = do_sigaction(sig, &new_sa, &old_sa);

        return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
        current->state = TASK_INTERRUPTIBLE;
        schedule();
        return -ERESTARTNOHAND;
}

#endif

#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
{
        sigset_t newset;

        /* XXX: Don't preclude handling different sized sigset_t's. */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (copy_from_user(&newset, unewset, sizeof(newset)))
                return -EFAULT;
        sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

        spin_lock_irq(&current->sighand->siglock);
        current->saved_sigmask = current->blocked;
        current->blocked = newset;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        current->state = TASK_INTERRUPTIBLE;
        schedule();
        set_thread_flag(TIF_RESTORE_SIGMASK);
        return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */

__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
        return NULL;
}

void __init signals_init(void)
{
        sigqueue_cachep =
                kmem_cache_create("sigqueue",
                                  sizeof(struct sigqueue),
                                  __alignof__(struct sigqueue),
                                  SLAB_PANIC, NULL, NULL);
}
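
/*
 * Illustrative sketch: the point of sys_rt_sigsuspend() above is to make
 * "check a flag, then sleep" race-free. The classic user-space pattern
 * blocks the signal, tests a flag set by the handler, and atomically
 * unblocks-and-waits; the names got_usr1 and wait_for_usr1() are made up.
 *
 *      #include <signal.h>
 *
 *      static volatile sig_atomic_t got_usr1;  // set by a SIGUSR1 handler
 *
 *      void wait_for_usr1(void)
 *      {
 *              sigset_t block, old;
 *
 *              sigemptyset(&block);
 *              sigaddset(&block, SIGUSR1);
 *              sigprocmask(SIG_BLOCK, &block, &old);
 *              while (!got_usr1)
 *                      sigsuspend(&old);       // atomic unblock + sleep
 *              sigprocmask(SIG_SETMASK, &old, NULL);
 *      }
 */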