xref: /linux/kernel/signal.c (revision 405849610fd96b4f34cd1875c4c033228fea6c0f)
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *		Changes to use preallocated sigqueue structures
10  *		to allow signals to be sent reliably.
11  */
12 
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/security.h>
21 #include <linux/syscalls.h>
22 #include <linux/ptrace.h>
23 #include <linux/signal.h>
24 #include <linux/signalfd.h>
25 #include <linux/capability.h>
26 #include <linux/freezer.h>
27 #include <linux/pid_namespace.h>
28 #include <linux/nsproxy.h>
29 
30 #include <asm/param.h>
31 #include <asm/uaccess.h>
32 #include <asm/unistd.h>
33 #include <asm/siginfo.h>
34 #include "audit.h"	/* audit_signal_info() */
35 
36 /*
37  * SLAB caches for signal bits.
38  */
39 
40 static struct kmem_cache *sigqueue_cachep;
41 
42 
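/*
 * Return non-zero if sig would currently be discarded by t: traced
 * tasks and blocked signals never count as ignored; otherwise the
 * disposition must be SIG_IGN or a default-ignore action.
 */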
43 static int sig_ignored(struct task_struct *t, int sig)
44 {
45 	void __user * handler;
46 
47 	/*
48 	 * Tracers always want to know about signals..
49 	 */
50 	if (t->ptrace & PT_PTRACED)
51 		return 0;
52 
53 	/*
54 	 * Blocked signals are never ignored, since the
55 	 * signal handler may change by the time it is
56 	 * unblocked.
57 	 */
58 	if (sigismember(&t->blocked, sig))
59 		return 0;
60 
61 	/* Is it explicitly or implicitly ignored? */
62 	handler = t->sighand->action[sig-1].sa.sa_handler;
63 	return   handler == SIG_IGN ||
64 		(handler == SIG_DFL && sig_kernel_ignore(sig));
65 }
66 
67 /*
68  * Re-calculate pending state from the set of locally pending
69  * signals, globally pending signals, and blocked signals.
70  */
71 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
72 {
73 	unsigned long ready;
74 	long i;
75 
76 	switch (_NSIG_WORDS) {
77 	default:
78 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
79 			ready |= signal->sig[i] &~ blocked->sig[i];
80 		break;
81 
82 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
83 		ready |= signal->sig[2] &~ blocked->sig[2];
84 		ready |= signal->sig[1] &~ blocked->sig[1];
85 		ready |= signal->sig[0] &~ blocked->sig[0];
86 		break;
87 
88 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
89 		ready |= signal->sig[0] &~ blocked->sig[0];
90 		break;
91 
92 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
93 	}
94 	return ready != 0;
95 }
96 
97 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
98 
99 static int recalc_sigpending_tsk(struct task_struct *t)
100 {
101 	if (t->signal->group_stop_count > 0 ||
102 	    (freezing(t)) ||
103 	    PENDING(&t->pending, &t->blocked) ||
104 	    PENDING(&t->signal->shared_pending, &t->blocked)) {
105 		set_tsk_thread_flag(t, TIF_SIGPENDING);
106 		return 1;
107 	}
108 	/*
109 	 * We must never clear the flag in another thread, or in current
110 	 * when it's possible the current syscall is returning -ERESTART*.
111 	 * So we don't clear it here; clearing it is left to callers who know they should.
112 	 */
113 	return 0;
114 }
115 
116 /*
117  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
118  * This is superfluous when called on current; the wakeup is a harmless no-op.
119  */
120 void recalc_sigpending_and_wake(struct task_struct *t)
121 {
122 	if (recalc_sigpending_tsk(t))
123 		signal_wake_up(t, 0);
124 }
125 
126 void recalc_sigpending(void)
127 {
128 	if (!recalc_sigpending_tsk(current))
129 		clear_thread_flag(TIF_SIGPENDING);
130 
131 }
132 
133 /* Given the mask, find the first available signal that should be serviced. */
134 
135 int next_signal(struct sigpending *pending, sigset_t *mask)
136 {
137 	unsigned long i, *s, *m, x;
138 	int sig = 0;
139 
140 	s = pending->signal.sig;
141 	m = mask->sig;
142 	switch (_NSIG_WORDS) {
143 	default:
144 		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
145 			if ((x = *s &~ *m) != 0) {
146 				sig = ffz(~x) + i*_NSIG_BPW + 1;
147 				break;
148 			}
149 		break;
150 
151 	case 2: if ((x = s[0] &~ m[0]) != 0)
152 			sig = 1;
153 		else if ((x = s[1] &~ m[1]) != 0)
154 			sig = _NSIG_BPW + 1;
155 		else
156 			break;
157 		sig += ffz(~x);
158 		break;
159 
160 	case 1: if ((x = *s &~ *m) != 0)
161 			sig = ffz(~x) + 1;
162 		break;
163 	}
164 
165 	return sig;
166 }
167 
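/*
 * Allocate a sigqueue entry and charge it against the target user's
 * RLIMIT_SIGPENDING count.  override_rlimit skips the limit check;
 * returns NULL if the limit is exceeded or the slab allocation fails.
 */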
168 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
169 					 int override_rlimit)
170 {
171 	struct sigqueue *q = NULL;
172 	struct user_struct *user;
173 
174 	/*
175 	 * In order to avoid problems with "switch_user()", we want to make
176 	 * sure that the compiler doesn't re-load "t->user"
177 	 */
178 	user = t->user;
179 	barrier();
180 	atomic_inc(&user->sigpending);
181 	if (override_rlimit ||
182 	    atomic_read(&user->sigpending) <=
183 			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
184 		q = kmem_cache_alloc(sigqueue_cachep, flags);
185 	if (unlikely(q == NULL)) {
186 		atomic_dec(&user->sigpending);
187 	} else {
188 		INIT_LIST_HEAD(&q->list);
189 		q->flags = 0;
190 		q->user = get_uid(user);
191 	}
192 	return(q);
193 }
194 
195 static void __sigqueue_free(struct sigqueue *q)
196 {
197 	if (q->flags & SIGQUEUE_PREALLOC)
198 		return;
199 	atomic_dec(&q->user->sigpending);
200 	free_uid(q->user);
201 	kmem_cache_free(sigqueue_cachep, q);
202 }
203 
204 void flush_sigqueue(struct sigpending *queue)
205 {
206 	struct sigqueue *q;
207 
208 	sigemptyset(&queue->signal);
209 	while (!list_empty(&queue->list)) {
210 		q = list_entry(queue->list.next, struct sigqueue , list);
211 		list_del_init(&q->list);
212 		__sigqueue_free(q);
213 	}
214 }
215 
216 /*
217  * Flush all pending signals for a task.
218  */
219 void flush_signals(struct task_struct *t)
220 {
221 	unsigned long flags;
222 
223 	spin_lock_irqsave(&t->sighand->siglock, flags);
224 	clear_tsk_thread_flag(t,TIF_SIGPENDING);
225 	flush_sigqueue(&t->pending);
226 	flush_sigqueue(&t->signal->shared_pending);
227 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
228 }
229 
230 void ignore_signals(struct task_struct *t)
231 {
232 	int i;
233 
234 	for (i = 0; i < _NSIG; ++i)
235 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
236 
237 	flush_signals(t);
238 }
239 
240 /*
241  * Flush all handlers for a task.
242  */
243 
244 void
245 flush_signal_handlers(struct task_struct *t, int force_default)
246 {
247 	int i;
248 	struct k_sigaction *ka = &t->sighand->action[0];
249 	for (i = _NSIG ; i != 0 ; i--) {
250 		if (force_default || ka->sa.sa_handler != SIG_IGN)
251 			ka->sa.sa_handler = SIG_DFL;
252 		ka->sa.sa_flags = 0;
253 		sigemptyset(&ka->sa.sa_mask);
254 		ka++;
255 	}
256 }
257 
258 int unhandled_signal(struct task_struct *tsk, int sig)
259 {
260 	if (is_init(tsk))
261 		return 1;
262 	if (tsk->ptrace & PT_PTRACED)
263 		return 0;
264 	return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
265 		(tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
266 }
267 
268 
269 /* Notify the system that a driver wants to block all signals for this
270  * process, and wants to be notified if any signals at all were to be
271  * sent/acted upon.  If the notifier routine returns non-zero, then the
272  * signal will be acted upon after all.  If the notifier routine returns 0,
273  * then the signal will be blocked.  Only one block per process is
274  * allowed.  priv is a pointer to private data that the notifier routine
275  * can use to determine if the signal should be blocked or not.  */
276 
277 void
278 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
279 {
280 	unsigned long flags;
281 
282 	spin_lock_irqsave(&current->sighand->siglock, flags);
283 	current->notifier_mask = mask;
284 	current->notifier_data = priv;
285 	current->notifier = notifier;
286 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
287 }
288 
289 /* Notify the system that blocking has ended. */
290 
291 void
292 unblock_all_signals(void)
293 {
294 	unsigned long flags;
295 
296 	spin_lock_irqsave(&current->sighand->siglock, flags);
297 	current->notifier = NULL;
298 	current->notifier_data = NULL;
299 	recalc_sigpending();
300 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
301 }
302 
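/*
 * Copy the siginfo for one pending instance of sig from the list into
 * info and drop it from the queue.  If nothing was actually queued (a
 * fast-pathed signal, or the queue was full at send time) a zeroed
 * siginfo is synthesized instead.  Returns 0 only if sig was not pending.
 */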
303 static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
304 {
305 	struct sigqueue *q, *first = NULL;
306 	int still_pending = 0;
307 
308 	if (unlikely(!sigismember(&list->signal, sig)))
309 		return 0;
310 
311 	/*
312 	 * Collect the siginfo appropriate to this signal.  Check if
313 	 * there is another siginfo for the same signal.
314 	 */
315 	list_for_each_entry(q, &list->list, list) {
316 		if (q->info.si_signo == sig) {
317 			if (first) {
318 				still_pending = 1;
319 				break;
320 			}
321 			first = q;
322 		}
323 	}
324 	if (first) {
325 		list_del_init(&first->list);
326 		copy_siginfo(info, &first->info);
327 		__sigqueue_free(first);
328 		if (!still_pending)
329 			sigdelset(&list->signal, sig);
330 	} else {
331 
332 		/* Ok, it wasn't in the queue.  This must be
333 		   a fast-pathed signal or we must have been
334 		   out of queue space.  So zero out the info.
335 		 */
336 		sigdelset(&list->signal, sig);
337 		info->si_signo = sig;
338 		info->si_errno = 0;
339 		info->si_code = 0;
340 		info->si_pid = 0;
341 		info->si_uid = 0;
342 	}
343 	return 1;
344 }
345 
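/*
 * Find the next deliverable signal in pending that is not blocked by
 * mask, giving any notifier installed via block_all_signals() a chance
 * to veto it.  Returns the signal number, or 0 if nothing is deliverable.
 */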
346 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
347 			siginfo_t *info)
348 {
349 	int sig = next_signal(pending, mask);
350 
351 	if (sig) {
352 		if (current->notifier) {
353 			if (sigismember(current->notifier_mask, sig)) {
354 				if (!(current->notifier)(current->notifier_data)) {
355 					clear_thread_flag(TIF_SIGPENDING);
356 					return 0;
357 				}
358 			}
359 		}
360 
361 		if (!collect_signal(sig, pending, info))
362 			sig = 0;
363 	}
364 
365 	return sig;
366 }
367 
368 /*
369  * Dequeue a signal and return the element to the caller, which is
370  * expected to free it.
371  *
372  * All callers have to hold the siglock.
373  */
374 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
375 {
376 	int signr = 0;
377 
378 	/* We only dequeue private signals from ourselves; we don't let
379 	 * signalfd steal them.
380 	 */
381 	if (tsk == current)
382 		signr = __dequeue_signal(&tsk->pending, mask, info);
383 	if (!signr) {
384 		signr = __dequeue_signal(&tsk->signal->shared_pending,
385 					 mask, info);
386 		/*
387 		 * itimer signal ?
388 		 *
389 		 * itimers are process shared and we restart periodic
390 		 * itimers in the signal delivery path to prevent DoS
391 		 * attacks in the high resolution timer case. This is
392 		 * compliant with the old way of self restarting
393 		 * itimers, as the SIGALRM is a legacy signal and only
394 		 * queued once. Changing the restart behaviour to
395 		 * restart the timer in the signal dequeue path also
396 		 * reduces the timer noise on heavily loaded !highres
397 		 * systems.
398 		 */
399 		if (unlikely(signr == SIGALRM)) {
400 			struct hrtimer *tmr = &tsk->signal->real_timer;
401 
402 			if (!hrtimer_is_queued(tmr) &&
403 			    tsk->signal->it_real_incr.tv64 != 0) {
404 				hrtimer_forward(tmr, tmr->base->get_time(),
405 						tsk->signal->it_real_incr);
406 				hrtimer_restart(tmr);
407 			}
408 		}
409 	}
410 	if (likely(tsk == current))
411 		recalc_sigpending();
412 	if (signr && unlikely(sig_kernel_stop(signr))) {
413 		/*
414 		 * Set a marker that we have dequeued a stop signal.  Our
415 		 * caller might release the siglock and then the pending
416 		 * stop signal it is about to process is no longer in the
417 		 * pending bitmasks, but must still be cleared by a SIGCONT
418 		 * (and overruled by a SIGKILL).  So those cases clear this
419 		 * shared flag after we've set it.  Note that this flag may
420 		 * remain set after the signal we return is ignored or
421 		 * handled.  That doesn't matter because its only purpose
422 		 * is to alert stop-signal processing code when another
423 		 * processor has come along and cleared the flag.
424 		 */
425 		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
426 			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
427 	}
428 	if ( signr &&
429 	     ((info->si_code & __SI_MASK) == __SI_TIMER) &&
430 	     info->si_sys_private){
431 		/*
432 		 * Release the siglock to ensure proper locking order
433 		 * of timer locks outside of siglocks.  Note, we leave
434 		 * irqs disabled here, since the posix-timers code is
435 		 * about to disable them again anyway.
436 		 */
437 		spin_unlock(&tsk->sighand->siglock);
438 		do_schedule_next_timer(info);
439 		spin_lock(&tsk->sighand->siglock);
440 	}
441 	return signr;
442 }
443 
444 /*
445  * Tell a process that it has a new active signal..
446  *
447  * NOTE! we rely on the previous spin_lock to
448  * lock interrupts for us! We can only be called with
449  * "siglock" held, and the local interrupt must
450  * have been disabled when that got acquired!
451  *
452  * No need to set need_resched since signal event passing
453  * goes through ->blocked
454  */
455 void signal_wake_up(struct task_struct *t, int resume)
456 {
457 	unsigned int mask;
458 
459 	set_tsk_thread_flag(t, TIF_SIGPENDING);
460 
461 	/*
462 	 * For SIGKILL, we want to wake it up in the stopped/traced case.
463 	 * We don't check t->state here because there is a race with it
464 	 * executing on another processor and just now entering stopped state.
465 	 * By using wake_up_state, we ensure the process will wake up and
466 	 * handle its death signal.
467 	 */
468 	mask = TASK_INTERRUPTIBLE;
469 	if (resume)
470 		mask |= TASK_STOPPED | TASK_TRACED;
471 	if (!wake_up_state(t, mask))
472 		kick_process(t);
473 }
474 
475 /*
476  * Remove signals in mask from the pending set and queue.
477  * Returns 1 if any signals were found.
478  *
479  * All callers must be holding the siglock.
480  *
481  * This version takes a sigset mask and looks at all signals,
482  * not just those in the first mask word.
483  */
484 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
485 {
486 	struct sigqueue *q, *n;
487 	sigset_t m;
488 
489 	sigandsets(&m, mask, &s->signal);
490 	if (sigisemptyset(&m))
491 		return 0;
492 
493 	signandsets(&s->signal, &s->signal, mask);
494 	list_for_each_entry_safe(q, n, &s->list, list) {
495 		if (sigismember(mask, q->info.si_signo)) {
496 			list_del_init(&q->list);
497 			__sigqueue_free(q);
498 		}
499 	}
500 	return 1;
501 }
502 /*
503  * Remove signals in mask from the pending set and queue.
504  * Returns 1 if any signals were found.
505  *
506  * All callers must be holding the siglock.
507  */
508 static int rm_from_queue(unsigned long mask, struct sigpending *s)
509 {
510 	struct sigqueue *q, *n;
511 
512 	if (!sigtestsetmask(&s->signal, mask))
513 		return 0;
514 
515 	sigdelsetmask(&s->signal, mask);
516 	list_for_each_entry_safe(q, n, &s->list, list) {
517 		if (q->info.si_signo < SIGRTMIN &&
518 		    (mask & sigmask(q->info.si_signo))) {
519 			list_del_init(&q->list);
520 			__sigqueue_free(q);
521 		}
522 	}
523 	return 1;
524 }
525 
526 /*
527  * Bad permissions for sending the signal
528  */
529 static int check_kill_permission(int sig, struct siginfo *info,
530 				 struct task_struct *t)
531 {
532 	int error = -EINVAL;
533 	if (!valid_signal(sig))
534 		return error;
535 
536 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
537 	if (error)
538 		return error;
539 
540 	error = -EPERM;
541 	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
542 	    && ((sig != SIGCONT) ||
543 		(process_session(current) != process_session(t)))
544 	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
545 	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
546 	    && !capable(CAP_KILL))
547 		return error;
548 
549 	return security_task_kill(t, info, sig, 0);
550 }
551 
552 /* forward decl */
553 static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
554 
555 /*
556  * Handle magic process-wide effects of stop/continue signals.
557  * Unlike the signal actions, these happen immediately at signal-generation
558  * time regardless of blocking, ignoring, or handling.  This does the
559  * actual continuing for SIGCONT, but not the actual stopping for stop
560  * signals.  The process stop is done as a signal action for SIG_DFL.
561  */
562 static void handle_stop_signal(int sig, struct task_struct *p)
563 {
564 	struct task_struct *t;
565 
566 	if (p->signal->flags & SIGNAL_GROUP_EXIT)
567 		/*
568 		 * The process is in the middle of dying already.
569 		 */
570 		return;
571 
572 	if (sig_kernel_stop(sig)) {
573 		/*
574 		 * This is a stop signal.  Remove SIGCONT from all queues.
575 		 */
576 		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
577 		t = p;
578 		do {
579 			rm_from_queue(sigmask(SIGCONT), &t->pending);
580 			t = next_thread(t);
581 		} while (t != p);
582 	} else if (sig == SIGCONT) {
583 		/*
584 		 * Remove all stop signals from all queues,
585 		 * and wake all threads.
586 		 */
587 		if (unlikely(p->signal->group_stop_count > 0)) {
588 			/*
589 			 * There was a group stop in progress.  We'll
590 			 * pretend it finished before we got here.  We are
591 			 * obliged to report it to the parent: if the
592 			 * SIGSTOP happened "after" this SIGCONT, then it
593 			 * would have cleared this pending SIGCONT.  If it
594 			 * happened "before" this SIGCONT, then the parent
595 			 * got the SIGCHLD about the stop finishing before
596 			 * the continue happened.  We do the notification
597 			 * now, and it's as if the stop had finished and
598 			 * the SIGCHLD was pending on entry to this kill.
599 			 */
600 			p->signal->group_stop_count = 0;
601 			p->signal->flags = SIGNAL_STOP_CONTINUED;
602 			spin_unlock(&p->sighand->siglock);
603 			do_notify_parent_cldstop(p, CLD_STOPPED);
604 			spin_lock(&p->sighand->siglock);
605 		}
606 		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
607 		t = p;
608 		do {
609 			unsigned int state;
610 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
611 
612 			/*
613 			 * If there is a handler for SIGCONT, we must make
614 			 * sure that no thread returns to user mode before
615 			 * we post the signal, in case it was the only
616 			 * thread eligible to run the signal handler--then
617 			 * it must not do anything between resuming and
618 			 * running the handler.  With the TIF_SIGPENDING
619 			 * flag set, the thread will pause and acquire the
620 			 * siglock that we hold now and until we've queued
621 			 * the pending signal.
622 			 *
623 			 * Wake up the stopped thread _after_ setting
624 			 * TIF_SIGPENDING
625 			 */
626 			state = TASK_STOPPED;
627 			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
628 				set_tsk_thread_flag(t, TIF_SIGPENDING);
629 				state |= TASK_INTERRUPTIBLE;
630 			}
631 			wake_up_state(t, state);
632 
633 			t = next_thread(t);
634 		} while (t != p);
635 
636 		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
637 			/*
638 			 * We were in fact stopped, and are now continued.
639 			 * Notify the parent with CLD_CONTINUED.
640 			 */
641 			p->signal->flags = SIGNAL_STOP_CONTINUED;
642 			p->signal->group_exit_code = 0;
643 			spin_unlock(&p->sighand->siglock);
644 			do_notify_parent_cldstop(p, CLD_CONTINUED);
645 			spin_lock(&p->sighand->siglock);
646 		} else {
647 			/*
648 			 * We are not stopped, but there could be a stop
649 			 * signal in the middle of being processed after
650 			 * being removed from the queue.  Clear that too.
651 			 */
652 			p->signal->flags = 0;
653 		}
654 	} else if (sig == SIGKILL) {
655 		/*
656 		 * Make sure that any pending stop signal already dequeued
657 		 * is undone by the wakeup for SIGKILL.
658 		 */
659 		p->signal->flags = 0;
660 	}
661 }
662 
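/*
 * Queue sig on the given pending set (per-thread or shared).  If no
 * sigqueue entry can be allocated, the signal is still marked in the
 * pending bitmask so at least one instance gets delivered; only rt
 * signals sent by something other than kill() fail with -EAGAIN on
 * queue overflow.
 */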
663 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
664 			struct sigpending *signals)
665 {
666 	struct sigqueue * q = NULL;
667 	int ret = 0;
668 
669 	/*
670 	 * Deliver the signal to listening signalfds. This must be called
671 	 * with the sighand lock held.
672 	 */
673 	signalfd_notify(t, sig);
674 
675 	/*
676 	 * fast-pathed signals for kernel-internal things like SIGSTOP
677 	 * or SIGKILL.
678 	 */
679 	if (info == SEND_SIG_FORCED)
680 		goto out_set;
681 
682 	/* Real-time signals must be queued if sent by sigqueue, or
683 	   some other real-time mechanism.  It is implementation
684 	   defined whether kill() does so.  We attempt to do so, on
685 	   the principle of least surprise, but since kill is not
686 	   allowed to fail with EAGAIN when low on memory we just
687 	   make sure at least one signal gets delivered and don't
688 	   pass on the info struct.  */
689 
690 	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
691 					     (is_si_special(info) ||
692 					      info->si_code >= 0)));
693 	if (q) {
694 		list_add_tail(&q->list, &signals->list);
695 		switch ((unsigned long) info) {
696 		case (unsigned long) SEND_SIG_NOINFO:
697 			q->info.si_signo = sig;
698 			q->info.si_errno = 0;
699 			q->info.si_code = SI_USER;
700 			q->info.si_pid = current->pid;
701 			q->info.si_uid = current->uid;
702 			break;
703 		case (unsigned long) SEND_SIG_PRIV:
704 			q->info.si_signo = sig;
705 			q->info.si_errno = 0;
706 			q->info.si_code = SI_KERNEL;
707 			q->info.si_pid = 0;
708 			q->info.si_uid = 0;
709 			break;
710 		default:
711 			copy_siginfo(&q->info, info);
712 			break;
713 		}
714 	} else if (!is_si_special(info)) {
715 		if (sig >= SIGRTMIN && info->si_code != SI_USER)
716 		/*
717 		 * Queue overflow, abort.  We may abort if the signal was rt
718 		 * and sent by user using something other than kill().
719 		 */
720 			return -EAGAIN;
721 	}
722 
723 out_set:
724 	sigaddset(&signals->signal, sig);
725 	return ret;
726 }
727 
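/*
 * A legacy (non-rt) signal is never queued more than once: if its bit
 * is already set in the pending mask, further instances are dropped.
 */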
728 #define LEGACY_QUEUE(sigptr, sig) \
729 	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
730 
731 int print_fatal_signals;
732 
733 static void print_fatal_signal(struct pt_regs *regs, int signr)
734 {
735 	printk("%s/%d: potentially unexpected fatal signal %d.\n",
736 		current->comm, current->pid, signr);
737 
738 #ifdef __i386__
739 	printk("code at %08lx: ", regs->eip);
740 	{
741 		int i;
742 		for (i = 0; i < 16; i++) {
743 			unsigned char insn;
744 
745 			__get_user(insn, (unsigned char *)(regs->eip + i));
746 			printk("%02x ", insn);
747 		}
748 	}
749 #endif
750 	printk("\n");
751 	show_regs(regs);
752 }
753 
754 static int __init setup_print_fatal_signals(char *str)
755 {
756 	get_option (&str, &print_fatal_signals);
757 
758 	return 1;
759 }
760 
761 __setup("print-fatal-signals=", setup_print_fatal_signals);
762 
763 static int
764 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
765 {
766 	int ret = 0;
767 
768 	BUG_ON(!irqs_disabled());
769 	assert_spin_locked(&t->sighand->siglock);
770 
771 	/* Short-circuit ignored signals.  */
772 	if (sig_ignored(t, sig))
773 		goto out;
774 
775 	/* Support queueing exactly one non-rt signal, so that we
776 	   can get more detailed information about the cause of
777 	   the signal. */
778 	if (LEGACY_QUEUE(&t->pending, sig))
779 		goto out;
780 
781 	ret = send_signal(sig, info, t, &t->pending);
782 	if (!ret && !sigismember(&t->blocked, sig))
783 		signal_wake_up(t, sig == SIGKILL);
784 out:
785 	return ret;
786 }
787 
788 /*
789  * Force a signal that the process can't ignore: if necessary
790  * we unblock the signal and change any SIG_IGN to SIG_DFL.
791  *
792  * Note: If we unblock the signal, we always reset it to SIG_DFL,
793  * since we do not want to have a signal handler that was blocked
794  * be invoked when user space had explicitly blocked it.
795  *
796  * We don't want to have recursive SIGSEGV's etc, for example.
797  */
798 int
799 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
800 {
801 	unsigned long int flags;
802 	int ret, blocked, ignored;
803 	struct k_sigaction *action;
804 
805 	spin_lock_irqsave(&t->sighand->siglock, flags);
806 	action = &t->sighand->action[sig-1];
807 	ignored = action->sa.sa_handler == SIG_IGN;
808 	blocked = sigismember(&t->blocked, sig);
809 	if (blocked || ignored) {
810 		action->sa.sa_handler = SIG_DFL;
811 		if (blocked) {
812 			sigdelset(&t->blocked, sig);
813 			recalc_sigpending_and_wake(t);
814 		}
815 	}
816 	ret = specific_send_sig_info(sig, info, t);
817 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
818 
819 	return ret;
820 }
821 
822 void
823 force_sig_specific(int sig, struct task_struct *t)
824 {
825 	force_sig_info(sig, SEND_SIG_FORCED, t);
826 }
827 
828 /*
829  * Test if P wants to take SIG.  After we've checked all threads with this,
830  * it's equivalent to finding no threads not blocking SIG.  Any threads not
831  * blocking SIG were ruled out because they are not running and already
832  * have pending signals.  Such threads will dequeue from the shared queue
833  * as soon as they're available, so putting the signal on the shared queue
834  * will be equivalent to sending it to one such thread.
835  */
836 static inline int wants_signal(int sig, struct task_struct *p)
837 {
838 	if (sigismember(&p->blocked, sig))
839 		return 0;
840 	if (p->flags & PF_EXITING)
841 		return 0;
842 	if (sig == SIGKILL)
843 		return 1;
844 	if (p->state & (TASK_STOPPED | TASK_TRACED))
845 		return 0;
846 	return task_curr(p) || !signal_pending(p);
847 }
848 
849 static void
850 __group_complete_signal(int sig, struct task_struct *p)
851 {
852 	struct task_struct *t;
853 
854 	/*
855 	 * Now find a thread we can wake up to take the signal off the queue.
856 	 *
857 	 * If the main thread wants the signal, it gets first crack.
858 	 * Probably the least surprising to the average bear.
859 	 */
860 	if (wants_signal(sig, p))
861 		t = p;
862 	else if (thread_group_empty(p))
863 		/*
864 		 * There is just one thread and it does not need to be woken.
865 		 * It will dequeue unblocked signals before it runs again.
866 		 */
867 		return;
868 	else {
869 		/*
870 		 * Otherwise try to find a suitable thread.
871 		 */
872 		t = p->signal->curr_target;
873 		if (t == NULL)
874 			/* restart balancing at this thread */
875 			t = p->signal->curr_target = p;
876 
877 		while (!wants_signal(sig, t)) {
878 			t = next_thread(t);
879 			if (t == p->signal->curr_target)
880 				/*
881 				 * No thread needs to be woken.
882 				 * Any eligible threads will see
883 				 * the signal in the queue soon.
884 				 */
885 				return;
886 		}
887 		p->signal->curr_target = t;
888 	}
889 
890 	/*
891 	 * Found a killable thread.  If the signal will be fatal,
892 	 * then start taking the whole group down immediately.
893 	 */
894 	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
895 	    !sigismember(&t->real_blocked, sig) &&
896 	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
897 		/*
898 		 * This signal will be fatal to the whole group.
899 		 */
900 		if (!sig_kernel_coredump(sig)) {
901 			/*
902 			 * Start a group exit and wake everybody up.
903 			 * This way we don't have other threads
904 			 * running and doing things after a slower
905 			 * thread has the fatal signal pending.
906 			 */
907 			p->signal->flags = SIGNAL_GROUP_EXIT;
908 			p->signal->group_exit_code = sig;
909 			p->signal->group_stop_count = 0;
910 			t = p;
911 			do {
912 				sigaddset(&t->pending.signal, SIGKILL);
913 				signal_wake_up(t, 1);
914 				t = next_thread(t);
915 			} while (t != p);
916 			return;
917 		}
918 
919 		/*
920 		 * There will be a core dump.  We make all threads other
921 		 * than the chosen one go into a group stop so that nothing
922 		 * happens until it gets scheduled, takes the signal off
923 		 * the shared queue, and does the core dump.  This is a
924 		 * little more complicated than strictly necessary, but it
925 		 * keeps the signal state that winds up in the core dump
926 		 * unchanged from the death state, e.g. which thread had
927 		 * the core-dump signal unblocked.
928 		 */
929 		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
930 		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
931 		p->signal->group_stop_count = 0;
932 		p->signal->group_exit_task = t;
933 		t = p;
934 		do {
935 			p->signal->group_stop_count++;
936 			signal_wake_up(t, 0);
937 			t = next_thread(t);
938 		} while (t != p);
939 		wake_up_process(p->signal->group_exit_task);
940 		return;
941 	}
942 
943 	/*
944 	 * The signal is already in the shared-pending queue.
945 	 * Tell the chosen thread to wake up and dequeue it.
946 	 */
947 	signal_wake_up(t, sig == SIGKILL);
948 	return;
949 }
950 
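/*
 * Send a process-wide signal to p's thread group: queue it on the
 * shared pending set and pick one suitable thread to wake up and
 * dequeue it.  The caller must hold p's siglock.
 */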
951 int
952 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
953 {
954 	int ret = 0;
955 
956 	assert_spin_locked(&p->sighand->siglock);
957 	handle_stop_signal(sig, p);
958 
959 	/* Short-circuit ignored signals.  */
960 	if (sig_ignored(p, sig))
961 		return ret;
962 
963 	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
964 		/* This is a non-RT signal and we already have one queued.  */
965 		return ret;
966 
967 	/*
968 	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
969 	 * We always use the shared queue for process-wide signals,
970 	 * to avoid several races.
971 	 */
972 	ret = send_signal(sig, info, p, &p->signal->shared_pending);
973 	if (unlikely(ret))
974 		return ret;
975 
976 	__group_complete_signal(sig, p);
977 	return 0;
978 }
979 
980 /*
981  * Nuke all other threads in the group.
982  */
983 void zap_other_threads(struct task_struct *p)
984 {
985 	struct task_struct *t;
986 
987 	p->signal->flags = SIGNAL_GROUP_EXIT;
988 	p->signal->group_stop_count = 0;
989 
990 	if (thread_group_empty(p))
991 		return;
992 
993 	for (t = next_thread(p); t != p; t = next_thread(t)) {
994 		/*
995 		 * Don't bother with already dead threads
996 		 */
997 		if (t->exit_state)
998 			continue;
999 
1000 		/* SIGKILL will be handled before any pending SIGSTOP */
1001 		sigaddset(&t->pending.signal, SIGKILL);
1002 		signal_wake_up(t, 1);
1003 	}
1004 }
1005 
1006 /*
1007  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
1008  */
1009 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
1010 {
1011 	struct sighand_struct *sighand;
1012 
1013 	for (;;) {
1014 		sighand = rcu_dereference(tsk->sighand);
1015 		if (unlikely(sighand == NULL))
1016 			break;
1017 
1018 		spin_lock_irqsave(&sighand->siglock, *flags);
1019 		if (likely(sighand == tsk->sighand))
1020 			break;
1021 		spin_unlock_irqrestore(&sighand->siglock, *flags);
1022 	}
1023 
1024 	return sighand;
1025 }
1026 
1027 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1028 {
1029 	unsigned long flags;
1030 	int ret;
1031 
1032 	ret = check_kill_permission(sig, info, p);
1033 
1034 	if (!ret && sig) {
1035 		ret = -ESRCH;
1036 		if (lock_task_sighand(p, &flags)) {
1037 			ret = __group_send_sig_info(sig, info, p);
1038 			unlock_task_sighand(p, &flags);
1039 		}
1040 	}
1041 
1042 	return ret;
1043 }
1044 
1045 /*
1046  * kill_pgrp_info() sends a signal to a process group: this is what the tty
1047  * control characters do (^C, ^Z etc)
1048  */
1049 
1050 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1051 {
1052 	struct task_struct *p = NULL;
1053 	int retval, success;
1054 
1055 	success = 0;
1056 	retval = -ESRCH;
1057 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1058 		int err = group_send_sig_info(sig, info, p);
1059 		success |= !err;
1060 		retval = err;
1061 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1062 	return success ? 0 : retval;
1063 }
1064 
1065 int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1066 {
1067 	int retval;
1068 
1069 	read_lock(&tasklist_lock);
1070 	retval = __kill_pgrp_info(sig, info, pgrp);
1071 	read_unlock(&tasklist_lock);
1072 
1073 	return retval;
1074 }
1075 
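/*
 * Send a signal to the process (thread group) identified by pid.
 * tasklist_lock is taken only for signals that still need it
 * (sig_needs_tasklist()).
 */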
1076 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1077 {
1078 	int error;
1079 	struct task_struct *p;
1080 
1081 	rcu_read_lock();
1082 	if (unlikely(sig_needs_tasklist(sig)))
1083 		read_lock(&tasklist_lock);
1084 
1085 	p = pid_task(pid, PIDTYPE_PID);
1086 	error = -ESRCH;
1087 	if (p)
1088 		error = group_send_sig_info(sig, info, p);
1089 
1090 	if (unlikely(sig_needs_tasklist(sig)))
1091 		read_unlock(&tasklist_lock);
1092 	rcu_read_unlock();
1093 	return error;
1094 }
1095 
1096 int
1097 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1098 {
1099 	int error;
1100 	rcu_read_lock();
1101 	error = kill_pid_info(sig, info, find_pid(pid));
1102 	rcu_read_unlock();
1103 	return error;
1104 }
1105 
1106 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1107 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1108 		      uid_t uid, uid_t euid, u32 secid)
1109 {
1110 	int ret = -EINVAL;
1111 	struct task_struct *p;
1112 
1113 	if (!valid_signal(sig))
1114 		return ret;
1115 
1116 	read_lock(&tasklist_lock);
1117 	p = pid_task(pid, PIDTYPE_PID);
1118 	if (!p) {
1119 		ret = -ESRCH;
1120 		goto out_unlock;
1121 	}
1122 	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1123 	    && (euid != p->suid) && (euid != p->uid)
1124 	    && (uid != p->suid) && (uid != p->uid)) {
1125 		ret = -EPERM;
1126 		goto out_unlock;
1127 	}
1128 	ret = security_task_kill(p, info, sig, secid);
1129 	if (ret)
1130 		goto out_unlock;
1131 	if (sig && p->sighand) {
1132 		unsigned long flags;
1133 		spin_lock_irqsave(&p->sighand->siglock, flags);
1134 		ret = __group_send_sig_info(sig, info, p);
1135 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1136 	}
1137 out_unlock:
1138 	read_unlock(&tasklist_lock);
1139 	return ret;
1140 }
1141 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1142 
1143 /*
1144  * kill_something_info() interprets pid in interesting ways just like kill(2).
1145  *
1146  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1147  * is probably wrong.  Should make it like BSD or SYSV.
1148  */
1149 
1150 static int kill_something_info(int sig, struct siginfo *info, int pid)
1151 {
1152 	int ret;
1153 	rcu_read_lock();
1154 	if (!pid) {
1155 		ret = kill_pgrp_info(sig, info, task_pgrp(current));
1156 	} else if (pid == -1) {
1157 		int retval = 0, count = 0;
1158 		struct task_struct * p;
1159 
1160 		read_lock(&tasklist_lock);
1161 		for_each_process(p) {
1162 			if (p->pid > 1 && p->tgid != current->tgid) {
1163 				int err = group_send_sig_info(sig, info, p);
1164 				++count;
1165 				if (err != -EPERM)
1166 					retval = err;
1167 			}
1168 		}
1169 		read_unlock(&tasklist_lock);
1170 		ret = count ? retval : -ESRCH;
1171 	} else if (pid < 0) {
1172 		ret = kill_pgrp_info(sig, info, find_pid(-pid));
1173 	} else {
1174 		ret = kill_pid_info(sig, info, find_pid(pid));
1175 	}
1176 	rcu_read_unlock();
1177 	return ret;
1178 }
1179 
1180 /*
1181  * These are for backward compatibility with the rest of the kernel source.
1182  */
1183 
1184 /*
1185  * These two are the most common entry points.  They send a signal
1186  * just to the specific thread.
1187  */
1188 int
1189 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1190 {
1191 	int ret;
1192 	unsigned long flags;
1193 
1194 	/*
1195 	 * Make sure legacy kernel users don't send in bad values
1196 	 * (normal paths check this in check_kill_permission).
1197 	 */
1198 	if (!valid_signal(sig))
1199 		return -EINVAL;
1200 
1201 	/*
1202 	 * We need the tasklist lock even for the specific
1203 	 * thread case (when we don't need to follow the group
1204 	 * lists) in order to avoid races with "p->sighand"
1205 	 * going away or changing from under us.
1206 	 */
1207 	read_lock(&tasklist_lock);
1208 	spin_lock_irqsave(&p->sighand->siglock, flags);
1209 	ret = specific_send_sig_info(sig, info, p);
1210 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
1211 	read_unlock(&tasklist_lock);
1212 	return ret;
1213 }
1214 
1215 #define __si_special(priv) \
1216 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1217 
1218 int
1219 send_sig(int sig, struct task_struct *p, int priv)
1220 {
1221 	return send_sig_info(sig, __si_special(priv), p);
1222 }
1223 
1224 /*
1225  * This is the entry point for "process-wide" signals.
1226  * They will go to an appropriate thread in the thread group.
1227  */
1228 int
1229 send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1230 {
1231 	int ret;
1232 	read_lock(&tasklist_lock);
1233 	ret = group_send_sig_info(sig, info, p);
1234 	read_unlock(&tasklist_lock);
1235 	return ret;
1236 }
1237 
1238 void
1239 force_sig(int sig, struct task_struct *p)
1240 {
1241 	force_sig_info(sig, SEND_SIG_PRIV, p);
1242 }
1243 
1244 /*
1245  * When things go south during signal handling, we
1246  * will force a SIGSEGV. And if the signal that caused
1247  * the problem was already a SIGSEGV, we'll want to
1248  * make sure we don't even try to deliver the signal..
1249  */
1250 int
1251 force_sigsegv(int sig, struct task_struct *p)
1252 {
1253 	if (sig == SIGSEGV) {
1254 		unsigned long flags;
1255 		spin_lock_irqsave(&p->sighand->siglock, flags);
1256 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1257 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1258 	}
1259 	force_sig(SIGSEGV, p);
1260 	return 0;
1261 }
1262 
1263 int kill_pgrp(struct pid *pid, int sig, int priv)
1264 {
1265 	return kill_pgrp_info(sig, __si_special(priv), pid);
1266 }
1267 EXPORT_SYMBOL(kill_pgrp);
1268 
1269 int kill_pid(struct pid *pid, int sig, int priv)
1270 {
1271 	return kill_pid_info(sig, __si_special(priv), pid);
1272 }
1273 EXPORT_SYMBOL(kill_pid);
1274 
1275 int
1276 kill_proc(pid_t pid, int sig, int priv)
1277 {
1278 	return kill_proc_info(sig, __si_special(priv), pid);
1279 }
1280 
1281 /*
1282  * These functions support sending signals using preallocated sigqueue
1283  * structures.  This is needed "because realtime applications cannot
1284  * afford to lose notifications of asynchronous events, like timer
1285  * expirations or I/O completions".  In the case of POSIX timers
1286  * we allocate the sigqueue structure at timer_create() time.  If this
1287  * allocation fails we are able to report the failure to the application
1288  * with an EAGAIN error.
1289  */
1290 
1291 struct sigqueue *sigqueue_alloc(void)
1292 {
1293 	struct sigqueue *q;
1294 
1295 	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1296 		q->flags |= SIGQUEUE_PREALLOC;
1297 	return(q);
1298 }
1299 
1300 void sigqueue_free(struct sigqueue *q)
1301 {
1302 	unsigned long flags;
1303 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1304 	/*
1305 	 * If the signal is still pending, remove it from the
1306 	 * pending queue.
1307 	 */
1308 	if (unlikely(!list_empty(&q->list))) {
1309 		spinlock_t *lock = &current->sighand->siglock;
1310 		read_lock(&tasklist_lock);
1311 		spin_lock_irqsave(lock, flags);
1312 		if (!list_empty(&q->list))
1313 			list_del_init(&q->list);
1314 		spin_unlock_irqrestore(lock, flags);
1315 		read_unlock(&tasklist_lock);
1316 	}
1317 	q->flags &= ~SIGQUEUE_PREALLOC;
1318 	__sigqueue_free(q);
1319 }
1320 
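/*
 * Deliver a preallocated (posix-timer) sigqueue entry to the single
 * thread p.  Returns 0 on success, 1 if the signal is currently
 * ignored and -1 if the task is already exiting.
 */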
1321 int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1322 {
1323 	unsigned long flags;
1324 	int ret = 0;
1325 
1326 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1327 
1328 	/*
1329 	 * The RCU-based delayed sighand destruction makes it possible to
1330 	 * run this without the tasklist lock held. The task struct itself
1331 	 * cannot go away, as create_timer did get_task_struct().
1332 	 *
1333 	 * We return -1 when the task is marked exiting, so
1334 	 * posix_timer_event() can redirect it to the group leader.
1335 	 */
1336 	rcu_read_lock();
1337 
1338 	if (!likely(lock_task_sighand(p, &flags))) {
1339 		ret = -1;
1340 		goto out_err;
1341 	}
1342 
1343 	if (unlikely(!list_empty(&q->list))) {
1344 		/*
1345 		 * If an SI_TIMER entry is already queued, just increment
1346 		 * the overrun count.
1347 		 */
1348 		BUG_ON(q->info.si_code != SI_TIMER);
1349 		q->info.si_overrun++;
1350 		goto out;
1351 	}
1352 	/* Short-circuit ignored signals.  */
1353 	if (sig_ignored(p, sig)) {
1354 		ret = 1;
1355 		goto out;
1356 	}
1357 	/*
1358 	 * Deliver the signal to listening signalfds. This must be called
1359 	 * with the sighand lock held.
1360 	 */
1361 	signalfd_notify(p, sig);
1362 
1363 	list_add_tail(&q->list, &p->pending.list);
1364 	sigaddset(&p->pending.signal, sig);
1365 	if (!sigismember(&p->blocked, sig))
1366 		signal_wake_up(p, sig == SIGKILL);
1367 
1368 out:
1369 	unlock_task_sighand(p, &flags);
1370 out_err:
1371 	rcu_read_unlock();
1372 
1373 	return ret;
1374 }
1375 
1376 int
1377 send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1378 {
1379 	unsigned long flags;
1380 	int ret = 0;
1381 
1382 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1383 
1384 	read_lock(&tasklist_lock);
1385 	/* Since it_lock is held, p->sighand cannot be NULL. */
1386 	spin_lock_irqsave(&p->sighand->siglock, flags);
1387 	handle_stop_signal(sig, p);
1388 
1389 	/* Short-circuit ignored signals.  */
1390 	if (sig_ignored(p, sig)) {
1391 		ret = 1;
1392 		goto out;
1393 	}
1394 
1395 	if (unlikely(!list_empty(&q->list))) {
1396 		/*
1397 		 * If an SI_TIMER entry is already queued, just increment
1398 		 * the overrun count.  Other uses should not try to
1399 		 * send the signal multiple times.
1400 		 */
1401 		BUG_ON(q->info.si_code != SI_TIMER);
1402 		q->info.si_overrun++;
1403 		goto out;
1404 	}
1405 	/*
1406 	 * Deliver the signal to listening signalfds. This must be called
1407 	 * with the sighand lock held.
1408 	 */
1409 	signalfd_notify(p, sig);
1410 
1411 	/*
1412 	 * Put this signal on the shared-pending queue.
1413 	 * We always use the shared queue for process-wide signals,
1414 	 * to avoid several races.
1415 	 */
1416 	list_add_tail(&q->list, &p->signal->shared_pending.list);
1417 	sigaddset(&p->signal->shared_pending.signal, sig);
1418 
1419 	__group_complete_signal(sig, p);
1420 out:
1421 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
1422 	read_unlock(&tasklist_lock);
1423 	return ret;
1424 }
1425 
1426 /*
1427  * Wake up any threads in the parent blocked in wait* syscalls.
1428  */
1429 static inline void __wake_up_parent(struct task_struct *p,
1430 				    struct task_struct *parent)
1431 {
1432 	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1433 }
1434 
1435 /*
1436  * Let a parent know about the death of a child.
1437  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1438  */
1439 
1440 void do_notify_parent(struct task_struct *tsk, int sig)
1441 {
1442 	struct siginfo info;
1443 	unsigned long flags;
1444 	struct sighand_struct *psig;
1445 
1446 	BUG_ON(sig == -1);
1447 
1448  	/* do_notify_parent_cldstop should have been called instead.  */
1449  	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1450 
1451 	BUG_ON(!tsk->ptrace &&
1452 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1453 
1454 	info.si_signo = sig;
1455 	info.si_errno = 0;
1456 	info.si_pid = tsk->pid;
1457 	info.si_uid = tsk->uid;
1458 
1459 	/* FIXME: find out whether or not this is supposed to be c*time. */
1460 	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1461 						       tsk->signal->utime));
1462 	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1463 						       tsk->signal->stime));
1464 
1465 	info.si_status = tsk->exit_code & 0x7f;
1466 	if (tsk->exit_code & 0x80)
1467 		info.si_code = CLD_DUMPED;
1468 	else if (tsk->exit_code & 0x7f)
1469 		info.si_code = CLD_KILLED;
1470 	else {
1471 		info.si_code = CLD_EXITED;
1472 		info.si_status = tsk->exit_code >> 8;
1473 	}
1474 
1475 	psig = tsk->parent->sighand;
1476 	spin_lock_irqsave(&psig->siglock, flags);
1477 	if (!tsk->ptrace && sig == SIGCHLD &&
1478 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1479 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1480 		/*
1481 		 * We are exiting and our parent doesn't care.  POSIX.1
1482 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1483 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1484 		 * automatically and not left for our parent's wait4 call.
1485 		 * Rather than having the parent do it as a magic kind of
1486 		 * signal handler, we just set this to tell do_exit that we
1487 		 * can be cleaned up without becoming a zombie.  Note that
1488 		 * we still call __wake_up_parent in this case, because a
1489 		 * blocked sys_wait4 might now return -ECHILD.
1490 		 *
1491 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1492 		 * is implementation-defined: we do (if you don't want
1493 		 * it, just use SIG_IGN instead).
1494 		 */
1495 		tsk->exit_signal = -1;
1496 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1497 			sig = 0;
1498 	}
1499 	if (valid_signal(sig) && sig > 0)
1500 		__group_send_sig_info(sig, &info, tsk->parent);
1501 	__wake_up_parent(tsk, tsk->parent);
1502 	spin_unlock_irqrestore(&psig->siglock, flags);
1503 }
1504 
1505 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1506 {
1507 	struct siginfo info;
1508 	unsigned long flags;
1509 	struct task_struct *parent;
1510 	struct sighand_struct *sighand;
1511 
1512 	if (tsk->ptrace & PT_PTRACED)
1513 		parent = tsk->parent;
1514 	else {
1515 		tsk = tsk->group_leader;
1516 		parent = tsk->real_parent;
1517 	}
1518 
1519 	info.si_signo = SIGCHLD;
1520 	info.si_errno = 0;
1521 	info.si_pid = tsk->pid;
1522 	info.si_uid = tsk->uid;
1523 
1524 	/* FIXME: find out whether or not this is supposed to be c*time. */
1525 	info.si_utime = cputime_to_jiffies(tsk->utime);
1526 	info.si_stime = cputime_to_jiffies(tsk->stime);
1527 
1528  	info.si_code = why;
1529  	switch (why) {
1530  	case CLD_CONTINUED:
1531  		info.si_status = SIGCONT;
1532  		break;
1533  	case CLD_STOPPED:
1534  		info.si_status = tsk->signal->group_exit_code & 0x7f;
1535  		break;
1536  	case CLD_TRAPPED:
1537  		info.si_status = tsk->exit_code & 0x7f;
1538  		break;
1539  	default:
1540  		BUG();
1541  	}
1542 
1543 	sighand = parent->sighand;
1544 	spin_lock_irqsave(&sighand->siglock, flags);
1545 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1546 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1547 		__group_send_sig_info(SIGCHLD, &info, parent);
1548 	/*
1549 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1550 	 */
1551 	__wake_up_parent(tsk, parent);
1552 	spin_unlock_irqrestore(&sighand->siglock, flags);
1553 }
1554 
1555 static inline int may_ptrace_stop(void)
1556 {
1557 	if (!likely(current->ptrace & PT_PTRACED))
1558 		return 0;
1559 
1560 	if (unlikely(current->parent == current->real_parent &&
1561 		    (current->ptrace & PT_ATTACHED)))
1562 		return 0;
1563 
1564 	/*
1565 	 * Are we in the middle of do_coredump?
1566 	 * If so, and our tracer is also part of the coredump, stopping
1567 	 * is a deadlock situation and pointless because our tracer
1568 	 * is dead, so don't allow us to stop.
1569 	 * If SIGKILL was already sent before the caller unlocked
1570 	 * ->siglock we must see ->core_waiters != 0. Otherwise it
1571 	 * is safe to enter schedule().
1572 	 */
1573 	if (unlikely(current->mm->core_waiters) &&
1574 	    unlikely(current->mm == current->parent->mm))
1575 		return 0;
1576 
1577 	return 1;
1578 }
1579 
1580 /*
1581  * This must be called with current->sighand->siglock held.
1582  *
1583  * This should be the path for all ptrace stops.
1584  * We always set current->last_siginfo while stopped here.
1585  * That makes it a way to test a stopped process for
1586  * being ptrace-stopped vs being job-control-stopped.
1587  *
1588  * If we actually decide not to stop at all because the tracer is gone,
1589  * we leave nostop_code in current->exit_code.
1590  */
1591 static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1592 {
1593 	/*
1594 	 * If there is a group stop in progress,
1595 	 * we must participate in the bookkeeping.
1596 	 */
1597 	if (current->signal->group_stop_count > 0)
1598 		--current->signal->group_stop_count;
1599 
1600 	current->last_siginfo = info;
1601 	current->exit_code = exit_code;
1602 
1603 	/* Let the debugger run.  */
1604 	set_current_state(TASK_TRACED);
1605 	spin_unlock_irq(&current->sighand->siglock);
1606 	try_to_freeze();
1607 	read_lock(&tasklist_lock);
1608 	if (may_ptrace_stop()) {
1609 		do_notify_parent_cldstop(current, CLD_TRAPPED);
1610 		read_unlock(&tasklist_lock);
1611 		schedule();
1612 	} else {
1613 		/*
1614 		 * By the time we got the lock, our tracer went away.
1615 		 * Don't stop here.
1616 		 */
1617 		read_unlock(&tasklist_lock);
1618 		set_current_state(TASK_RUNNING);
1619 		current->exit_code = nostop_code;
1620 	}
1621 
1622 	/*
1623 	 * We are back.  Now reacquire the siglock before touching
1624 	 * last_siginfo, so that we are sure to have synchronized with
1625 	 * any signal-sending on another CPU that wants to examine it.
1626 	 */
1627 	spin_lock_irq(&current->sighand->siglock);
1628 	current->last_siginfo = NULL;
1629 
1630 	/*
1631 	 * Queued signals ignored us while we were stopped for tracing.
1632 	 * So check for any that we should take before resuming user mode.
1633 	 * This sets TIF_SIGPENDING, but never clears it.
1634 	 */
1635 	recalc_sigpending_tsk(current);
1636 }
1637 
1638 void ptrace_notify(int exit_code)
1639 {
1640 	siginfo_t info;
1641 
1642 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1643 
1644 	memset(&info, 0, sizeof info);
1645 	info.si_signo = SIGTRAP;
1646 	info.si_code = exit_code;
1647 	info.si_pid = current->pid;
1648 	info.si_uid = current->uid;
1649 
1650 	/* Let the debugger run.  */
1651 	spin_lock_irq(&current->sighand->siglock);
1652 	ptrace_stop(exit_code, 0, &info);
1653 	spin_unlock_irq(&current->sighand->siglock);
1654 }
1655 
1656 static void
1657 finish_stop(int stop_count)
1658 {
1659 	/*
1660 	 * If there are no other threads in the group, or if there is
1661 	 * a group stop in progress and we are the last to stop,
1662 	 * report to the parent.  When ptraced, every thread reports itself.
1663 	 */
1664 	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
1665 		read_lock(&tasklist_lock);
1666 		do_notify_parent_cldstop(current, CLD_STOPPED);
1667 		read_unlock(&tasklist_lock);
1668 	}
1669 
1670 	do {
1671 		schedule();
1672 	} while (try_to_freeze());
1673 	/*
1674 	 * Now we don't run again until continued.
1675 	 */
1676 	current->exit_code = 0;
1677 }
1678 
1679 /*
1680  * This performs the stopping for SIGSTOP and other stop signals.
1681  * We have to stop all threads in the thread group.
1682  * Returns nonzero if we've actually stopped and released the siglock.
1683  * Returns zero if we didn't stop and still hold the siglock.
1684  */
1685 static int do_signal_stop(int signr)
1686 {
1687 	struct signal_struct *sig = current->signal;
1688 	int stop_count;
1689 
1690 	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1691 		return 0;
1692 
1693 	if (sig->group_stop_count > 0) {
1694 		/*
1695 		 * There is a group stop in progress.  We don't need to
1696 		 * start another one.
1697 		 */
1698 		stop_count = --sig->group_stop_count;
1699 	} else {
1700 		/*
1701 		 * There is no group stop already in progress.
1702 		 * We must initiate one now.
1703 		 */
1704 		struct task_struct *t;
1705 
1706 		sig->group_exit_code = signr;
1707 
1708 		stop_count = 0;
1709 		for (t = next_thread(current); t != current; t = next_thread(t))
1710 			/*
1711 			 * Setting state to TASK_STOPPED for a group
1712 			 * stop is always done with the siglock held,
1713 			 * so this check has no races.
1714 			 */
1715 			if (!t->exit_state &&
1716 			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
1717 				stop_count++;
1718 				signal_wake_up(t, 0);
1719 			}
1720 		sig->group_stop_count = stop_count;
1721 	}
1722 
1723 	if (stop_count == 0)
1724 		sig->flags = SIGNAL_STOP_STOPPED;
1725 	current->exit_code = sig->group_exit_code;
1726 	__set_current_state(TASK_STOPPED);
1727 
1728 	spin_unlock_irq(&current->sighand->siglock);
1729 	finish_stop(stop_count);
1730 	return 1;
1731 }
1732 
1733 /*
1734  * Do appropriate magic when group_stop_count > 0.
1735  * We return nonzero if we stopped, after releasing the siglock.
1736  * We return zero if we still hold the siglock and should look
1737  * for another signal without checking group_stop_count again.
1738  */
1739 static int handle_group_stop(void)
1740 {
1741 	int stop_count;
1742 
1743 	if (current->signal->group_exit_task == current) {
1744 		/*
1745 		 * Group stop is so we can do a core dump.
1746 		 * We are the initiating thread, so get on with it.
1747 		 */
1748 		current->signal->group_exit_task = NULL;
1749 		return 0;
1750 	}
1751 
1752 	if (current->signal->flags & SIGNAL_GROUP_EXIT)
1753 		/*
1754 		 * Group stop is so another thread can do a core dump,
1755 		 * or else we are racing against a death signal.
1756 		 * Just punt the stop so we can get the next signal.
1757 		 */
1758 		return 0;
1759 
1760 	/*
1761 	 * There is a group stop in progress.  We stop
1762 	 * without any associated signal being in our queue.
1763 	 */
1764 	stop_count = --current->signal->group_stop_count;
1765 	if (stop_count == 0)
1766 		current->signal->flags = SIGNAL_STOP_STOPPED;
1767 	current->exit_code = current->signal->group_exit_code;
1768 	set_current_state(TASK_STOPPED);
1769 	spin_unlock_irq(&current->sighand->siglock);
1770 	finish_stop(stop_count);
1771 	return 1;
1772 }
1773 
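/*
 * Architecture signal-delivery code calls this on the way back to user
 * mode.  It dequeues the next signal, lets a ptracer intercept it,
 * handles group stops and default actions, and returns a non-zero
 * signal number (with the action copied into *return_ka) only when a
 * user handler has to be run.
 */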
1774 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1775 			  struct pt_regs *regs, void *cookie)
1776 {
1777 	sigset_t *mask = &current->blocked;
1778 	int signr = 0;
1779 
1780 	try_to_freeze();
1781 
1782 relock:
1783 	spin_lock_irq(&current->sighand->siglock);
1784 	for (;;) {
1785 		struct k_sigaction *ka;
1786 
1787 		if (unlikely(current->signal->group_stop_count > 0) &&
1788 		    handle_group_stop())
1789 			goto relock;
1790 
1791 		signr = dequeue_signal(current, mask, info);
1792 
1793 		if (!signr)
1794 			break; /* will return 0 */
1795 
1796 		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1797 			ptrace_signal_deliver(regs, cookie);
1798 
1799 			/* Let the debugger run.  */
1800 			ptrace_stop(signr, signr, info);
1801 
1802 			/* We're back.  Did the debugger cancel the sig?  */
1803 			signr = current->exit_code;
1804 			if (signr == 0)
1805 				continue;
1806 
1807 			current->exit_code = 0;
1808 
1809 			/* Update the siginfo structure if the signal has
1810 			   changed.  If the debugger wanted something
1811 			   specific in the siginfo structure then it should
1812 			   have updated *info via PTRACE_SETSIGINFO.  */
1813 			if (signr != info->si_signo) {
1814 				info->si_signo = signr;
1815 				info->si_errno = 0;
1816 				info->si_code = SI_USER;
1817 				info->si_pid = current->parent->pid;
1818 				info->si_uid = current->parent->uid;
1819 			}
1820 
1821 			/* If the (new) signal is now blocked, requeue it.  */
1822 			if (sigismember(&current->blocked, signr)) {
1823 				specific_send_sig_info(signr, info, current);
1824 				continue;
1825 			}
1826 		}
1827 
1828 		ka = &current->sighand->action[signr-1];
1829 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1830 			continue;
1831 		if (ka->sa.sa_handler != SIG_DFL) {
1832 			/* Run the handler.  */
1833 			*return_ka = *ka;
1834 
1835 			if (ka->sa.sa_flags & SA_ONESHOT)
1836 				ka->sa.sa_handler = SIG_DFL;
1837 
1838 			break; /* will return non-zero "signr" value */
1839 		}
1840 
1841 		/*
1842 		 * Now we are doing the default action for this signal.
1843 		 */
1844 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
1845 			continue;
1846 
1847 		/*
1848 		 * Init of a pid space gets no signals it doesn't want from
1849 		 * within that pid space. It can of course get signals from
1850 		 * its parent pid space.
1851 		 */
1852 		if (current == child_reaper(current))
1853 			continue;
1854 
1855 		if (sig_kernel_stop(signr)) {
1856 			/*
1857 			 * The default action is to stop all threads in
1858 			 * the thread group.  The job control signals
1859 			 * do nothing in an orphaned pgrp, but SIGSTOP
1860 			 * always works.  Note that siglock needs to be
1861 			 * dropped during the call to is_orphaned_pgrp()
1862 			 * because of lock ordering with tasklist_lock.
1863 			 * This allows an intervening SIGCONT to be posted.
1864 			 * We need to check for that and bail out if necessary.
1865 			 */
1866 			if (signr != SIGSTOP) {
1867 				spin_unlock_irq(&current->sighand->siglock);
1868 
1869 				/* signals can be posted during this window */
1870 
1871 				if (is_current_pgrp_orphaned())
1872 					goto relock;
1873 
1874 				spin_lock_irq(&current->sighand->siglock);
1875 			}
1876 
1877 			if (likely(do_signal_stop(signr))) {
1878 				/* It released the siglock.  */
1879 				goto relock;
1880 			}
1881 
1882 			/*
1883 			 * We didn't actually stop, due to a race
1884 			 * with SIGCONT or something like that.
1885 			 */
1886 			continue;
1887 		}
1888 
1889 		spin_unlock_irq(&current->sighand->siglock);
1890 
1891 		/*
1892 		 * Anything else is fatal, maybe with a core dump.
1893 		 */
1894 		current->flags |= PF_SIGNALED;
1895 		if ((signr != SIGKILL) && print_fatal_signals)
1896 			print_fatal_signal(regs, signr);
1897 		if (sig_kernel_coredump(signr)) {
1898 			/*
1899 			 * If it was able to dump core, this kills all
1900 			 * other threads in the group and synchronizes with
1901 			 * their demise.  If we lost the race with another
1902 			 * thread getting here, it set group_exit_code
1903 			 * first and our do_group_exit call below will use
1904 			 * that value and ignore the one we pass it.
1905 			 */
1906 			do_coredump((long)signr, signr, regs);
1907 		}
1908 
1909 		/*
1910 		 * Death signals, no core dump.
1911 		 */
1912 		do_group_exit(signr);
1913 		/* NOTREACHED */
1914 	}
1915 	spin_unlock_irq(&current->sighand->siglock);
1916 	return signr;
1917 }
1918 
1919 EXPORT_SYMBOL(recalc_sigpending);
1920 EXPORT_SYMBOL_GPL(dequeue_signal);
1921 EXPORT_SYMBOL(flush_signals);
1922 EXPORT_SYMBOL(force_sig);
1923 EXPORT_SYMBOL(kill_proc);
1924 EXPORT_SYMBOL(ptrace_notify);
1925 EXPORT_SYMBOL(send_sig);
1926 EXPORT_SYMBOL(send_sig_info);
1927 EXPORT_SYMBOL(sigprocmask);
1928 EXPORT_SYMBOL(block_all_signals);
1929 EXPORT_SYMBOL(unblock_all_signals);
1930 
1931 
1932 /*
1933  * System call entry points.
1934  */
1935 
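/*
 * sys_restart_syscall - restart an interrupted system call.
 *
 * A syscall that returns -ERESTART_RESTARTBLOCK records a continuation in
 * current_thread_info()->restart_block; this entry point simply invokes that
 * stored callback.
 */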
1936 asmlinkage long sys_restart_syscall(void)
1937 {
1938 	struct restart_block *restart = &current_thread_info()->restart_block;
1939 	return restart->fn(restart);
1940 }
1941 
1942 long do_no_restart_syscall(struct restart_block *param)
1943 {
1944 	return -EINTR;
1945 }
1946 
1947 /*
1948  * We don't need to take the kernel lock - this is all local to this
1949  * particular thread (and that's good, because this path is _heavily_
1950  * used by various programs).
1951  */
1952 
1953 /*
1954  * This is also useful for kernel threads that want to temporarily
1955  * (or permanently) block certain signals.
1956  *
1957  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
1958  * interface happily blocks "unblockable" signals like SIGKILL
1959  * and friends.
1960  */
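/*
 * For instance, a kernel thread that wants to block every signal except
 * SIGKILL could do something like:
 *
 *	sigset_t all, old;
 *
 *	sigfillset(&all);
 *	sigdelset(&all, SIGKILL);
 *	sigprocmask(SIG_SETMASK, &all, &old);
 */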
1961 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
1962 {
1963 	int error;
1964 
1965 	spin_lock_irq(&current->sighand->siglock);
1966 	if (oldset)
1967 		*oldset = current->blocked;
1968 
1969 	error = 0;
1970 	switch (how) {
1971 	case SIG_BLOCK:
1972 		sigorsets(&current->blocked, &current->blocked, set);
1973 		break;
1974 	case SIG_UNBLOCK:
1975 		signandsets(&current->blocked, &current->blocked, set);
1976 		break;
1977 	case SIG_SETMASK:
1978 		current->blocked = *set;
1979 		break;
1980 	default:
1981 		error = -EINVAL;
1982 	}
1983 	recalc_sigpending();
1984 	spin_unlock_irq(&current->sighand->siglock);
1985 
1986 	return error;
1987 }
1988 
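/*
 * sys_rt_sigprocmask - change the set of blocked signals for this thread.
 *
 * Copies the new mask in from user space, silently drops any attempt to
 * block SIGKILL or SIGSTOP, applies it via sigprocmask(), and optionally
 * copies the previous mask back out through @oset.
 */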
1989 asmlinkage long
1990 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
1991 {
1992 	int error = -EINVAL;
1993 	sigset_t old_set, new_set;
1994 
1995 	/* XXX: Don't preclude handling different sized sigset_t's.  */
1996 	if (sigsetsize != sizeof(sigset_t))
1997 		goto out;
1998 
1999 	if (set) {
2000 		error = -EFAULT;
2001 		if (copy_from_user(&new_set, set, sizeof(*set)))
2002 			goto out;
2003 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2004 
2005 		error = sigprocmask(how, &new_set, &old_set);
2006 		if (error)
2007 			goto out;
2008 		if (oset)
2009 			goto set_old;
2010 	} else if (oset) {
2011 		spin_lock_irq(&current->sighand->siglock);
2012 		old_set = current->blocked;
2013 		spin_unlock_irq(&current->sighand->siglock);
2014 
2015 	set_old:
2016 		error = -EFAULT;
2017 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2018 			goto out;
2019 	}
2020 	error = 0;
2021 out:
2022 	return error;
2023 }
2024 
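/*
 * do_sigpending - report which blocked signals are pending for this thread.
 *
 * The result is the union of the private and shared pending sets, masked
 * with current->blocked, copied out to the user buffer @set.
 */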
2025 long do_sigpending(void __user *set, unsigned long sigsetsize)
2026 {
2027 	long error = -EINVAL;
2028 	sigset_t pending;
2029 
2030 	if (sigsetsize > sizeof(sigset_t))
2031 		goto out;
2032 
2033 	spin_lock_irq(&current->sighand->siglock);
2034 	sigorsets(&pending, &current->pending.signal,
2035 		  &current->signal->shared_pending.signal);
2036 	spin_unlock_irq(&current->sighand->siglock);
2037 
2038 	/* Outside the lock because only this thread touches current->blocked.  */
2039 	sigandsets(&pending, &current->blocked, &pending);
2040 
2041 	error = -EFAULT;
2042 	if (!copy_to_user(set, &pending, sigsetsize))
2043 		error = 0;
2044 
2045 out:
2046 	return error;
2047 }
2048 
2049 asmlinkage long
2050 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2051 {
2052 	return do_sigpending(set, sigsetsize);
2053 }
2054 
2055 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2056 
2057 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2058 {
2059 	int err;
2060 
2061 	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2062 		return -EFAULT;
2063 	if (from->si_code < 0)
2064 		return __copy_to_user(to, from, sizeof(siginfo_t))
2065 			? -EFAULT : 0;
2066 	/*
2067 	 * If you change the siginfo_t structure, please make sure
2068 	 * this code is updated accordingly.
2069 	 * Please remember to update the signalfd_copyinfo() function
2070 	 * inside fs/signalfd.c too, in case siginfo_t changes.
2071 	 * It must never copy any padding contained in the structure
2072 	 * (to avoid security leaks), but must copy the generic
2073 	 * three ints plus the relevant union member.
2074 	 */
2075 	err = __put_user(from->si_signo, &to->si_signo);
2076 	err |= __put_user(from->si_errno, &to->si_errno);
2077 	err |= __put_user((short)from->si_code, &to->si_code);
2078 	switch (from->si_code & __SI_MASK) {
2079 	case __SI_KILL:
2080 		err |= __put_user(from->si_pid, &to->si_pid);
2081 		err |= __put_user(from->si_uid, &to->si_uid);
2082 		break;
2083 	case __SI_TIMER:
2084 		 err |= __put_user(from->si_tid, &to->si_tid);
2085 		 err |= __put_user(from->si_overrun, &to->si_overrun);
2086 		 err |= __put_user(from->si_ptr, &to->si_ptr);
2087 		break;
2088 	case __SI_POLL:
2089 		err |= __put_user(from->si_band, &to->si_band);
2090 		err |= __put_user(from->si_fd, &to->si_fd);
2091 		break;
2092 	case __SI_FAULT:
2093 		err |= __put_user(from->si_addr, &to->si_addr);
2094 #ifdef __ARCH_SI_TRAPNO
2095 		err |= __put_user(from->si_trapno, &to->si_trapno);
2096 #endif
2097 		break;
2098 	case __SI_CHLD:
2099 		err |= __put_user(from->si_pid, &to->si_pid);
2100 		err |= __put_user(from->si_uid, &to->si_uid);
2101 		err |= __put_user(from->si_status, &to->si_status);
2102 		err |= __put_user(from->si_utime, &to->si_utime);
2103 		err |= __put_user(from->si_stime, &to->si_stime);
2104 		break;
2105 	case __SI_RT: /* This is not generated by the kernel as of now. */
2106 	case __SI_MESGQ: /* But this is */
2107 		err |= __put_user(from->si_pid, &to->si_pid);
2108 		err |= __put_user(from->si_uid, &to->si_uid);
2109 		err |= __put_user(from->si_ptr, &to->si_ptr);
2110 		break;
2111 	default: /* this is just in case for now ... */
2112 		err |= __put_user(from->si_pid, &to->si_pid);
2113 		err |= __put_user(from->si_uid, &to->si_uid);
2114 		break;
2115 	}
2116 	return err;
2117 }
2118 
2119 #endif
2120 
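/*
 * sys_rt_sigtimedwait - synchronously wait for a signal in the set @uthese.
 *
 * If none of the requested signals is already pending, they are temporarily
 * unblocked and the task sleeps for the given timeout (indefinitely when
 * @uts is NULL).  Returns the dequeued signal number, -EAGAIN when the
 * timeout expires, or -EINTR if the sleep is interrupted by some other
 * signal.  User space typically reaches this through sigtimedwait() or
 * sigwaitinfo().
 */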
2121 asmlinkage long
2122 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2123 		    siginfo_t __user *uinfo,
2124 		    const struct timespec __user *uts,
2125 		    size_t sigsetsize)
2126 {
2127 	int ret, sig;
2128 	sigset_t these;
2129 	struct timespec ts;
2130 	siginfo_t info;
2131 	long timeout = 0;
2132 
2133 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2134 	if (sigsetsize != sizeof(sigset_t))
2135 		return -EINVAL;
2136 
2137 	if (copy_from_user(&these, uthese, sizeof(these)))
2138 		return -EFAULT;
2139 
2140 	/*
2141 	 * Invert the set of allowed signals to get those we
2142 	 * want to block.
2143 	 */
2144 	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2145 	signotset(&these);
2146 
2147 	if (uts) {
2148 		if (copy_from_user(&ts, uts, sizeof(ts)))
2149 			return -EFAULT;
2150 		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2151 		    || ts.tv_sec < 0)
2152 			return -EINVAL;
2153 	}
2154 
2155 	spin_lock_irq(&current->sighand->siglock);
2156 	sig = dequeue_signal(current, &these, &info);
2157 	if (!sig) {
2158 		timeout = MAX_SCHEDULE_TIMEOUT;
2159 		if (uts)
2160 			timeout = (timespec_to_jiffies(&ts)
2161 				   + (ts.tv_sec || ts.tv_nsec));
2162 
2163 		if (timeout) {
2164 			/* None ready -- temporarily unblock the signals we're
2165 			 * interested in while we sleep, so that we'll be
2166 			 * awakened when one of them arrives.  */
2167 			current->real_blocked = current->blocked;
2168 			sigandsets(&current->blocked, &current->blocked, &these);
2169 			recalc_sigpending();
2170 			spin_unlock_irq(&current->sighand->siglock);
2171 
2172 			timeout = schedule_timeout_interruptible(timeout);
2173 
2174 			spin_lock_irq(&current->sighand->siglock);
2175 			sig = dequeue_signal(current, &these, &info);
2176 			current->blocked = current->real_blocked;
2177 			siginitset(&current->real_blocked, 0);
2178 			recalc_sigpending();
2179 		}
2180 	}
2181 	spin_unlock_irq(&current->sighand->siglock);
2182 
2183 	if (sig) {
2184 		ret = sig;
2185 		if (uinfo) {
2186 			if (copy_siginfo_to_user(uinfo, &info))
2187 				ret = -EFAULT;
2188 		}
2189 	} else {
2190 		ret = -EAGAIN;
2191 		if (timeout)
2192 			ret = -EINTR;
2193 	}
2194 
2195 	return ret;
2196 }
2197 
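/*
 * sys_kill - send a signal to a process, a process group, or broadcast.
 *
 * The meaning of @pid (a positive pid, 0 for the caller's process group,
 * -1 for every process the caller may signal, or a negative process group)
 * is decoded by kill_something_info().
 */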
2198 asmlinkage long
2199 sys_kill(int pid, int sig)
2200 {
2201 	struct siginfo info;
2202 
2203 	info.si_signo = sig;
2204 	info.si_errno = 0;
2205 	info.si_code = SI_USER;
2206 	info.si_pid = current->tgid;
2207 	info.si_uid = current->uid;
2208 
2209 	return kill_something_info(sig, &info, pid);
2210 }
2211 
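/*
 * do_tkill - common code for sys_tkill() and sys_tgkill().
 *
 * Sends @sig to the single thread @pid, optionally verifying that it still
 * belongs to thread group @tgid.  As with kill(), a signal number of 0 only
 * performs the existence and permission checks.
 */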
2212 static int do_tkill(int tgid, int pid, int sig)
2213 {
2214 	int error;
2215 	struct siginfo info;
2216 	struct task_struct *p;
2217 
2218 	error = -ESRCH;
2219 	info.si_signo = sig;
2220 	info.si_errno = 0;
2221 	info.si_code = SI_TKILL;
2222 	info.si_pid = current->tgid;
2223 	info.si_uid = current->uid;
2224 
2225 	read_lock(&tasklist_lock);
2226 	p = find_task_by_pid(pid);
2227 	if (p && (tgid <= 0 || p->tgid == tgid)) {
2228 		error = check_kill_permission(sig, &info, p);
2229 		/*
2230 		 * The null signal is a permissions and process existence
2231 		 * probe.  No signal is actually delivered.
2232 		 */
2233 		if (!error && sig && p->sighand) {
2234 			spin_lock_irq(&p->sighand->siglock);
2235 			handle_stop_signal(sig, p);
2236 			error = specific_send_sig_info(sig, &info, p);
2237 			spin_unlock_irq(&p->sighand->siglock);
2238 		}
2239 	}
2240 	read_unlock(&tasklist_lock);
2241 
2242 	return error;
2243 }
2244 
2245 /**
2246  *  sys_tgkill - send signal to one specific thread
2247  *  @tgid: the thread group ID of the thread
2248  *  @pid: the PID of the thread
2249  *  @sig: signal to be sent
2250  *
2251  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2252  *  exists but no longer belongs to the target process. This avoids
2253  *  signalling the wrong task when a thread exits and its PID is reused.
2254  */
2255 asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2256 {
2257 	/* This is only valid for single tasks */
2258 	if (pid <= 0 || tgid <= 0)
2259 		return -EINVAL;
2260 
2261 	return do_tkill(tgid, pid, sig);
2262 }
2263 
2264 /*
2265  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2266  */
2267 asmlinkage long
2268 sys_tkill(int pid, int sig)
2269 {
2270 	/* This is only valid for single tasks */
2271 	if (pid <= 0)
2272 		return -EINVAL;
2273 
2274 	return do_tkill(0, pid, sig);
2275 }
2276 
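/*
 * sys_rt_sigqueueinfo - send a signal with caller-supplied siginfo.
 *
 * This is what sigqueue(3) is built on.  The si_code must be negative so
 * that user space cannot forge kernel-generated or kill()-style siginfo.
 */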
2277 asmlinkage long
2278 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2279 {
2280 	siginfo_t info;
2281 
2282 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2283 		return -EFAULT;
2284 
2285 	/* Not even root can pretend to send signals from the kernel.
2286 	   Nor can they impersonate a kill(), which adds source info.  */
2287 	if (info.si_code >= 0)
2288 		return -EPERM;
2289 	info.si_signo = sig;
2290 
2291 	/* POSIX.1b doesn't mention process groups.  */
2292 	return kill_proc_info(sig, &info, pid);
2293 }
2294 
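/*
 * do_sigaction - install a new action for @sig and/or return the old one.
 *
 * As required by POSIX (see the comment below), switching the action to
 * SIG_IGN, or to SIG_DFL for a signal whose default is to ignore, also
 * discards any matching signals already queued against the thread group.
 */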
2295 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2296 {
2297 	struct k_sigaction *k;
2298 	sigset_t mask;
2299 
2300 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2301 		return -EINVAL;
2302 
2303 	k = &current->sighand->action[sig-1];
2304 
2305 	spin_lock_irq(&current->sighand->siglock);
2306 	if (signal_pending(current)) {
2307 		/*
2308 		 * If there might be a fatal signal pending on multiple
2309 		 * threads, make sure we take it before changing the action.
2310 		 */
2311 		spin_unlock_irq(&current->sighand->siglock);
2312 		return -ERESTARTNOINTR;
2313 	}
2314 
2315 	if (oact)
2316 		*oact = *k;
2317 
2318 	if (act) {
2319 		sigdelsetmask(&act->sa.sa_mask,
2320 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
2321 		*k = *act;
2322 		/*
2323 		 * POSIX 3.3.1.3:
2324 		 *  "Setting a signal action to SIG_IGN for a signal that is
2325 		 *   pending shall cause the pending signal to be discarded,
2326 		 *   whether or not it is blocked."
2327 		 *
2328 		 *  "Setting a signal action to SIG_DFL for a signal that is
2329 		 *   pending and whose default action is to ignore the signal
2330 		 *   (for example, SIGCHLD), shall cause the pending signal to
2331 		 *   be discarded, whether or not it is blocked"
2332 		 */
2333 		if (act->sa.sa_handler == SIG_IGN ||
2334 		   (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
2335 			struct task_struct *t = current;
2336 			sigemptyset(&mask);
2337 			sigaddset(&mask, sig);
2338 			rm_from_queue_full(&mask, &t->signal->shared_pending);
2339 			do {
2340 				rm_from_queue_full(&mask, &t->pending);
2341 				recalc_sigpending_and_wake(t);
2342 				t = next_thread(t);
2343 			} while (t != current);
2344 		}
2345 	}
2346 
2347 	spin_unlock_irq(&current->sighand->siglock);
2348 	return 0;
2349 }
2350 
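/*
 * do_sigaltstack - examine and/or change the alternate signal stack.
 *
 * Changing the stack is refused with -EPERM while the task is currently
 * executing on it, and a new stack must be at least MINSIGSTKSZ bytes
 * unless SS_DISABLE is requested.  A typical user-space setup, just for
 * illustration, looks like:
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	sigaltstack(&ss, NULL);
 */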
2351 int
2352 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2353 {
2354 	stack_t oss;
2355 	int error;
2356 
2357 	if (uoss) {
2358 		oss.ss_sp = (void __user *) current->sas_ss_sp;
2359 		oss.ss_size = current->sas_ss_size;
2360 		oss.ss_flags = sas_ss_flags(sp);
2361 	}
2362 
2363 	if (uss) {
2364 		void __user *ss_sp;
2365 		size_t ss_size;
2366 		int ss_flags;
2367 
2368 		error = -EFAULT;
2369 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2370 		    || __get_user(ss_sp, &uss->ss_sp)
2371 		    || __get_user(ss_flags, &uss->ss_flags)
2372 		    || __get_user(ss_size, &uss->ss_size))
2373 			goto out;
2374 
2375 		error = -EPERM;
2376 		if (on_sig_stack(sp))
2377 			goto out;
2378 
2379 		error = -EINVAL;
2380 		/*
2381 		 *
2382 		 * Note - this code used to test ss_flags incorrectly;
2383 		 * old code may have been written using ss_flags==0
2384 		 * to mean ss_flags==SS_ONSTACK (as this was the only
2385 		 * way that worked), so the check below also accepts
2386 		 * ss_flags==0 to preserve that older mechanism.
2387 		 */
2388 		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2389 			goto out;
2390 
2391 		if (ss_flags == SS_DISABLE) {
2392 			ss_size = 0;
2393 			ss_sp = NULL;
2394 		} else {
2395 			error = -ENOMEM;
2396 			if (ss_size < MINSIGSTKSZ)
2397 				goto out;
2398 		}
2399 
2400 		current->sas_ss_sp = (unsigned long) ss_sp;
2401 		current->sas_ss_size = ss_size;
2402 	}
2403 
2404 	if (uoss) {
2405 		error = -EFAULT;
2406 		if (copy_to_user(uoss, &oss, sizeof(oss)))
2407 			goto out;
2408 	}
2409 
2410 	error = 0;
2411 out:
2412 	return error;
2413 }
2414 
2415 #ifdef __ARCH_WANT_SYS_SIGPENDING
2416 
2417 asmlinkage long
2418 sys_sigpending(old_sigset_t __user *set)
2419 {
2420 	return do_sigpending(set, sizeof(*set));
2421 }
2422 
2423 #endif
2424 
2425 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2426 /* Some platforms have their own version with special arguments; others
2427    support only sys_rt_sigprocmask.  */
2428 
2429 asmlinkage long
2430 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2431 {
2432 	int error;
2433 	old_sigset_t old_set, new_set;
2434 
2435 	if (set) {
2436 		error = -EFAULT;
2437 		if (copy_from_user(&new_set, set, sizeof(*set)))
2438 			goto out;
2439 		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2440 
2441 		spin_lock_irq(&current->sighand->siglock);
2442 		old_set = current->blocked.sig[0];
2443 
2444 		error = 0;
2445 		switch (how) {
2446 		default:
2447 			error = -EINVAL;
2448 			break;
2449 		case SIG_BLOCK:
2450 			sigaddsetmask(&current->blocked, new_set);
2451 			break;
2452 		case SIG_UNBLOCK:
2453 			sigdelsetmask(&current->blocked, new_set);
2454 			break;
2455 		case SIG_SETMASK:
2456 			current->blocked.sig[0] = new_set;
2457 			break;
2458 		}
2459 
2460 		recalc_sigpending();
2461 		spin_unlock_irq(&current->sighand->siglock);
2462 		if (error)
2463 			goto out;
2464 		if (oset)
2465 			goto set_old;
2466 	} else if (oset) {
2467 		old_set = current->blocked.sig[0];
2468 	set_old:
2469 		error = -EFAULT;
2470 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2471 			goto out;
2472 	}
2473 	error = 0;
2474 out:
2475 	return error;
2476 }
2477 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2478 
2479 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2480 asmlinkage long
2481 sys_rt_sigaction(int sig,
2482 		 const struct sigaction __user *act,
2483 		 struct sigaction __user *oact,
2484 		 size_t sigsetsize)
2485 {
2486 	struct k_sigaction new_sa, old_sa;
2487 	int ret = -EINVAL;
2488 
2489 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2490 	if (sigsetsize != sizeof(sigset_t))
2491 		goto out;
2492 
2493 	if (act) {
2494 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2495 			return -EFAULT;
2496 	}
2497 
2498 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2499 
2500 	if (!ret && oact) {
2501 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2502 			return -EFAULT;
2503 	}
2504 out:
2505 	return ret;
2506 }
2507 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2508 
2509 #ifdef __ARCH_WANT_SYS_SGETMASK
2510 
2511 /*
2512  * For backwards compatibility.  Functionality superseded by sigprocmask.
2513  */
2514 asmlinkage long
2515 sys_sgetmask(void)
2516 {
2517 	/* SMP safe */
2518 	return current->blocked.sig[0];
2519 }
2520 
2521 asmlinkage long
2522 sys_ssetmask(int newmask)
2523 {
2524 	int old;
2525 
2526 	spin_lock_irq(&current->sighand->siglock);
2527 	old = current->blocked.sig[0];
2528 
2529 	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2530 						  sigmask(SIGSTOP)));
2531 	recalc_sigpending();
2532 	spin_unlock_irq(&current->sighand->siglock);
2533 
2534 	return old;
2535 }
2536 #endif /* __ARCH_WANT_SYS_SGETMASK */
2537 
2538 #ifdef __ARCH_WANT_SYS_SIGNAL
2539 /*
2540  * For backwards compatibility.  Functionality superseded by sigaction.
2541  */
2542 asmlinkage unsigned long
2543 sys_signal(int sig, __sighandler_t handler)
2544 {
2545 	struct k_sigaction new_sa, old_sa;
2546 	int ret;
2547 
2548 	new_sa.sa.sa_handler = handler;
2549 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2550 	sigemptyset(&new_sa.sa.sa_mask);
2551 
2552 	ret = do_sigaction(sig, &new_sa, &old_sa);
2553 
2554 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2555 }
2556 #endif /* __ARCH_WANT_SYS_SIGNAL */
2557 
2558 #ifdef __ARCH_WANT_SYS_PAUSE
2559 
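/*
 * sys_pause - sleep until any signal is delivered.
 *
 * -ERESTARTNOHAND makes the call restart transparently unless a signal
 * handler was actually invoked, in which case user space sees -EINTR.
 */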
2560 asmlinkage long
2561 sys_pause(void)
2562 {
2563 	current->state = TASK_INTERRUPTIBLE;
2564 	schedule();
2565 	return -ERESTARTNOHAND;
2566 }
2567 
2568 #endif
2569 
2570 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
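/*
 * sys_rt_sigsuspend - atomically replace the blocked mask and sleep.
 *
 * The old mask is saved in current->saved_sigmask; TIF_RESTORE_SIGMASK
 * tells the architecture signal code to restore it once the pending signal
 * has been taken, so the caller wakes up with its original mask intact.
 */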
2571 asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2572 {
2573 	sigset_t newset;
2574 
2575 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2576 	if (sigsetsize != sizeof(sigset_t))
2577 		return -EINVAL;
2578 
2579 	if (copy_from_user(&newset, unewset, sizeof(newset)))
2580 		return -EFAULT;
2581 	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2582 
2583 	spin_lock_irq(&current->sighand->siglock);
2584 	current->saved_sigmask = current->blocked;
2585 	current->blocked = newset;
2586 	recalc_sigpending();
2587 	spin_unlock_irq(&current->sighand->siglock);
2588 
2589 	current->state = TASK_INTERRUPTIBLE;
2590 	schedule();
2591 	set_thread_flag(TIF_RESTORE_SIGMASK);
2592 	return -ERESTARTNOHAND;
2593 }
2594 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
2595 
2596 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2597 {
2598 	return NULL;
2599 }
2600 
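/*
 * signals_init - create the SLAB cache backing preallocated sigqueue
 * entries; called once from init/main.c during boot.
 */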
2601 void __init signals_init(void)
2602 {
2603 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2604 }
2605