xref: /linux/kernel/signal.c (revision 5e8d780d745c1619aba81fe7166c5a4b5cad2b84)
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *		Changes to use preallocated sigqueue structures
10  *		to allow signals to be sent reliably.
11  */
12 
13 #include <linux/config.h>
14 #include <linux/slab.h>
15 #include <linux/module.h>
16 #include <linux/smp_lock.h>
17 #include <linux/init.h>
18 #include <linux/sched.h>
19 #include <linux/fs.h>
20 #include <linux/tty.h>
21 #include <linux/binfmts.h>
22 #include <linux/security.h>
23 #include <linux/syscalls.h>
24 #include <linux/ptrace.h>
25 #include <linux/signal.h>
26 #include <linux/capability.h>
27 #include <asm/param.h>
28 #include <asm/uaccess.h>
29 #include <asm/unistd.h>
30 #include <asm/siginfo.h>
31 #include "audit.h"	/* audit_signal_info() */
32 
33 /*
34  * SLAB caches for signal bits.
35  */
36 
37 static kmem_cache_t *sigqueue_cachep;
38 
39 /*
40  * In POSIX a signal is sent either to a specific thread (Linux task)
41  * or to the process as a whole (Linux thread group).  How the signal
42  * is sent determines whether it's to one thread or the whole group,
43  * which determines which signal mask(s) are involved in blocking it
44  * from being delivered until later.  When the signal is delivered,
45  * either it's caught or ignored by a user handler or it has a default
46  * effect that applies to the whole thread group (POSIX process).
47  *
48  * The possible effects an unblocked signal set to SIG_DFL can have are:
49  *   ignore	- Nothing Happens
50  *   terminate	- kill the process, i.e. all threads in the group,
51  * 		  similar to exit_group.  The group leader (only) reports
52  *		  WIFSIGNALED status to its parent.
53  *   coredump	- write a core dump file describing all threads using
54  *		  the same mm and then kill all those threads
55  *   stop 	- stop all the threads in the group, i.e. TASK_STOPPED state
56  *
57  * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
58  * Other signals, when not blocked and set to SIG_DFL, behave as follows.
59  * The job control signals also have other special effects.
60  *
61  *	+--------------------+------------------+
62  *	|  POSIX signal      |  default action  |
63  *	+--------------------+------------------+
64  *	|  SIGHUP            |  terminate	|
65  *	|  SIGINT            |	terminate	|
66  *	|  SIGQUIT           |	coredump 	|
67  *	|  SIGILL            |	coredump 	|
68  *	|  SIGTRAP           |	coredump 	|
69  *	|  SIGABRT/SIGIOT    |	coredump 	|
70  *	|  SIGBUS            |	coredump 	|
71  *	|  SIGFPE            |	coredump 	|
72  *	|  SIGKILL           |	terminate(+)	|
73  *	|  SIGUSR1           |	terminate	|
74  *	|  SIGSEGV           |	coredump 	|
75  *	|  SIGUSR2           |	terminate	|
76  *	|  SIGPIPE           |	terminate	|
77  *	|  SIGALRM           |	terminate	|
78  *	|  SIGTERM           |	terminate	|
79  *	|  SIGCHLD           |	ignore   	|
80  *	|  SIGCONT           |	ignore(*)	|
81  *	|  SIGSTOP           |	stop(*)(+)  	|
82  *	|  SIGTSTP           |	stop(*)  	|
83  *	|  SIGTTIN           |	stop(*)  	|
84  *	|  SIGTTOU           |	stop(*)  	|
85  *	|  SIGURG            |	ignore   	|
86  *	|  SIGXCPU           |	coredump 	|
87  *	|  SIGXFSZ           |	coredump 	|
88  *	|  SIGVTALRM         |	terminate	|
89  *	|  SIGPROF           |	terminate	|
90  *	|  SIGPOLL/SIGIO     |	terminate	|
91  *	|  SIGSYS/SIGUNUSED  |	coredump 	|
92  *	|  SIGSTKFLT         |	terminate	|
93  *	|  SIGWINCH          |	ignore   	|
94  *	|  SIGPWR            |	terminate	|
95  *	|  SIGRTMIN-SIGRTMAX |	terminate       |
96  *	+--------------------+------------------+
97  *	|  non-POSIX signal  |  default action  |
98  *	+--------------------+------------------+
99  *	|  SIGEMT            |  coredump	|
100  *	+--------------------+------------------+
101  *
102  * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
103  * (*) Special job control effects:
104  * When SIGCONT is sent, it resumes the process (all threads in the group)
105  * from TASK_STOPPED state and also clears any pending/queued stop signals
106  * (any of those marked with "stop(*)").  This happens regardless of blocking,
107  * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
108  * any pending/queued SIGCONT signals; this happens regardless of blocking,
109  * catching, or ignoring the stop signal, though (except for SIGSTOP) the
110  * default action of stopping the process may happen later or never.
111  */
112 
113 #ifdef SIGEMT
114 #define M_SIGEMT	M(SIGEMT)
115 #else
116 #define M_SIGEMT	0
117 #endif
118 
119 #if SIGRTMIN > BITS_PER_LONG
120 #define M(sig) (1ULL << ((sig)-1))
121 #else
122 #define M(sig) (1UL << ((sig)-1))
123 #endif
124 #define T(sig, mask) (M(sig) & (mask))
125 
126 #define SIG_KERNEL_ONLY_MASK (\
127 	M(SIGKILL)   |  M(SIGSTOP)                                   )
128 
129 #define SIG_KERNEL_STOP_MASK (\
130 	M(SIGSTOP)   |  M(SIGTSTP)   |  M(SIGTTIN)   |  M(SIGTTOU)   )
131 
132 #define SIG_KERNEL_COREDUMP_MASK (\
133         M(SIGQUIT)   |  M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)   | \
134         M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGBUS)    |  M(SIGSYS)    | \
135         M(SIGXCPU)   |  M(SIGXFSZ)   |  M_SIGEMT                     )
136 
137 #define SIG_KERNEL_IGNORE_MASK (\
138         M(SIGCONT)   |  M(SIGCHLD)   |  M(SIGWINCH)  |  M(SIGURG)    )
139 
140 #define sig_kernel_only(sig) \
141 		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_ONLY_MASK))
142 #define sig_kernel_coredump(sig) \
143 		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_COREDUMP_MASK))
144 #define sig_kernel_ignore(sig) \
145 		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_IGNORE_MASK))
146 #define sig_kernel_stop(sig) \
147 		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK))
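/*
 * For example, sig_kernel_stop(SIGTSTP) is non-zero while
 * sig_kernel_stop(SIGRTMIN) is 0: realtime signals are never in the
 * SIG_KERNEL_* masks above, so an unhandled realtime signal always takes
 * the default terminate path rather than a kernel-internal one.
 */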
148 
149 #define sig_needs_tasklist(sig)	((sig) == SIGCONT)
150 
151 #define sig_user_defined(t, signr) \
152 	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
153 	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
154 
155 #define sig_fatal(t, signr) \
156 	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
157 	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
158 
159 static int sig_ignored(struct task_struct *t, int sig)
160 {
161 	void __user * handler;
162 
163 	/*
164 	 * Tracers always want to know about signals.
165 	 */
166 	if (t->ptrace & PT_PTRACED)
167 		return 0;
168 
169 	/*
170 	 * Blocked signals are never ignored, since the
171 	 * signal handler may change by the time it is
172 	 * unblocked.
173 	 */
174 	if (sigismember(&t->blocked, sig))
175 		return 0;
176 
177 	/* Is it explicitly or implicitly ignored? */
178 	handler = t->sighand->action[sig-1].sa.sa_handler;
179 	return   handler == SIG_IGN ||
180 		(handler == SIG_DFL && sig_kernel_ignore(sig));
181 }
182 
183 /*
184  * Re-calculate pending state from the set of locally pending
185  * signals, globally pending signals, and blocked signals.
186  */
187 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
188 {
189 	unsigned long ready;
190 	long i;
191 
192 	switch (_NSIG_WORDS) {
193 	default:
194 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
195 			ready |= signal->sig[i] &~ blocked->sig[i];
196 		break;
197 
198 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
199 		ready |= signal->sig[2] &~ blocked->sig[2];
200 		ready |= signal->sig[1] &~ blocked->sig[1];
201 		ready |= signal->sig[0] &~ blocked->sig[0];
202 		break;
203 
204 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
205 		ready |= signal->sig[0] &~ blocked->sig[0];
206 		break;
207 
208 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
209 	}
210 	return ready != 0;
211 }
212 
213 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
214 
215 fastcall void recalc_sigpending_tsk(struct task_struct *t)
216 {
217 	if (t->signal->group_stop_count > 0 ||
218 	    (freezing(t)) ||
219 	    PENDING(&t->pending, &t->blocked) ||
220 	    PENDING(&t->signal->shared_pending, &t->blocked))
221 		set_tsk_thread_flag(t, TIF_SIGPENDING);
222 	else
223 		clear_tsk_thread_flag(t, TIF_SIGPENDING);
224 }
225 
226 void recalc_sigpending(void)
227 {
228 	recalc_sigpending_tsk(current);
229 }
230 
231 /* Given the mask, find the first available signal that should be serviced. */
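/*
 * It favours the lowest-numbered deliverable signal: ffz(~x) yields the
 * lowest set bit in x and the words are scanned from sig[0] upwards, so
 * with, say, SIGUSR1 and SIGTERM both pending and unblocked, SIGUSR1 is
 * returned first.
 */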
232 
233 static int
234 next_signal(struct sigpending *pending, sigset_t *mask)
235 {
236 	unsigned long i, *s, *m, x;
237 	int sig = 0;
238 
239 	s = pending->signal.sig;
240 	m = mask->sig;
241 	switch (_NSIG_WORDS) {
242 	default:
243 		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
244 			if ((x = *s &~ *m) != 0) {
245 				sig = ffz(~x) + i*_NSIG_BPW + 1;
246 				break;
247 			}
248 		break;
249 
250 	case 2: if ((x = s[0] &~ m[0]) != 0)
251 			sig = 1;
252 		else if ((x = s[1] &~ m[1]) != 0)
253 			sig = _NSIG_BPW + 1;
254 		else
255 			break;
256 		sig += ffz(~x);
257 		break;
258 
259 	case 1: if ((x = *s &~ *m) != 0)
260 			sig = ffz(~x) + 1;
261 		break;
262 	}
263 
264 	return sig;
265 }
266 
267 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
268 					 int override_rlimit)
269 {
270 	struct sigqueue *q = NULL;
271 
272 	atomic_inc(&t->user->sigpending);
273 	if (override_rlimit ||
274 	    atomic_read(&t->user->sigpending) <=
275 			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
276 		q = kmem_cache_alloc(sigqueue_cachep, flags);
277 	if (unlikely(q == NULL)) {
278 		atomic_dec(&t->user->sigpending);
279 	} else {
280 		INIT_LIST_HEAD(&q->list);
281 		q->flags = 0;
282 		q->user = get_uid(t->user);
283 	}
284 	return(q);
285 }
286 
287 static void __sigqueue_free(struct sigqueue *q)
288 {
289 	if (q->flags & SIGQUEUE_PREALLOC)
290 		return;
291 	atomic_dec(&q->user->sigpending);
292 	free_uid(q->user);
293 	kmem_cache_free(sigqueue_cachep, q);
294 }
295 
296 void flush_sigqueue(struct sigpending *queue)
297 {
298 	struct sigqueue *q;
299 
300 	sigemptyset(&queue->signal);
301 	while (!list_empty(&queue->list)) {
302 		q = list_entry(queue->list.next, struct sigqueue , list);
303 		list_del_init(&q->list);
304 		__sigqueue_free(q);
305 	}
306 }
307 
308 /*
309  * Flush all pending signals for a task.
310  */
311 void flush_signals(struct task_struct *t)
312 {
313 	unsigned long flags;
314 
315 	spin_lock_irqsave(&t->sighand->siglock, flags);
316 	clear_tsk_thread_flag(t,TIF_SIGPENDING);
317 	flush_sigqueue(&t->pending);
318 	flush_sigqueue(&t->signal->shared_pending);
319 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
320 }
321 
322 /*
323  * Flush all handlers for a task.
324  */
325 
326 void
327 flush_signal_handlers(struct task_struct *t, int force_default)
328 {
329 	int i;
330 	struct k_sigaction *ka = &t->sighand->action[0];
331 	for (i = _NSIG ; i != 0 ; i--) {
332 		if (force_default || ka->sa.sa_handler != SIG_IGN)
333 			ka->sa.sa_handler = SIG_DFL;
334 		ka->sa.sa_flags = 0;
335 		sigemptyset(&ka->sa.sa_mask);
336 		ka++;
337 	}
338 }
339 
340 
341 /* Notify the system that a driver wants to block all signals for this
342  * process, and wants to be notified if any signals at all were to be
343  * sent/acted upon.  If the notifier routine returns non-zero, then the
344  * signal will be acted upon after all.  If the notifier routine returns 0,
345  * then the signal will be blocked.  Only one block per process is
346  * allowed.  priv is a pointer to private data that the notifier routine
347  * can use to determine if the signal should be blocked or not.  */
348 
349 void
350 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
351 {
352 	unsigned long flags;
353 
354 	spin_lock_irqsave(&current->sighand->siglock, flags);
355 	current->notifier_mask = mask;
356 	current->notifier_data = priv;
357 	current->notifier = notifier;
358 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
359 }
360 
361 /* Notify the system that blocking has ended. */
362 
363 void
364 unblock_all_signals(void)
365 {
366 	unsigned long flags;
367 
368 	spin_lock_irqsave(&current->sighand->siglock, flags);
369 	current->notifier = NULL;
370 	current->notifier_data = NULL;
371 	recalc_sigpending();
372 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
373 }
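/*
 * Illustrative sketch (not part of the original source): a driver using
 * the notifier interface above.  Returning 0 from the notifier blocks
 * the signal, non-zero lets it be acted upon.  The names my_notifier,
 * my_data and pass_signals are hypothetical.
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_data *d = priv;
 *		return d->pass_signals;
 *	}
 *
 *	sigset_t mask;
 *	sigfillset(&mask);
 *	block_all_signals(my_notifier, &my_data, &mask);
 *	... section during which signals are screened by my_notifier ...
 *	unblock_all_signals();
 */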
374 
375 static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
376 {
377 	struct sigqueue *q, *first = NULL;
378 	int still_pending = 0;
379 
380 	if (unlikely(!sigismember(&list->signal, sig)))
381 		return 0;
382 
383 	/*
384 	 * Collect the siginfo appropriate to this signal.  Check if
385 	 * there is another siginfo for the same signal.
386 	 */
387 	list_for_each_entry(q, &list->list, list) {
388 		if (q->info.si_signo == sig) {
389 			if (first) {
390 				still_pending = 1;
391 				break;
392 			}
393 			first = q;
394 		}
395 	}
396 	if (first) {
397 		list_del_init(&first->list);
398 		copy_siginfo(info, &first->info);
399 		__sigqueue_free(first);
400 		if (!still_pending)
401 			sigdelset(&list->signal, sig);
402 	} else {
403 
404 		/* Ok, it wasn't in the queue.  This must be
405 		   a fast-pathed signal or we must have been
406 		   out of queue space.  So zero out the info.
407 		 */
408 		sigdelset(&list->signal, sig);
409 		info->si_signo = sig;
410 		info->si_errno = 0;
411 		info->si_code = 0;
412 		info->si_pid = 0;
413 		info->si_uid = 0;
414 	}
415 	return 1;
416 }
417 
418 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
419 			siginfo_t *info)
420 {
421 	int sig = 0;
422 
423 	sig = next_signal(pending, mask);
424 	if (sig) {
425 		if (current->notifier) {
426 			if (sigismember(current->notifier_mask, sig)) {
427 				if (!(current->notifier)(current->notifier_data)) {
428 					clear_thread_flag(TIF_SIGPENDING);
429 					return 0;
430 				}
431 			}
432 		}
433 
434 		if (!collect_signal(sig, pending, info))
435 			sig = 0;
436 
437 	}
438 	recalc_sigpending();
439 
440 	return sig;
441 }
442 
443 /*
444  * Dequeue a signal and return the element to the caller, which is
445  * expected to free it.
446  *
447  * All callers have to hold the siglock.
448  */
449 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
450 {
451 	int signr = __dequeue_signal(&tsk->pending, mask, info);
452 	if (!signr)
453 		signr = __dequeue_signal(&tsk->signal->shared_pending,
454 					 mask, info);
455  	if (signr && unlikely(sig_kernel_stop(signr))) {
456  		/*
457  		 * Set a marker that we have dequeued a stop signal.  Our
458  		 * caller might release the siglock and then the pending
459  		 * stop signal it is about to process is no longer in the
460  		 * pending bitmasks, but must still be cleared by a SIGCONT
461  		 * (and overruled by a SIGKILL).  So those cases clear this
462  		 * shared flag after we've set it.  Note that this flag may
463  		 * remain set after the signal we return is ignored or
464  		 * handled.  That doesn't matter because its only purpose
465  		 * is to alert stop-signal processing code when another
466  		 * processor has come along and cleared the flag.
467  		 */
468  		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
469  			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
470  	}
471 	if ( signr &&
472 	     ((info->si_code & __SI_MASK) == __SI_TIMER) &&
473 	     info->si_sys_private){
474 		/*
475 		 * Release the siglock to ensure proper locking order
476 		 * of timer locks outside of siglocks.  Note, we leave
477 		 * irqs disabled here, since the posix-timers code is
478 		 * about to disable them again anyway.
479 		 */
480 		spin_unlock(&tsk->sighand->siglock);
481 		do_schedule_next_timer(info);
482 		spin_lock(&tsk->sighand->siglock);
483 	}
484 	return signr;
485 }
486 
487 /*
488  * Tell a process that it has a new active signal.
489  *
490  * NOTE! we rely on the previous spin_lock to
491  * lock interrupts for us! We can only be called with
492  * "siglock" held, and local interrupts must
493  * have been disabled when it was acquired!
494  *
495  * No need to set need_resched since signal event passing
496  * goes through ->blocked
497  */
498 void signal_wake_up(struct task_struct *t, int resume)
499 {
500 	unsigned int mask;
501 
502 	set_tsk_thread_flag(t, TIF_SIGPENDING);
503 
504 	/*
505 	 * For SIGKILL, we want to wake it up in the stopped/traced case.
506 	 * We don't check t->state here because there is a race with it
507 	 * executing on another processor and just now entering stopped state.
508 	 * By using wake_up_state, we ensure the process will wake up and
509 	 * handle its death signal.
510 	 */
511 	mask = TASK_INTERRUPTIBLE;
512 	if (resume)
513 		mask |= TASK_STOPPED | TASK_TRACED;
514 	if (!wake_up_state(t, mask))
515 		kick_process(t);
516 }
517 
518 /*
519  * Remove signals in mask from the pending set and queue.
520  * Returns 1 if any signals were found.
521  *
522  * All callers must be holding the siglock.
523  *
524  * This version takes a sigset mask and looks at all signals,
525  * not just those in the first mask word.
526  */
527 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
528 {
529 	struct sigqueue *q, *n;
530 	sigset_t m;
531 
532 	sigandsets(&m, mask, &s->signal);
533 	if (sigisemptyset(&m))
534 		return 0;
535 
536 	signandsets(&s->signal, &s->signal, mask);
537 	list_for_each_entry_safe(q, n, &s->list, list) {
538 		if (sigismember(mask, q->info.si_signo)) {
539 			list_del_init(&q->list);
540 			__sigqueue_free(q);
541 		}
542 	}
543 	return 1;
544 }
545 /*
546  * Remove signals in mask from the pending set and queue.
547  * Returns 1 if any signals were found.
548  *
549  * All callers must be holding the siglock.
550  */
551 static int rm_from_queue(unsigned long mask, struct sigpending *s)
552 {
553 	struct sigqueue *q, *n;
554 
555 	if (!sigtestsetmask(&s->signal, mask))
556 		return 0;
557 
558 	sigdelsetmask(&s->signal, mask);
559 	list_for_each_entry_safe(q, n, &s->list, list) {
560 		if (q->info.si_signo < SIGRTMIN &&
561 		    (mask & sigmask(q->info.si_signo))) {
562 			list_del_init(&q->list);
563 			__sigqueue_free(q);
564 		}
565 	}
566 	return 1;
567 }
568 
569 /*
570  * Bad permissions for sending the signal
571  */
572 static int check_kill_permission(int sig, struct siginfo *info,
573 				 struct task_struct *t)
574 {
575 	int error = -EINVAL;
576 	if (!valid_signal(sig))
577 		return error;
578 	error = -EPERM;
579 	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
580 	    && ((sig != SIGCONT) ||
581 		(current->signal->session != t->signal->session))
582 	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
583 	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
584 	    && !capable(CAP_KILL))
585 		return error;
586 
587 	error = security_task_kill(t, info, sig, 0);
588 	if (!error)
589 		audit_signal_info(sig, t); /* Let audit system see the signal */
590 	return error;
591 }
592 
593 /* forward decl */
594 static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
595 
596 /*
597  * Handle magic process-wide effects of stop/continue signals.
598  * Unlike the signal actions, these happen immediately at signal-generation
599  * time regardless of blocking, ignoring, or handling.  This does the
600  * actual continuing for SIGCONT, but not the actual stopping for stop
601  * signals.  The process stop is done as a signal action for SIG_DFL.
602  */
603 static void handle_stop_signal(int sig, struct task_struct *p)
604 {
605 	struct task_struct *t;
606 
607 	if (p->signal->flags & SIGNAL_GROUP_EXIT)
608 		/*
609 		 * The process is in the middle of dying already.
610 		 */
611 		return;
612 
613 	if (sig_kernel_stop(sig)) {
614 		/*
615 		 * This is a stop signal.  Remove SIGCONT from all queues.
616 		 */
617 		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
618 		t = p;
619 		do {
620 			rm_from_queue(sigmask(SIGCONT), &t->pending);
621 			t = next_thread(t);
622 		} while (t != p);
623 	} else if (sig == SIGCONT) {
624 		/*
625 		 * Remove all stop signals from all queues,
626 		 * and wake all threads.
627 		 */
628 		if (unlikely(p->signal->group_stop_count > 0)) {
629 			/*
630 			 * There was a group stop in progress.  We'll
631 			 * pretend it finished before we got here.  We are
632 			 * obliged to report it to the parent: if the
633 			 * SIGSTOP happened "after" this SIGCONT, then it
634 			 * would have cleared this pending SIGCONT.  If it
635 			 * happened "before" this SIGCONT, then the parent
636 			 * got the SIGCHLD about the stop finishing before
637 			 * the continue happened.  We do the notification
638 			 * now, and it's as if the stop had finished and
639 			 * the SIGCHLD was pending on entry to this kill.
640 			 */
641 			p->signal->group_stop_count = 0;
642 			p->signal->flags = SIGNAL_STOP_CONTINUED;
643 			spin_unlock(&p->sighand->siglock);
644 			do_notify_parent_cldstop(p, CLD_STOPPED);
645 			spin_lock(&p->sighand->siglock);
646 		}
647 		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
648 		t = p;
649 		do {
650 			unsigned int state;
651 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
652 
653 			/*
654 			 * If there is a handler for SIGCONT, we must make
655 			 * sure that no thread returns to user mode before
656 			 * we post the signal, in case it was the only
657 			 * thread eligible to run the signal handler--then
658 			 * it must not do anything between resuming and
659 			 * running the handler.  With the TIF_SIGPENDING
660 			 * flag set, the thread will pause and acquire the
661 			 * siglock that we hold now and until we've queued
662 			 * the pending signal.
663 			 *
664 			 * Wake up the stopped thread _after_ setting
665 			 * TIF_SIGPENDING
666 			 */
667 			state = TASK_STOPPED;
668 			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
669 				set_tsk_thread_flag(t, TIF_SIGPENDING);
670 				state |= TASK_INTERRUPTIBLE;
671 			}
672 			wake_up_state(t, state);
673 
674 			t = next_thread(t);
675 		} while (t != p);
676 
677 		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
678 			/*
679 			 * We were in fact stopped, and are now continued.
680 			 * Notify the parent with CLD_CONTINUED.
681 			 */
682 			p->signal->flags = SIGNAL_STOP_CONTINUED;
683 			p->signal->group_exit_code = 0;
684 			spin_unlock(&p->sighand->siglock);
685 			do_notify_parent_cldstop(p, CLD_CONTINUED);
686 			spin_lock(&p->sighand->siglock);
687 		} else {
688 			/*
689 			 * We are not stopped, but there could be a stop
690 			 * signal in the middle of being processed after
691 			 * being removed from the queue.  Clear that too.
692 			 */
693 			p->signal->flags = 0;
694 		}
695 	} else if (sig == SIGKILL) {
696 		/*
697 		 * Make sure that any pending stop signal already dequeued
698 		 * is undone by the wakeup for SIGKILL.
699 		 */
700 		p->signal->flags = 0;
701 	}
702 }
703 
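/*
 * Queue a signal on the given pending list: allocate a sigqueue entry,
 * fill in the siginfo (synthesizing it for SEND_SIG_NOINFO and
 * SEND_SIG_PRIV; SEND_SIG_FORCED skips the queue entirely), and set the
 * bit in the pending mask.  If allocation fails, legacy signals and
 * kill()-style rt signals still get the bare pending bit, but an rt
 * signal carrying queued siginfo (si_code other than SI_USER) fails
 * with -EAGAIN.
 */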
704 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
705 			struct sigpending *signals)
706 {
707 	struct sigqueue * q = NULL;
708 	int ret = 0;
709 
710 	/*
711 	 * fast-pathed signals for kernel-internal things like SIGSTOP
712 	 * or SIGKILL.
713 	 */
714 	if (info == SEND_SIG_FORCED)
715 		goto out_set;
716 
717 	/* Real-time signals must be queued if sent by sigqueue, or
718 	   some other real-time mechanism.  It is implementation
719 	   defined whether kill() does so.  We attempt to do so, on
720 	   the principle of least surprise, but since kill is not
721 	   allowed to fail with EAGAIN when low on memory we just
722 	   make sure at least one signal gets delivered and don't
723 	   pass on the info struct.  */
724 
725 	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
726 					     (is_si_special(info) ||
727 					      info->si_code >= 0)));
728 	if (q) {
729 		list_add_tail(&q->list, &signals->list);
730 		switch ((unsigned long) info) {
731 		case (unsigned long) SEND_SIG_NOINFO:
732 			q->info.si_signo = sig;
733 			q->info.si_errno = 0;
734 			q->info.si_code = SI_USER;
735 			q->info.si_pid = current->pid;
736 			q->info.si_uid = current->uid;
737 			break;
738 		case (unsigned long) SEND_SIG_PRIV:
739 			q->info.si_signo = sig;
740 			q->info.si_errno = 0;
741 			q->info.si_code = SI_KERNEL;
742 			q->info.si_pid = 0;
743 			q->info.si_uid = 0;
744 			break;
745 		default:
746 			copy_siginfo(&q->info, info);
747 			break;
748 		}
749 	} else if (!is_si_special(info)) {
750 		if (sig >= SIGRTMIN && info->si_code != SI_USER)
751 		/*
752 		 * Queue overflow, abort.  We may abort if the signal was rt
753 		 * and sent by user using something other than kill().
754 		 */
755 			return -EAGAIN;
756 	}
757 
758 out_set:
759 	sigaddset(&signals->signal, sig);
760 	return ret;
761 }
762 
763 #define LEGACY_QUEUE(sigptr, sig) \
764 	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
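/*
 * LEGACY_QUEUE is true when a non-realtime signal is already pending, in
 * which case the senders below simply drop the duplicate: e.g. two
 * SIGCHLDs raised before the first is dequeued coalesce into a single
 * delivery, which is the traditional non-queued signal behaviour.
 */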
765 
766 
767 static int
768 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
769 {
770 	int ret = 0;
771 
772 	BUG_ON(!irqs_disabled());
773 	assert_spin_locked(&t->sighand->siglock);
774 
775 	/* Short-circuit ignored signals.  */
776 	if (sig_ignored(t, sig))
777 		goto out;
778 
779 	/* Support queueing exactly one non-rt signal, so that we
780 	   can get more detailed information about the cause of
781 	   the signal. */
782 	if (LEGACY_QUEUE(&t->pending, sig))
783 		goto out;
784 
785 	ret = send_signal(sig, info, t, &t->pending);
786 	if (!ret && !sigismember(&t->blocked, sig))
787 		signal_wake_up(t, sig == SIGKILL);
788 out:
789 	return ret;
790 }
791 
792 /*
793  * Force a signal that the process can't ignore: if necessary
794  * we unblock the signal and change any SIG_IGN to SIG_DFL.
795  */
796 
797 int
798 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
799 {
800 	unsigned long int flags;
801 	int ret;
802 
803 	spin_lock_irqsave(&t->sighand->siglock, flags);
804 	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
805 		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
806 	}
807 	if (sigismember(&t->blocked, sig)) {
808 		sigdelset(&t->blocked, sig);
809 	}
810 	recalc_sigpending_tsk(t);
811 	ret = specific_send_sig_info(sig, info, t);
812 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
813 
814 	return ret;
815 }
816 
817 void
818 force_sig_specific(int sig, struct task_struct *t)
819 {
820 	force_sig_info(sig, SEND_SIG_FORCED, t);
821 }
822 
823 /*
824  * Test if P wants to take SIG.  After we've checked all threads with this,
825  * it's equivalent to finding no threads not blocking SIG.  Any threads not
826  * blocking SIG were ruled out because they are not running and already
827  * have pending signals.  Such threads will dequeue from the shared queue
828  * as soon as they're available, so putting the signal on the shared queue
829  * will be equivalent to sending it to one such thread.
830  */
831 static inline int wants_signal(int sig, struct task_struct *p)
832 {
833 	if (sigismember(&p->blocked, sig))
834 		return 0;
835 	if (p->flags & PF_EXITING)
836 		return 0;
837 	if (sig == SIGKILL)
838 		return 1;
839 	if (p->state & (TASK_STOPPED | TASK_TRACED))
840 		return 0;
841 	return task_curr(p) || !signal_pending(p);
842 }
843 
844 static void
845 __group_complete_signal(int sig, struct task_struct *p)
846 {
847 	struct task_struct *t;
848 
849 	/*
850 	 * Now find a thread we can wake up to take the signal off the queue.
851 	 *
852 	 * If the main thread wants the signal, it gets first crack.
853 	 * Probably the least surprising to the average bear.
854 	 */
855 	if (wants_signal(sig, p))
856 		t = p;
857 	else if (thread_group_empty(p))
858 		/*
859 		 * There is just one thread and it does not need to be woken.
860 		 * It will dequeue unblocked signals before it runs again.
861 		 */
862 		return;
863 	else {
864 		/*
865 		 * Otherwise try to find a suitable thread.
866 		 */
867 		t = p->signal->curr_target;
868 		if (t == NULL)
869 			/* restart balancing at this thread */
870 			t = p->signal->curr_target = p;
871 
872 		while (!wants_signal(sig, t)) {
873 			t = next_thread(t);
874 			if (t == p->signal->curr_target)
875 				/*
876 				 * No thread needs to be woken.
877 				 * Any eligible threads will see
878 				 * the signal in the queue soon.
879 				 */
880 				return;
881 		}
882 		p->signal->curr_target = t;
883 	}
884 
885 	/*
886 	 * Found a killable thread.  If the signal will be fatal,
887 	 * then start taking the whole group down immediately.
888 	 */
889 	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
890 	    !sigismember(&t->real_blocked, sig) &&
891 	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
892 		/*
893 		 * This signal will be fatal to the whole group.
894 		 */
895 		if (!sig_kernel_coredump(sig)) {
896 			/*
897 			 * Start a group exit and wake everybody up.
898 			 * This way we don't have other threads
899 			 * running and doing things after a slower
900 			 * thread has the fatal signal pending.
901 			 */
902 			p->signal->flags = SIGNAL_GROUP_EXIT;
903 			p->signal->group_exit_code = sig;
904 			p->signal->group_stop_count = 0;
905 			t = p;
906 			do {
907 				sigaddset(&t->pending.signal, SIGKILL);
908 				signal_wake_up(t, 1);
909 				t = next_thread(t);
910 			} while (t != p);
911 			return;
912 		}
913 
914 		/*
915 		 * There will be a core dump.  We make all threads other
916 		 * than the chosen one go into a group stop so that nothing
917 		 * happens until it gets scheduled, takes the signal off
918 		 * the shared queue, and does the core dump.  This is a
919 		 * little more complicated than strictly necessary, but it
920 		 * keeps the signal state that winds up in the core dump
921 		 * unchanged from the death state, e.g. which thread had
922 		 * the core-dump signal unblocked.
923 		 */
924 		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
925 		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
926 		p->signal->group_stop_count = 0;
927 		p->signal->group_exit_task = t;
928 		t = p;
929 		do {
930 			p->signal->group_stop_count++;
931 			signal_wake_up(t, 0);
932 			t = next_thread(t);
933 		} while (t != p);
934 		wake_up_process(p->signal->group_exit_task);
935 		return;
936 	}
937 
938 	/*
939 	 * The signal is already in the shared-pending queue.
940 	 * Tell the chosen thread to wake up and dequeue it.
941 	 */
942 	signal_wake_up(t, sig == SIGKILL);
943 	return;
944 }
945 
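/*
 * Send a signal to the whole thread group: apply the stop/continue side
 * effects, queue it on the shared-pending list (unless it is ignored or
 * an already-pending legacy signal), and pick a thread to wake via
 * __group_complete_signal().  Caller must hold ->siglock.
 */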
946 int
947 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
948 {
949 	int ret = 0;
950 
951 	assert_spin_locked(&p->sighand->siglock);
952 	handle_stop_signal(sig, p);
953 
954 	/* Short-circuit ignored signals.  */
955 	if (sig_ignored(p, sig))
956 		return ret;
957 
958 	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
959 		/* This is a non-RT signal and we already have one queued.  */
960 		return ret;
961 
962 	/*
963 	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
964 	 * We always use the shared queue for process-wide signals,
965 	 * to avoid several races.
966 	 */
967 	ret = send_signal(sig, info, p, &p->signal->shared_pending);
968 	if (unlikely(ret))
969 		return ret;
970 
971 	__group_complete_signal(sig, p);
972 	return 0;
973 }
974 
975 /*
976  * Nuke all other threads in the group.
977  */
978 void zap_other_threads(struct task_struct *p)
979 {
980 	struct task_struct *t;
981 
982 	p->signal->flags = SIGNAL_GROUP_EXIT;
983 	p->signal->group_stop_count = 0;
984 
985 	if (thread_group_empty(p))
986 		return;
987 
988 	for (t = next_thread(p); t != p; t = next_thread(t)) {
989 		/*
990 		 * Don't bother with already dead threads
991 		 */
992 		if (t->exit_state)
993 			continue;
994 
995 		/*
996 		 * We don't want to notify the parent, since we are
997 		 * killed as part of a thread group due to another
998 		 * thread doing an execve() or similar. So set the
999 		 * exit signal to -1 to allow immediate reaping of
1000 		 * the process.  But don't detach the thread group
1001 		 * leader.
1002 		 */
1003 		if (t != p->group_leader)
1004 			t->exit_signal = -1;
1005 
1006 		/* SIGKILL will be handled before any pending SIGSTOP */
1007 		sigaddset(&t->pending.signal, SIGKILL);
1008 		signal_wake_up(t, 1);
1009 	}
1010 }
1011 
1012 /*
1013  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
1014  */
1015 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
1016 {
1017 	struct sighand_struct *sighand;
1018 
1019 	for (;;) {
1020 		sighand = rcu_dereference(tsk->sighand);
1021 		if (unlikely(sighand == NULL))
1022 			break;
1023 
1024 		spin_lock_irqsave(&sighand->siglock, *flags);
1025 		if (likely(sighand == tsk->sighand))
1026 			break;
1027 		spin_unlock_irqrestore(&sighand->siglock, *flags);
1028 	}
1029 
1030 	return sighand;
1031 }
1032 
1033 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1034 {
1035 	unsigned long flags;
1036 	int ret;
1037 
1038 	ret = check_kill_permission(sig, info, p);
1039 
1040 	if (!ret && sig) {
1041 		ret = -ESRCH;
1042 		if (lock_task_sighand(p, &flags)) {
1043 			ret = __group_send_sig_info(sig, info, p);
1044 			unlock_task_sighand(p, &flags);
1045 		}
1046 	}
1047 
1048 	return ret;
1049 }
1050 
1051 /*
1052  * kill_pg_info() sends a signal to a process group: this is what the tty
1053  * control characters do (^C, ^Z etc)
1054  */
1055 
1056 int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1057 {
1058 	struct task_struct *p = NULL;
1059 	int retval, success;
1060 
1061 	if (pgrp <= 0)
1062 		return -EINVAL;
1063 
1064 	success = 0;
1065 	retval = -ESRCH;
1066 	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
1067 		int err = group_send_sig_info(sig, info, p);
1068 		success |= !err;
1069 		retval = err;
1070 	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
1071 	return success ? 0 : retval;
1072 }
1073 
1074 int
1075 kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1076 {
1077 	int retval;
1078 
1079 	read_lock(&tasklist_lock);
1080 	retval = __kill_pg_info(sig, info, pgrp);
1081 	read_unlock(&tasklist_lock);
1082 
1083 	return retval;
1084 }
1085 
1086 int
1087 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1088 {
1089 	int error;
1090 	int acquired_tasklist_lock = 0;
1091 	struct task_struct *p;
1092 
1093 	rcu_read_lock();
1094 	if (unlikely(sig_needs_tasklist(sig))) {
1095 		read_lock(&tasklist_lock);
1096 		acquired_tasklist_lock = 1;
1097 	}
1098 	p = find_task_by_pid(pid);
1099 	error = -ESRCH;
1100 	if (p)
1101 		error = group_send_sig_info(sig, info, p);
1102 	if (unlikely(acquired_tasklist_lock))
1103 		read_unlock(&tasklist_lock);
1104 	rcu_read_unlock();
1105 	return error;
1106 }
1107 
1108 /* like kill_proc_info(), but doesn't use uid/euid of "current" */
1109 int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
1110 		      uid_t uid, uid_t euid, u32 secid)
1111 {
1112 	int ret = -EINVAL;
1113 	struct task_struct *p;
1114 
1115 	if (!valid_signal(sig))
1116 		return ret;
1117 
1118 	read_lock(&tasklist_lock);
1119 	p = find_task_by_pid(pid);
1120 	if (!p) {
1121 		ret = -ESRCH;
1122 		goto out_unlock;
1123 	}
1124 	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1125 	    && (euid != p->suid) && (euid != p->uid)
1126 	    && (uid != p->suid) && (uid != p->uid)) {
1127 		ret = -EPERM;
1128 		goto out_unlock;
1129 	}
1130 	ret = security_task_kill(p, info, sig, secid);
1131 	if (ret)
1132 		goto out_unlock;
1133 	if (sig && p->sighand) {
1134 		unsigned long flags;
1135 		spin_lock_irqsave(&p->sighand->siglock, flags);
1136 		ret = __group_send_sig_info(sig, info, p);
1137 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1138 	}
1139 out_unlock:
1140 	read_unlock(&tasklist_lock);
1141 	return ret;
1142 }
1143 EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
1144 
1145 /*
1146  * kill_something_info() interprets pid in interesting ways just like kill(2).
1147  *
1148  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1149  * is probably wrong.  Should make it like BSD or SYSV.
1150  */
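/*
 * Here pid > 0 signals that process, pid == 0 signals the caller's
 * process group, pid == -1 signals every process except init and the
 * caller's own thread group, and pid < -1 signals the process group
 * -pid.
 */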
1151 
1152 static int kill_something_info(int sig, struct siginfo *info, int pid)
1153 {
1154 	if (!pid) {
1155 		return kill_pg_info(sig, info, process_group(current));
1156 	} else if (pid == -1) {
1157 		int retval = 0, count = 0;
1158 		struct task_struct * p;
1159 
1160 		read_lock(&tasklist_lock);
1161 		for_each_process(p) {
1162 			if (p->pid > 1 && p->tgid != current->tgid) {
1163 				int err = group_send_sig_info(sig, info, p);
1164 				++count;
1165 				if (err != -EPERM)
1166 					retval = err;
1167 			}
1168 		}
1169 		read_unlock(&tasklist_lock);
1170 		return count ? retval : -ESRCH;
1171 	} else if (pid < 0) {
1172 		return kill_pg_info(sig, info, -pid);
1173 	} else {
1174 		return kill_proc_info(sig, info, pid);
1175 	}
1176 }
1177 
1178 /*
1179  * These are for backward compatibility with the rest of the kernel source.
1180  */
1181 
1182 /*
1183  * These two are the most common entry points.  They send a signal
1184  * just to the specific thread.
1185  */
1186 int
1187 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1188 {
1189 	int ret;
1190 	unsigned long flags;
1191 
1192 	/*
1193 	 * Make sure legacy kernel users don't send in bad values
1194 	 * (normal paths check this in check_kill_permission).
1195 	 */
1196 	if (!valid_signal(sig))
1197 		return -EINVAL;
1198 
1199 	/*
1200 	 * We need the tasklist lock even for the specific
1201 	 * thread case (when we don't need to follow the group
1202 	 * lists) in order to avoid races with "p->sighand"
1203 	 * going away or changing from under us.
1204 	 */
1205 	read_lock(&tasklist_lock);
1206 	spin_lock_irqsave(&p->sighand->siglock, flags);
1207 	ret = specific_send_sig_info(sig, info, p);
1208 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
1209 	read_unlock(&tasklist_lock);
1210 	return ret;
1211 }
1212 
1213 #define __si_special(priv) \
1214 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1215 
1216 int
1217 send_sig(int sig, struct task_struct *p, int priv)
1218 {
1219 	return send_sig_info(sig, __si_special(priv), p);
1220 }
1221 
1222 /*
1223  * This is the entry point for "process-wide" signals.
1224  * They will go to an appropriate thread in the thread group.
1225  */
1226 int
1227 send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1228 {
1229 	int ret;
1230 	read_lock(&tasklist_lock);
1231 	ret = group_send_sig_info(sig, info, p);
1232 	read_unlock(&tasklist_lock);
1233 	return ret;
1234 }
1235 
1236 void
1237 force_sig(int sig, struct task_struct *p)
1238 {
1239 	force_sig_info(sig, SEND_SIG_PRIV, p);
1240 }
1241 
1242 /*
1243  * When things go south during signal handling, we
1244  * will force a SIGSEGV. And if the signal that caused
1245  * the problem was already a SIGSEGV, we'll want to
1246  * make sure we don't even try to deliver the signal.
1247  */
1248 int
1249 force_sigsegv(int sig, struct task_struct *p)
1250 {
1251 	if (sig == SIGSEGV) {
1252 		unsigned long flags;
1253 		spin_lock_irqsave(&p->sighand->siglock, flags);
1254 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1255 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1256 	}
1257 	force_sig(SIGSEGV, p);
1258 	return 0;
1259 }
1260 
1261 int
1262 kill_pg(pid_t pgrp, int sig, int priv)
1263 {
1264 	return kill_pg_info(sig, __si_special(priv), pgrp);
1265 }
1266 
1267 int
1268 kill_proc(pid_t pid, int sig, int priv)
1269 {
1270 	return kill_proc_info(sig, __si_special(priv), pid);
1271 }
1272 
1273 /*
1274  * These functions support sending signals using preallocated sigqueue
1275  * structures.  This is needed "because realtime applications cannot
1276  * afford to lose notifications of asynchronous events, like timer
1277  * expirations or I/O completions".  In the case of Posix Timers
1278  * expirations or I/O completions".  In the case of POSIX timers
1279  * we allocate the sigqueue structure from timer_create().  If this
1280  * allocation fails, we are able to report the failure to the application
1281  */
1282 
1283 struct sigqueue *sigqueue_alloc(void)
1284 {
1285 	struct sigqueue *q;
1286 
1287 	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1288 		q->flags |= SIGQUEUE_PREALLOC;
1289 	return(q);
1290 }
1291 
1292 void sigqueue_free(struct sigqueue *q)
1293 {
1294 	unsigned long flags;
1295 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1296 	/*
1297 	 * If the signal is still pending remove it from the
1298 	 * pending queue.
1299 	 */
1300 	if (unlikely(!list_empty(&q->list))) {
1301 		spinlock_t *lock = &current->sighand->siglock;
1302 		read_lock(&tasklist_lock);
1303 		spin_lock_irqsave(lock, flags);
1304 		if (!list_empty(&q->list))
1305 			list_del_init(&q->list);
1306 		spin_unlock_irqrestore(lock, flags);
1307 		read_unlock(&tasklist_lock);
1308 	}
1309 	q->flags &= ~SIGQUEUE_PREALLOC;
1310 	__sigqueue_free(q);
1311 }
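/*
 * Illustrative sketch (not part of the original source) of the
 * preallocated-sigqueue lifetime as the POSIX timer code uses it; error
 * handling and the timer structures themselves are omitted.
 *
 *	struct sigqueue *q;
 *
 *	q = sigqueue_alloc();			at timer_create() time
 *	if (!q)
 *		return -EAGAIN;			report the failure to the caller
 *	...
 *	send_sigqueue(sig, q, task);		on each timer expiry
 *	...
 *	sigqueue_free(q);			when the timer is deleted
 */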
1312 
1313 int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1314 {
1315 	unsigned long flags;
1316 	int ret = 0;
1317 
1318 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1319 
1320 	/*
1321 	 * The rcu based delayed sighand destroy makes it possible to
1322 	 * run this without tasklist lock held. The task struct itself
1323 	 * cannot go away as create_timer did get_task_struct().
1324 	 *
1325 	 * We return -1 when the task is marked exiting, so that
1326 	 * posix_timer_event can redirect it to the group leader.
1327 	 */
1328 	rcu_read_lock();
1329 
1330 	if (!likely(lock_task_sighand(p, &flags))) {
1331 		ret = -1;
1332 		goto out_err;
1333 	}
1334 
1335 	if (unlikely(!list_empty(&q->list))) {
1336 		/*
1337 		 * If an SI_TIMER entry is already queued, just increment
1338 		 * the overrun count.
1339 		 */
1340 		BUG_ON(q->info.si_code != SI_TIMER);
1341 		q->info.si_overrun++;
1342 		goto out;
1343 	}
1344 	/* Short-circuit ignored signals.  */
1345 	if (sig_ignored(p, sig)) {
1346 		ret = 1;
1347 		goto out;
1348 	}
1349 
1350 	list_add_tail(&q->list, &p->pending.list);
1351 	sigaddset(&p->pending.signal, sig);
1352 	if (!sigismember(&p->blocked, sig))
1353 		signal_wake_up(p, sig == SIGKILL);
1354 
1355 out:
1356 	unlock_task_sighand(p, &flags);
1357 out_err:
1358 	rcu_read_unlock();
1359 
1360 	return ret;
1361 }
1362 
1363 int
1364 send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1365 {
1366 	unsigned long flags;
1367 	int ret = 0;
1368 
1369 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1370 
1371 	read_lock(&tasklist_lock);
1372 	/* Since it_lock is held, p->sighand cannot be NULL. */
1373 	spin_lock_irqsave(&p->sighand->siglock, flags);
1374 	handle_stop_signal(sig, p);
1375 
1376 	/* Short-circuit ignored signals.  */
1377 	if (sig_ignored(p, sig)) {
1378 		ret = 1;
1379 		goto out;
1380 	}
1381 
1382 	if (unlikely(!list_empty(&q->list))) {
1383 		/*
1384 		 * If an SI_TIMER entry is already queued, just increment
1385 		 * the overrun count.  Other uses should not try to
1386 		 * send the signal multiple times.
1387 		 */
1388 		BUG_ON(q->info.si_code != SI_TIMER);
1389 		q->info.si_overrun++;
1390 		goto out;
1391 	}
1392 
1393 	/*
1394 	 * Put this signal on the shared-pending queue.
1395 	 * We always use the shared queue for process-wide signals,
1396 	 * to avoid several races.
1397 	 */
1398 	list_add_tail(&q->list, &p->signal->shared_pending.list);
1399 	sigaddset(&p->signal->shared_pending.signal, sig);
1400 
1401 	__group_complete_signal(sig, p);
1402 out:
1403 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
1404 	read_unlock(&tasklist_lock);
1405 	return ret;
1406 }
1407 
1408 /*
1409  * Wake up any threads in the parent blocked in wait* syscalls.
1410  */
1411 static inline void __wake_up_parent(struct task_struct *p,
1412 				    struct task_struct *parent)
1413 {
1414 	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1415 }
1416 
1417 /*
1418  * Let a parent know about the death of a child.
1419  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1420  */
1421 
1422 void do_notify_parent(struct task_struct *tsk, int sig)
1423 {
1424 	struct siginfo info;
1425 	unsigned long flags;
1426 	struct sighand_struct *psig;
1427 
1428 	BUG_ON(sig == -1);
1429 
1430  	/* do_notify_parent_cldstop should have been called instead.  */
1431  	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1432 
1433 	BUG_ON(!tsk->ptrace &&
1434 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1435 
1436 	info.si_signo = sig;
1437 	info.si_errno = 0;
1438 	info.si_pid = tsk->pid;
1439 	info.si_uid = tsk->uid;
1440 
1441 	/* FIXME: find out whether or not this is supposed to be c*time. */
1442 	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1443 						       tsk->signal->utime));
1444 	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1445 						       tsk->signal->stime));
1446 
1447 	info.si_status = tsk->exit_code & 0x7f;
1448 	if (tsk->exit_code & 0x80)
1449 		info.si_code = CLD_DUMPED;
1450 	else if (tsk->exit_code & 0x7f)
1451 		info.si_code = CLD_KILLED;
1452 	else {
1453 		info.si_code = CLD_EXITED;
1454 		info.si_status = tsk->exit_code >> 8;
1455 	}
1456 
1457 	psig = tsk->parent->sighand;
1458 	spin_lock_irqsave(&psig->siglock, flags);
1459 	if (!tsk->ptrace && sig == SIGCHLD &&
1460 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1461 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1462 		/*
1463 		 * We are exiting and our parent doesn't care.  POSIX.1
1464 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1465 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1466 		 * automatically and not left for our parent's wait4 call.
1467 		 * Rather than having the parent do it as a magic kind of
1468 		 * signal handler, we just set this to tell do_exit that we
1469 		 * can be cleaned up without becoming a zombie.  Note that
1470 		 * we still call __wake_up_parent in this case, because a
1471 		 * blocked sys_wait4 might now return -ECHILD.
1472 		 *
1473 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1474 		 * is implementation-defined: we do (if you don't want
1475 		 * it, just use SIG_IGN instead).
1476 		 */
1477 		tsk->exit_signal = -1;
1478 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1479 			sig = 0;
1480 	}
1481 	if (valid_signal(sig) && sig > 0)
1482 		__group_send_sig_info(sig, &info, tsk->parent);
1483 	__wake_up_parent(tsk, tsk->parent);
1484 	spin_unlock_irqrestore(&psig->siglock, flags);
1485 }
1486 
1487 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1488 {
1489 	struct siginfo info;
1490 	unsigned long flags;
1491 	struct task_struct *parent;
1492 	struct sighand_struct *sighand;
1493 
1494 	if (tsk->ptrace & PT_PTRACED)
1495 		parent = tsk->parent;
1496 	else {
1497 		tsk = tsk->group_leader;
1498 		parent = tsk->real_parent;
1499 	}
1500 
1501 	info.si_signo = SIGCHLD;
1502 	info.si_errno = 0;
1503 	info.si_pid = tsk->pid;
1504 	info.si_uid = tsk->uid;
1505 
1506 	/* FIXME: find out whether or not this is supposed to be c*time. */
1507 	info.si_utime = cputime_to_jiffies(tsk->utime);
1508 	info.si_stime = cputime_to_jiffies(tsk->stime);
1509 
1510  	info.si_code = why;
1511  	switch (why) {
1512  	case CLD_CONTINUED:
1513  		info.si_status = SIGCONT;
1514  		break;
1515  	case CLD_STOPPED:
1516  		info.si_status = tsk->signal->group_exit_code & 0x7f;
1517  		break;
1518  	case CLD_TRAPPED:
1519  		info.si_status = tsk->exit_code & 0x7f;
1520  		break;
1521  	default:
1522  		BUG();
1523  	}
1524 
1525 	sighand = parent->sighand;
1526 	spin_lock_irqsave(&sighand->siglock, flags);
1527 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1528 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1529 		__group_send_sig_info(SIGCHLD, &info, parent);
1530 	/*
1531 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1532 	 */
1533 	__wake_up_parent(tsk, parent);
1534 	spin_unlock_irqrestore(&sighand->siglock, flags);
1535 }
1536 
1537 static inline int may_ptrace_stop(void)
1538 {
1539 	if (!likely(current->ptrace & PT_PTRACED))
1540 		return 0;
1541 
1542 	if (unlikely(current->parent == current->real_parent &&
1543 		    (current->ptrace & PT_ATTACHED)))
1544 		return 0;
1545 
1546 	if (unlikely(current->signal == current->parent->signal) &&
1547 	    unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
1548 		return 0;
1549 
1550 	/*
1551 	 * Are we in the middle of do_coredump?
1552 	 * If so, and our tracer is also part of the coredump, stopping
1553 	 * would be a deadlock, and pointless anyway because our tracer
1554 	 * is dead, so don't allow us to stop.
1555 	 * If SIGKILL was already sent before the caller unlocked
1556 	 * ->siglock we must see ->core_waiters != 0. Otherwise it
1557 	 * is safe to enter schedule().
1558 	 */
1559 	if (unlikely(current->mm->core_waiters) &&
1560 	    unlikely(current->mm == current->parent->mm))
1561 		return 0;
1562 
1563 	return 1;
1564 }
1565 
1566 /*
1567  * This must be called with current->sighand->siglock held.
1568  *
1569  * This should be the path for all ptrace stops.
1570  * We always set current->last_siginfo while stopped here.
1571  * That makes it a way to test a stopped process for
1572  * being ptrace-stopped vs being job-control-stopped.
1573  *
1574  * If we actually decide not to stop at all because the tracer is gone,
1575  * we leave nostop_code in current->exit_code.
1576  */
1577 static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1578 {
1579 	/*
1580 	 * If there is a group stop in progress,
1581 	 * we must participate in the bookkeeping.
1582 	 */
1583 	if (current->signal->group_stop_count > 0)
1584 		--current->signal->group_stop_count;
1585 
1586 	current->last_siginfo = info;
1587 	current->exit_code = exit_code;
1588 
1589 	/* Let the debugger run.  */
1590 	set_current_state(TASK_TRACED);
1591 	spin_unlock_irq(&current->sighand->siglock);
1592 	try_to_freeze();
1593 	read_lock(&tasklist_lock);
1594 	if (may_ptrace_stop()) {
1595 		do_notify_parent_cldstop(current, CLD_TRAPPED);
1596 		read_unlock(&tasklist_lock);
1597 		schedule();
1598 	} else {
1599 		/*
1600 		 * By the time we got the lock, our tracer went away.
1601 		 * Don't stop here.
1602 		 */
1603 		read_unlock(&tasklist_lock);
1604 		set_current_state(TASK_RUNNING);
1605 		current->exit_code = nostop_code;
1606 	}
1607 
1608 	/*
1609 	 * We are back.  Now reacquire the siglock before touching
1610 	 * last_siginfo, so that we are sure to have synchronized with
1611 	 * any signal-sending on another CPU that wants to examine it.
1612 	 */
1613 	spin_lock_irq(&current->sighand->siglock);
1614 	current->last_siginfo = NULL;
1615 
1616 	/*
1617 	 * Queued signals ignored us while we were stopped for tracing.
1618 	 * So check for any that we should take before resuming user mode.
1619 	 */
1620 	recalc_sigpending();
1621 }
1622 
1623 void ptrace_notify(int exit_code)
1624 {
1625 	siginfo_t info;
1626 
1627 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1628 
1629 	memset(&info, 0, sizeof info);
1630 	info.si_signo = SIGTRAP;
1631 	info.si_code = exit_code;
1632 	info.si_pid = current->pid;
1633 	info.si_uid = current->uid;
1634 
1635 	/* Let the debugger run.  */
1636 	spin_lock_irq(&current->sighand->siglock);
1637 	ptrace_stop(exit_code, 0, &info);
1638 	spin_unlock_irq(&current->sighand->siglock);
1639 }
1640 
1641 static void
1642 finish_stop(int stop_count)
1643 {
1644 	/*
1645 	 * If there are no other threads in the group, or if there is
1646 	 * a group stop in progress and we are the last to stop,
1647 	 * report to the parent.  When ptraced, every thread reports itself.
1648 	 */
1649 	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
1650 		read_lock(&tasklist_lock);
1651 		do_notify_parent_cldstop(current, CLD_STOPPED);
1652 		read_unlock(&tasklist_lock);
1653 	}
1654 
1655 	schedule();
1656 	/*
1657 	 * Now we don't run again until continued.
1658 	 */
1659 	current->exit_code = 0;
1660 }
1661 
1662 /*
1663  * This performs the stopping for SIGSTOP and other stop signals.
1664  * We have to stop all threads in the thread group.
1665  * Returns nonzero if we've actually stopped and released the siglock.
1666  * Returns zero if we didn't stop and still hold the siglock.
1667  */
1668 static int do_signal_stop(int signr)
1669 {
1670 	struct signal_struct *sig = current->signal;
1671 	int stop_count;
1672 
1673 	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1674 		return 0;
1675 
1676 	if (sig->group_stop_count > 0) {
1677 		/*
1678 		 * There is a group stop in progress.  We don't need to
1679 		 * start another one.
1680 		 */
1681 		stop_count = --sig->group_stop_count;
1682 	} else {
1683 		/*
1684 		 * There is no group stop already in progress.
1685 		 * We must initiate one now.
1686 		 */
1687 		struct task_struct *t;
1688 
1689 		sig->group_exit_code = signr;
1690 
1691 		stop_count = 0;
1692 		for (t = next_thread(current); t != current; t = next_thread(t))
1693 			/*
1694 			 * Setting state to TASK_STOPPED for a group
1695 			 * stop is always done with the siglock held,
1696 			 * so this check has no races.
1697 			 */
1698 			if (!t->exit_state &&
1699 			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
1700 				stop_count++;
1701 				signal_wake_up(t, 0);
1702 			}
1703 		sig->group_stop_count = stop_count;
1704 	}
1705 
1706 	if (stop_count == 0)
1707 		sig->flags = SIGNAL_STOP_STOPPED;
1708 	current->exit_code = sig->group_exit_code;
1709 	__set_current_state(TASK_STOPPED);
1710 
1711 	spin_unlock_irq(&current->sighand->siglock);
1712 	finish_stop(stop_count);
1713 	return 1;
1714 }
1715 
1716 /*
1717  * Do appropriate magic when group_stop_count > 0.
1718  * We return nonzero if we stopped, after releasing the siglock.
1719  * We return zero if we still hold the siglock and should look
1720  * for another signal without checking group_stop_count again.
1721  */
1722 static int handle_group_stop(void)
1723 {
1724 	int stop_count;
1725 
1726 	if (current->signal->group_exit_task == current) {
1727 		/*
1728 		 * Group stop is so we can do a core dump.
1729 		 * We are the initiating thread, so get on with it.
1730 		 */
1731 		current->signal->group_exit_task = NULL;
1732 		return 0;
1733 	}
1734 
1735 	if (current->signal->flags & SIGNAL_GROUP_EXIT)
1736 		/*
1737 		 * Group stop is so another thread can do a core dump,
1738 		 * or else we are racing against a death signal.
1739 		 * Just punt the stop so we can get the next signal.
1740 		 */
1741 		return 0;
1742 
1743 	/*
1744 	 * There is a group stop in progress.  We stop
1745 	 * without any associated signal being in our queue.
1746 	 */
1747 	stop_count = --current->signal->group_stop_count;
1748 	if (stop_count == 0)
1749 		current->signal->flags = SIGNAL_STOP_STOPPED;
1750 	current->exit_code = current->signal->group_exit_code;
1751 	set_current_state(TASK_STOPPED);
1752 	spin_unlock_irq(&current->sighand->siglock);
1753 	finish_stop(stop_count);
1754 	return 1;
1755 }
1756 
1757 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1758 			  struct pt_regs *regs, void *cookie)
1759 {
1760 	sigset_t *mask = &current->blocked;
1761 	int signr = 0;
1762 
1763 	try_to_freeze();
1764 
1765 relock:
1766 	spin_lock_irq(&current->sighand->siglock);
1767 	for (;;) {
1768 		struct k_sigaction *ka;
1769 
1770 		if (unlikely(current->signal->group_stop_count > 0) &&
1771 		    handle_group_stop())
1772 			goto relock;
1773 
1774 		signr = dequeue_signal(current, mask, info);
1775 
1776 		if (!signr)
1777 			break; /* will return 0 */
1778 
1779 		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1780 			ptrace_signal_deliver(regs, cookie);
1781 
1782 			/* Let the debugger run.  */
1783 			ptrace_stop(signr, signr, info);
1784 
1785 			/* We're back.  Did the debugger cancel the sig?  */
1786 			signr = current->exit_code;
1787 			if (signr == 0)
1788 				continue;
1789 
1790 			current->exit_code = 0;
1791 
1792 			/* Update the siginfo structure if the signal has
1793 			   changed.  If the debugger wanted something
1794 			   specific in the siginfo structure then it should
1795 			   have updated *info via PTRACE_SETSIGINFO.  */
1796 			if (signr != info->si_signo) {
1797 				info->si_signo = signr;
1798 				info->si_errno = 0;
1799 				info->si_code = SI_USER;
1800 				info->si_pid = current->parent->pid;
1801 				info->si_uid = current->parent->uid;
1802 			}
1803 
1804 			/* If the (new) signal is now blocked, requeue it.  */
1805 			if (sigismember(&current->blocked, signr)) {
1806 				specific_send_sig_info(signr, info, current);
1807 				continue;
1808 			}
1809 		}
1810 
1811 		ka = &current->sighand->action[signr-1];
1812 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1813 			continue;
1814 		if (ka->sa.sa_handler != SIG_DFL) {
1815 			/* Run the handler.  */
1816 			*return_ka = *ka;
1817 
1818 			if (ka->sa.sa_flags & SA_ONESHOT)
1819 				ka->sa.sa_handler = SIG_DFL;
1820 
1821 			break; /* will return non-zero "signr" value */
1822 		}
1823 
1824 		/*
1825 		 * Now we are doing the default action for this signal.
1826 		 */
1827 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
1828 			continue;
1829 
1830 		/* Init gets no signals it doesn't want.  */
1831 		if (current == child_reaper)
1832 			continue;
1833 
1834 		if (sig_kernel_stop(signr)) {
1835 			/*
1836 			 * The default action is to stop all threads in
1837 			 * the thread group.  The job control signals
1838 			 * do nothing in an orphaned pgrp, but SIGSTOP
1839 			 * always works.  Note that siglock needs to be
1840 			 * dropped during the call to is_orphaned_pgrp()
1841 			 * because of lock ordering with tasklist_lock.
1842 			 * This allows an intervening SIGCONT to be posted.
1843 			 * We need to check for that and bail out if necessary.
1844 			 */
1845 			if (signr != SIGSTOP) {
1846 				spin_unlock_irq(&current->sighand->siglock);
1847 
1848 				/* signals can be posted during this window */
1849 
1850 				if (is_orphaned_pgrp(process_group(current)))
1851 					goto relock;
1852 
1853 				spin_lock_irq(&current->sighand->siglock);
1854 			}
1855 
1856 			if (likely(do_signal_stop(signr))) {
1857 				/* It released the siglock.  */
1858 				goto relock;
1859 			}
1860 
1861 			/*
1862 			 * We didn't actually stop, due to a race
1863 			 * with SIGCONT or something like that.
1864 			 */
1865 			continue;
1866 		}
1867 
1868 		spin_unlock_irq(&current->sighand->siglock);
1869 
1870 		/*
1871 		 * Anything else is fatal, maybe with a core dump.
1872 		 */
1873 		current->flags |= PF_SIGNALED;
1874 		if (sig_kernel_coredump(signr)) {
1875 			/*
1876 			 * If it was able to dump core, this kills all
1877 			 * other threads in the group and synchronizes with
1878 			 * their demise.  If we lost the race with another
1879 			 * thread getting here, it set group_exit_code
1880 			 * first and our do_group_exit call below will use
1881 			 * that value and ignore the one we pass it.
1882 			 */
1883 			do_coredump((long)signr, signr, regs);
1884 		}
1885 
1886 		/*
1887 		 * Death signals, no core dump.
1888 		 */
1889 		do_group_exit(signr);
1890 		/* NOTREACHED */
1891 	}
1892 	spin_unlock_irq(&current->sighand->siglock);
1893 	return signr;
1894 }
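/*
 * Illustrative sketch (hypothetical tracer, not part of this file): the
 * PT_PTRACED branch above stops the task in ptrace_stop() and then reads
 * current->exit_code back as the signal to deliver, so a debugger cancels
 * a signal by resuming the tracee with 0, or substitutes a different one
 * by resuming with another number.  Error handling is elided and "child"
 * is an assumed, already-attached tracee.
 *
 *	#include <signal.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *	waitpid(child, &status, 0);		// tracee stopped on a signal
 *	int sig = WSTOPSIG(status);
 *	if (sig == SIGUSR1)
 *		sig = 0;			// cancel: the tracee never sees it
 *	ptrace(PTRACE_CONT, child, NULL, (void *)(long)sig);
 */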
1895 
1896 EXPORT_SYMBOL(recalc_sigpending);
1897 EXPORT_SYMBOL_GPL(dequeue_signal);
1898 EXPORT_SYMBOL(flush_signals);
1899 EXPORT_SYMBOL(force_sig);
1900 EXPORT_SYMBOL(kill_pg);
1901 EXPORT_SYMBOL(kill_proc);
1902 EXPORT_SYMBOL(ptrace_notify);
1903 EXPORT_SYMBOL(send_sig);
1904 EXPORT_SYMBOL(send_sig_info);
1905 EXPORT_SYMBOL(sigprocmask);
1906 EXPORT_SYMBOL(block_all_signals);
1907 EXPORT_SYMBOL(unblock_all_signals);
1908 
1909 
1910 /*
1911  * System call entry points.
1912  */
1913 
1914 asmlinkage long sys_restart_syscall(void)
1915 {
1916 	struct restart_block *restart = &current_thread_info()->restart_block;
1917 	return restart->fn(restart);
1918 }
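/*
 * Illustrative sketch (hypothetical syscall implementation, not from this
 * file): a syscall that cannot simply be re-executed with its original
 * arguments after a signal (for example, a sleep that has already consumed
 * part of its interval) saves its state in the restart_block and returns
 * -ERESTART_RESTARTBLOCK; the signal-return path then re-enters through
 * sys_restart_syscall() above, which calls the saved function.  The names
 * my_restart_fn and remaining are assumptions for illustration.
 *
 *	struct restart_block *restart = &current_thread_info()->restart_block;
 *
 *	restart->fn = my_restart_fn;		// hypothetical continuation
 *	restart->arg0 = remaining;		// hypothetical saved state
 *	return -ERESTART_RESTARTBLOCK;
 */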
1919 
1920 long do_no_restart_syscall(struct restart_block *param)
1921 {
1922 	return -EINTR;
1923 }
1924 
1925 /*
1926  * We don't need to get the kernel lock - this is all local to this
1927  * particular thread (and that's good, because this is _heavily_
1928  * used by various programs)
1929  */
1930 
1931 /*
1932  * This is also useful for kernel threads that want to temporarily
1933  * (or permanently) block certain signals.
1934  *
1935  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
1936  * interface happily blocks "unblockable" signals like SIGKILL
1937  * and friends.
1938  */
1939 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
1940 {
1941 	int error;
1942 
1943 	spin_lock_irq(&current->sighand->siglock);
1944 	if (oldset)
1945 		*oldset = current->blocked;
1946 
1947 	error = 0;
1948 	switch (how) {
1949 	case SIG_BLOCK:
1950 		sigorsets(&current->blocked, &current->blocked, set);
1951 		break;
1952 	case SIG_UNBLOCK:
1953 		signandsets(&current->blocked, &current->blocked, set);
1954 		break;
1955 	case SIG_SETMASK:
1956 		current->blocked = *set;
1957 		break;
1958 	default:
1959 		error = -EINVAL;
1960 	}
1961 	recalc_sigpending();
1962 	spin_unlock_irq(&current->sighand->siglock);
1963 
1964 	return error;
1965 }
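/*
 * Illustrative sketch (hypothetical kernel thread, not from this file):
 * as the note above says, the in-kernel sigprocmask() will happily block
 * anything, including SIGKILL.  A daemon thread that only wants to see
 * SIGHUP might therefore do, early in its setup:
 *
 *	sigset_t block;
 *
 *	siginitsetinv(&block, sigmask(SIGHUP));	// everything except SIGHUP
 *	sigprocmask(SIG_BLOCK, &block, NULL);
 */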
1966 
1967 asmlinkage long
1968 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
1969 {
1970 	int error = -EINVAL;
1971 	sigset_t old_set, new_set;
1972 
1973 	/* XXX: Don't preclude handling different sized sigset_t's.  */
1974 	if (sigsetsize != sizeof(sigset_t))
1975 		goto out;
1976 
1977 	if (set) {
1978 		error = -EFAULT;
1979 		if (copy_from_user(&new_set, set, sizeof(*set)))
1980 			goto out;
1981 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
1982 
1983 		error = sigprocmask(how, &new_set, &old_set);
1984 		if (error)
1985 			goto out;
1986 		if (oset)
1987 			goto set_old;
1988 	} else if (oset) {
1989 		spin_lock_irq(&current->sighand->siglock);
1990 		old_set = current->blocked;
1991 		spin_unlock_irq(&current->sighand->siglock);
1992 
1993 	set_old:
1994 		error = -EFAULT;
1995 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
1996 			goto out;
1997 	}
1998 	error = 0;
1999 out:
2000 	return error;
2001 }
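/*
 * Illustrative sketch (userspace, not part of this file): the C library's
 * sigprocmask(2) wrapper normally ends up in sys_rt_sigprocmask() above.
 * A typical use is to hold a signal pending across a critical section and
 * then restore the previous mask:
 *
 *	#include <signal.h>
 *
 *	sigset_t set, old;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);	// SIGINT now stays pending
 *	// ... critical section ...
 *	sigprocmask(SIG_SETMASK, &old, NULL);	// restore; pending SIGINT fires
 */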
2002 
2003 long do_sigpending(void __user *set, unsigned long sigsetsize)
2004 {
2005 	long error = -EINVAL;
2006 	sigset_t pending;
2007 
2008 	if (sigsetsize > sizeof(sigset_t))
2009 		goto out;
2010 
2011 	spin_lock_irq(&current->sighand->siglock);
2012 	sigorsets(&pending, &current->pending.signal,
2013 		  &current->signal->shared_pending.signal);
2014 	spin_unlock_irq(&current->sighand->siglock);
2015 
2016 	/* Outside the lock because only this thread touches it.  */
2017 	sigandsets(&pending, &current->blocked, &pending);
2018 
2019 	error = -EFAULT;
2020 	if (!copy_to_user(set, &pending, sigsetsize))
2021 		error = 0;
2022 
2023 out:
2024 	return error;
2025 }
2026 
2027 asmlinkage long
2028 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2029 {
2030 	return do_sigpending(set, sigsetsize);
2031 }
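/*
 * Illustrative sketch (userspace, not part of this file): sigpending(2)
 * reports the union of the per-thread and shared pending sets computed in
 * do_sigpending() above, restricted to signals the caller has blocked.
 *
 *	#include <signal.h>
 *
 *	sigset_t block, pending;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, NULL);
 *	raise(SIGUSR1);			// queued, not delivered
 *	sigpending(&pending);		// sigismember(&pending, SIGUSR1) == 1
 */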
2032 
2033 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2034 
2035 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2036 {
2037 	int err;
2038 
2039 	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2040 		return -EFAULT;
2041 	if (from->si_code < 0)
2042 		return __copy_to_user(to, from, sizeof(siginfo_t))
2043 			? -EFAULT : 0;
2044 	/*
2045 	 * If you change the siginfo_t structure, please be sure
2046 	 * this code is fixed accordingly.
2047 	 * It should never copy any pad contained in the structure
2048 	 * to avoid security leaks, but must copy the generic
2049 	 * 3 ints plus the relevant union member.
2050 	 */
2051 	err = __put_user(from->si_signo, &to->si_signo);
2052 	err |= __put_user(from->si_errno, &to->si_errno);
2053 	err |= __put_user((short)from->si_code, &to->si_code);
2054 	switch (from->si_code & __SI_MASK) {
2055 	case __SI_KILL:
2056 		err |= __put_user(from->si_pid, &to->si_pid);
2057 		err |= __put_user(from->si_uid, &to->si_uid);
2058 		break;
2059 	case __SI_TIMER:
2060 		err |= __put_user(from->si_tid, &to->si_tid);
2061 		err |= __put_user(from->si_overrun, &to->si_overrun);
2062 		err |= __put_user(from->si_ptr, &to->si_ptr);
2063 		break;
2064 	case __SI_POLL:
2065 		err |= __put_user(from->si_band, &to->si_band);
2066 		err |= __put_user(from->si_fd, &to->si_fd);
2067 		break;
2068 	case __SI_FAULT:
2069 		err |= __put_user(from->si_addr, &to->si_addr);
2070 #ifdef __ARCH_SI_TRAPNO
2071 		err |= __put_user(from->si_trapno, &to->si_trapno);
2072 #endif
2073 		break;
2074 	case __SI_CHLD:
2075 		err |= __put_user(from->si_pid, &to->si_pid);
2076 		err |= __put_user(from->si_uid, &to->si_uid);
2077 		err |= __put_user(from->si_status, &to->si_status);
2078 		err |= __put_user(from->si_utime, &to->si_utime);
2079 		err |= __put_user(from->si_stime, &to->si_stime);
2080 		break;
2081 	case __SI_RT: /* This is not generated by the kernel as of now. */
2082 	case __SI_MESGQ: /* But this is */
2083 		err |= __put_user(from->si_pid, &to->si_pid);
2084 		err |= __put_user(from->si_uid, &to->si_uid);
2085 		err |= __put_user(from->si_ptr, &to->si_ptr);
2086 		break;
2087 	default: /* this is just in case for now ... */
2088 		err |= __put_user(from->si_pid, &to->si_pid);
2089 		err |= __put_user(from->si_uid, &to->si_uid);
2090 		break;
2091 	}
2092 	return err;
2093 }
2094 
2095 #endif
2096 
2097 asmlinkage long
2098 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2099 		    siginfo_t __user *uinfo,
2100 		    const struct timespec __user *uts,
2101 		    size_t sigsetsize)
2102 {
2103 	int ret, sig;
2104 	sigset_t these;
2105 	struct timespec ts;
2106 	siginfo_t info;
2107 	long timeout = 0;
2108 
2109 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2110 	if (sigsetsize != sizeof(sigset_t))
2111 		return -EINVAL;
2112 
2113 	if (copy_from_user(&these, uthese, sizeof(these)))
2114 		return -EFAULT;
2115 
2116 	/*
2117 	 * Invert the set of allowed signals to get those we
2118 	 * want to block.
2119 	 */
2120 	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2121 	signotset(&these);
2122 
2123 	if (uts) {
2124 		if (copy_from_user(&ts, uts, sizeof(ts)))
2125 			return -EFAULT;
2126 		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2127 		    || ts.tv_sec < 0)
2128 			return -EINVAL;
2129 	}
2130 
2131 	spin_lock_irq(&current->sighand->siglock);
2132 	sig = dequeue_signal(current, &these, &info);
2133 	if (!sig) {
2134 		timeout = MAX_SCHEDULE_TIMEOUT;
2135 		if (uts)
2136 			timeout = (timespec_to_jiffies(&ts)
2137 				   + (ts.tv_sec || ts.tv_nsec));
2138 
2139 		if (timeout) {
2140 			/* None ready -- temporarily unblock the signals we're
2141 			 * interested in while we sleep, so that we'll be
2142 			 * awakened when they arrive.  */
2143 			current->real_blocked = current->blocked;
2144 			sigandsets(&current->blocked, &current->blocked, &these);
2145 			recalc_sigpending();
2146 			spin_unlock_irq(&current->sighand->siglock);
2147 
2148 			timeout = schedule_timeout_interruptible(timeout);
2149 
2150 			spin_lock_irq(&current->sighand->siglock);
2151 			sig = dequeue_signal(current, &these, &info);
2152 			current->blocked = current->real_blocked;
2153 			siginitset(&current->real_blocked, 0);
2154 			recalc_sigpending();
2155 		}
2156 	}
2157 	spin_unlock_irq(&current->sighand->siglock);
2158 
2159 	if (sig) {
2160 		ret = sig;
2161 		if (uinfo) {
2162 			if (copy_siginfo_to_user(uinfo, &info))
2163 				ret = -EFAULT;
2164 		}
2165 	} else {
2166 		ret = -EAGAIN;
2167 		if (timeout)
2168 			ret = -EINTR;
2169 	}
2170 
2171 	return ret;
2172 }
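/*
 * Illustrative sketch (userspace, not part of this file): sigtimedwait(2)
 * reaches sys_rt_sigtimedwait() above.  The caller usually blocks the
 * signals it wants to wait for first, so they are queued rather than
 * delivered to a handler:
 *
 *	#include <signal.h>
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	int sig = sigtimedwait(&set, &info, &ts);
 *	// sig == SIGUSR1 on success; -1 with errno == EAGAIN on timeout
 */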
2173 
2174 asmlinkage long
2175 sys_kill(int pid, int sig)
2176 {
2177 	struct siginfo info;
2178 
2179 	info.si_signo = sig;
2180 	info.si_errno = 0;
2181 	info.si_code = SI_USER;
2182 	info.si_pid = current->tgid;
2183 	info.si_uid = current->uid;
2184 
2185 	return kill_something_info(sig, &info, pid);
2186 }
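/*
 * Illustrative note (userspace perspective, not part of this file): the
 * pid argument of kill(2) selects the scope that kill_something_info()
 * resolves: pid > 0 signals that process, pid == 0 the caller's process
 * group, pid == -1 every process the caller may signal, and pid < -1 the
 * process group -pid.
 *
 *	kill(1234, SIGTERM);	// one process
 *	kill(0, SIGTERM);	// our own process group
 *	kill(-5678, SIGHUP);	// process group 5678
 */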
2187 
2188 static int do_tkill(int tgid, int pid, int sig)
2189 {
2190 	int error;
2191 	struct siginfo info;
2192 	struct task_struct *p;
2193 
2194 	error = -ESRCH;
2195 	info.si_signo = sig;
2196 	info.si_errno = 0;
2197 	info.si_code = SI_TKILL;
2198 	info.si_pid = current->tgid;
2199 	info.si_uid = current->uid;
2200 
2201 	read_lock(&tasklist_lock);
2202 	p = find_task_by_pid(pid);
2203 	if (p && (tgid <= 0 || p->tgid == tgid)) {
2204 		error = check_kill_permission(sig, &info, p);
2205 		/*
2206 		 * The null signal is a permissions and process existence
2207 		 * probe.  No signal is actually delivered.
2208 		 */
2209 		if (!error && sig && p->sighand) {
2210 			spin_lock_irq(&p->sighand->siglock);
2211 			handle_stop_signal(sig, p);
2212 			error = specific_send_sig_info(sig, &info, p);
2213 			spin_unlock_irq(&p->sighand->siglock);
2214 		}
2215 	}
2216 	read_unlock(&tasklist_lock);
2217 
2218 	return error;
2219 }
2220 
2221 /**
2222  *  sys_tgkill - send signal to one specific thread
2223  *  @tgid: the thread group ID of the thread
2224  *  @pid: the PID of the thread
2225  *  @sig: signal to be sent
2226  *
2227  *  This syscall also checks the tgid and returns -ESRCH even if the PID
2228  *  exists but it's not belonging to the target process anymore. This
2229  *  exists but no longer belongs to the target process. This
2230  */
2231 asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2232 {
2233 	/* This is only valid for single tasks */
2234 	if (pid <= 0 || tgid <= 0)
2235 		return -EINVAL;
2236 
2237 	return do_tkill(tgid, pid, sig);
2238 }
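/*
 * Illustrative sketch (userspace, not part of this file): older C
 * libraries have no tgkill() wrapper, so callers typically go through
 * syscall(2).  Supplying both the tgid and the tid avoids signalling a
 * recycled thread ID that now belongs to a different process:
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	pid_t tgid = getpid();
 *	pid_t tid  = syscall(SYS_gettid);	// here: the calling thread
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */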
2239 
2240 /*
2241  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2242  */
2243 asmlinkage long
2244 sys_tkill(int pid, int sig)
2245 {
2246 	/* This is only valid for single tasks */
2247 	if (pid <= 0)
2248 		return -EINVAL;
2249 
2250 	return do_tkill(0, pid, sig);
2251 }
2252 
2253 asmlinkage long
2254 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2255 {
2256 	siginfo_t info;
2257 
2258 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2259 		return -EFAULT;
2260 
2261 	/* Not even root can pretend to send signals from the kernel.
2262 	   Nor can they impersonate a kill(), which adds source info.  */
2263 	if (info.si_code >= 0)
2264 		return -EPERM;
2265 	info.si_signo = sig;
2266 
2267 	/* POSIX.1b doesn't mention process groups.  */
2268 	return kill_proc_info(sig, &info, pid);
2269 }
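/*
 * Illustrative sketch (userspace, not part of this file): sigqueue(3) is
 * built on this syscall; the C library fills in a siginfo with
 * si_code = SI_QUEUE (negative, so it passes the check above) and the
 * sender attaches one word of data that the receiver sees as si_value in
 * an SA_SIGINFO handler.  target_pid is an assumed variable.
 *
 *	#include <signal.h>
 *
 *	union sigval value = { .sival_int = 42 };
 *	sigqueue(target_pid, SIGUSR1, value);
 */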
2270 
2271 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2272 {
2273 	struct k_sigaction *k;
2274 	sigset_t mask;
2275 
2276 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2277 		return -EINVAL;
2278 
2279 	k = &current->sighand->action[sig-1];
2280 
2281 	spin_lock_irq(&current->sighand->siglock);
2282 	if (signal_pending(current)) {
2283 		/*
2284 		 * If there might be a fatal signal pending on multiple
2285 		 * threads, make sure we take it before changing the action.
2286 		 */
2287 		spin_unlock_irq(&current->sighand->siglock);
2288 		return -ERESTARTNOINTR;
2289 	}
2290 
2291 	if (oact)
2292 		*oact = *k;
2293 
2294 	if (act) {
2295 		sigdelsetmask(&act->sa.sa_mask,
2296 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
2297 		*k = *act;
2298 		/*
2299 		 * POSIX 3.3.1.3:
2300 		 *  "Setting a signal action to SIG_IGN for a signal that is
2301 		 *   pending shall cause the pending signal to be discarded,
2302 		 *   whether or not it is blocked."
2303 		 *
2304 		 *  "Setting a signal action to SIG_DFL for a signal that is
2305 		 *   pending and whose default action is to ignore the signal
2306 		 *   (for example, SIGCHLD), shall cause the pending signal to
2307 		 *   be discarded, whether or not it is blocked"
2308 		 */
2309 		if (act->sa.sa_handler == SIG_IGN ||
2310 		   (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
2311 			struct task_struct *t = current;
2312 			sigemptyset(&mask);
2313 			sigaddset(&mask, sig);
2314 			rm_from_queue_full(&mask, &t->signal->shared_pending);
2315 			do {
2316 				rm_from_queue_full(&mask, &t->pending);
2317 				recalc_sigpending_tsk(t);
2318 				t = next_thread(t);
2319 			} while (t != current);
2320 		}
2321 	}
2322 
2323 	spin_unlock_irq(&current->sighand->siglock);
2324 	return 0;
2325 }
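/*
 * Illustrative sketch (userspace, not part of this file): sigaction(2)
 * reaches do_sigaction() above via sys_rt_sigaction().  Note the POSIX
 * behaviour implemented above: installing SIG_IGN (or SIG_DFL for a
 * default-ignore signal such as SIGCHLD) also discards any instances
 * already pending.  on_usr1 is a hypothetical handler.
 *
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	struct sigaction sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_sigaction = on_usr1;
 *	sa.sa_flags = SA_SIGINFO | SA_RESTART;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGUSR1, &sa, NULL);
 */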
2326 
2327 int
2328 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2329 {
2330 	stack_t oss;
2331 	int error;
2332 
2333 	if (uoss) {
2334 		oss.ss_sp = (void __user *) current->sas_ss_sp;
2335 		oss.ss_size = current->sas_ss_size;
2336 		oss.ss_flags = sas_ss_flags(sp);
2337 	}
2338 
2339 	if (uss) {
2340 		void __user *ss_sp;
2341 		size_t ss_size;
2342 		int ss_flags;
2343 
2344 		error = -EFAULT;
2345 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2346 		    || __get_user(ss_sp, &uss->ss_sp)
2347 		    || __get_user(ss_flags, &uss->ss_flags)
2348 		    || __get_user(ss_size, &uss->ss_size))
2349 			goto out;
2350 
2351 		error = -EPERM;
2352 		if (on_sig_stack(sp))
2353 			goto out;
2354 
2355 		error = -EINVAL;
2356 		/*
2357 		 * Note: this code used to test ss_flags incorrectly.
2358 		 * Old code may have been written using ss_flags == 0
2359 		 * to mean ss_flags == SS_ONSTACK (as that was the only
2360 		 * way that worked), so ss_flags == 0 is still accepted
2361 		 * here, alongside SS_ONSTACK and SS_DISABLE, to preserve
2362 		 * that older mechanism.
2363 		 */
2364 		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2365 			goto out;
2366 
2367 		if (ss_flags == SS_DISABLE) {
2368 			ss_size = 0;
2369 			ss_sp = NULL;
2370 		} else {
2371 			error = -ENOMEM;
2372 			if (ss_size < MINSIGSTKSZ)
2373 				goto out;
2374 		}
2375 
2376 		current->sas_ss_sp = (unsigned long) ss_sp;
2377 		current->sas_ss_size = ss_size;
2378 	}
2379 
2380 	if (uoss) {
2381 		error = -EFAULT;
2382 		if (copy_to_user(uoss, &oss, sizeof(oss)))
2383 			goto out;
2384 	}
2385 
2386 	error = 0;
2387 out:
2388 	return error;
2389 }
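/*
 * Illustrative sketch (userspace, not part of this file): sigaltstack(2)
 * sets the sas_ss_sp/sas_ss_size values handled above.  A handler that
 * must run even when the normal stack has overflowed (typically SIGSEGV)
 * is installed with SA_ONSTACK so delivery switches to the alternate
 * stack.  segv_handler is a hypothetical function.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss = {
 *		.ss_sp    = malloc(SIGSTKSZ),
 *		.ss_size  = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	sigaltstack(&ss, NULL);
 *
 *	struct sigaction sa = { .sa_handler = segv_handler,
 *				.sa_flags   = SA_ONSTACK };
 *	sigaction(SIGSEGV, &sa, NULL);
 */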
2390 
2391 #ifdef __ARCH_WANT_SYS_SIGPENDING
2392 
2393 asmlinkage long
2394 sys_sigpending(old_sigset_t __user *set)
2395 {
2396 	return do_sigpending(set, sizeof(*set));
2397 }
2398 
2399 #endif
2400 
2401 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2402 /* Some platforms have their own version with special arguments; others
2403    support only sys_rt_sigprocmask.  */
2404 
2405 asmlinkage long
2406 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2407 {
2408 	int error;
2409 	old_sigset_t old_set, new_set;
2410 
2411 	if (set) {
2412 		error = -EFAULT;
2413 		if (copy_from_user(&new_set, set, sizeof(*set)))
2414 			goto out;
2415 		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2416 
2417 		spin_lock_irq(&current->sighand->siglock);
2418 		old_set = current->blocked.sig[0];
2419 
2420 		error = 0;
2421 		switch (how) {
2422 		default:
2423 			error = -EINVAL;
2424 			break;
2425 		case SIG_BLOCK:
2426 			sigaddsetmask(&current->blocked, new_set);
2427 			break;
2428 		case SIG_UNBLOCK:
2429 			sigdelsetmask(&current->blocked, new_set);
2430 			break;
2431 		case SIG_SETMASK:
2432 			current->blocked.sig[0] = new_set;
2433 			break;
2434 		}
2435 
2436 		recalc_sigpending();
2437 		spin_unlock_irq(&current->sighand->siglock);
2438 		if (error)
2439 			goto out;
2440 		if (oset)
2441 			goto set_old;
2442 	} else if (oset) {
2443 		old_set = current->blocked.sig[0];
2444 	set_old:
2445 		error = -EFAULT;
2446 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2447 			goto out;
2448 	}
2449 	error = 0;
2450 out:
2451 	return error;
2452 }
2453 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2454 
2455 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2456 asmlinkage long
2457 sys_rt_sigaction(int sig,
2458 		 const struct sigaction __user *act,
2459 		 struct sigaction __user *oact,
2460 		 size_t sigsetsize)
2461 {
2462 	struct k_sigaction new_sa, old_sa;
2463 	int ret = -EINVAL;
2464 
2465 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2466 	if (sigsetsize != sizeof(sigset_t))
2467 		goto out;
2468 
2469 	if (act) {
2470 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2471 			return -EFAULT;
2472 	}
2473 
2474 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2475 
2476 	if (!ret && oact) {
2477 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2478 			return -EFAULT;
2479 	}
2480 out:
2481 	return ret;
2482 }
2483 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2484 
2485 #ifdef __ARCH_WANT_SYS_SGETMASK
2486 
2487 /*
2488  * For backwards compatibility.  Functionality superseded by sigprocmask.
2489  */
2490 asmlinkage long
2491 sys_sgetmask(void)
2492 {
2493 	/* SMP safe */
2494 	return current->blocked.sig[0];
2495 }
2496 
2497 asmlinkage long
2498 sys_ssetmask(int newmask)
2499 {
2500 	int old;
2501 
2502 	spin_lock_irq(&current->sighand->siglock);
2503 	old = current->blocked.sig[0];
2504 
2505 	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2506 						  sigmask(SIGSTOP)));
2507 	recalc_sigpending();
2508 	spin_unlock_irq(&current->sighand->siglock);
2509 
2510 	return old;
2511 }
2512 #endif /* __ARCH_WANT_SYS_SGETMASK */
2513 
2514 #ifdef __ARCH_WANT_SYS_SIGNAL
2515 /*
2516  * For backwards compatibility.  Functionality superseded by sigaction.
2517  */
2518 asmlinkage unsigned long
2519 sys_signal(int sig, __sighandler_t handler)
2520 {
2521 	struct k_sigaction new_sa, old_sa;
2522 	int ret;
2523 
2524 	new_sa.sa.sa_handler = handler;
2525 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2526 	sigemptyset(&new_sa.sa.sa_mask);
2527 
2528 	ret = do_sigaction(sig, &new_sa, &old_sa);
2529 
2530 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2531 }
2532 #endif /* __ARCH_WANT_SYS_SIGNAL */
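/*
 * Illustrative note (userspace perspective, not part of this file): the
 * SA_ONESHOT | SA_NOMASK flags used above give the old SysV signal()
 * semantics -- the handler is reset to SIG_DFL after one delivery and the
 * signal is not blocked while the handler runs -- which is why portable
 * code re-installs the handler or simply uses sigaction(2):
 *
 *	void handler(int sig)
 *	{
 *		signal(sig, handler);	// re-arm; inherently racy, hence
 *					// the preference for sigaction()
 *		// ... actual work ...
 *	}
 */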
2533 
2534 #ifdef __ARCH_WANT_SYS_PAUSE
2535 
2536 asmlinkage long
2537 sys_pause(void)
2538 {
2539 	current->state = TASK_INTERRUPTIBLE;
2540 	schedule();
2541 	return -ERESTARTNOHAND;
2542 }
2543 
2544 #endif
2545 
2546 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2547 asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2548 {
2549 	sigset_t newset;
2550 
2551 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2552 	if (sigsetsize != sizeof(sigset_t))
2553 		return -EINVAL;
2554 
2555 	if (copy_from_user(&newset, unewset, sizeof(newset)))
2556 		return -EFAULT;
2557 	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2558 
2559 	spin_lock_irq(&current->sighand->siglock);
2560 	current->saved_sigmask = current->blocked;
2561 	current->blocked = newset;
2562 	recalc_sigpending();
2563 	spin_unlock_irq(&current->sighand->siglock);
2564 
2565 	current->state = TASK_INTERRUPTIBLE;
2566 	schedule();
2567 	set_thread_flag(TIF_RESTORE_SIGMASK);
2568 	return -ERESTARTNOHAND;
2569 }
2570 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
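/*
 * Illustrative sketch (userspace, not part of this file): the value of
 * rt_sigsuspend() above is that the mask swap and the sleep happen
 * atomically under siglock, closing the classic race where a signal
 * arrives between "unblock" and "pause()".  done is a hypothetical
 * volatile sig_atomic_t flag set by a SIGUSR1 handler, and SIGUSR1 is
 * assumed not to have been blocked originally.
 *
 *	#include <signal.h>
 *
 *	sigset_t block, waitmask;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &waitmask);	// waitmask: old mask
 *
 *	while (!done)
 *		sigsuspend(&waitmask);	// atomically unblock SIGUSR1 + sleep
 */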
2571 
2572 void __init signals_init(void)
2573 {
2574 	sigqueue_cachep =
2575 		kmem_cache_create("sigqueue",
2576 				  sizeof(struct sigqueue),
2577 				  __alignof__(struct sigqueue),
2578 				  SLAB_PANIC, NULL, NULL);
2579 }
2580