xref: /linux/kernel/signal.c (revision 2624f124b3b5d550ab2fbef7ee3bc0e1fed09722)
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *		Changes to use preallocated sigqueue structures
10  *		to allow signals to be sent reliably.
11  */
12 
13 #include <linux/config.h>
14 #include <linux/slab.h>
15 #include <linux/module.h>
16 #include <linux/smp_lock.h>
17 #include <linux/init.h>
18 #include <linux/sched.h>
19 #include <linux/fs.h>
20 #include <linux/tty.h>
21 #include <linux/binfmts.h>
22 #include <linux/security.h>
23 #include <linux/syscalls.h>
24 #include <linux/ptrace.h>
25 #include <linux/posix-timers.h>
26 #include <linux/signal.h>
27 #include <linux/audit.h>
28 #include <asm/param.h>
29 #include <asm/uaccess.h>
30 #include <asm/unistd.h>
31 #include <asm/siginfo.h>
32 
33 /*
34  * SLAB caches for signal bits.
35  */
36 
37 static kmem_cache_t *sigqueue_cachep;
38 
39 /*
40  * In POSIX a signal is sent either to a specific thread (Linux task)
41  * or to the process as a whole (Linux thread group).  How the signal
42  * is sent determines whether it's to one thread or the whole group,
43  * which determines which signal mask(s) are involved in blocking it
44  * from being delivered until later.  When the signal is delivered,
45  * either it's caught or ignored by a user handler or it has a default
46  * effect that applies to the whole thread group (POSIX process).
47  *
48  * The possible effects an unblocked signal set to SIG_DFL can have are:
49  *   ignore	- Nothing Happens
50  *   terminate	- kill the process, i.e. all threads in the group,
51  * 		  similar to exit_group.  The group leader (only) reports
52  *		  WIFSIGNALED status to its parent.
53  *   coredump	- write a core dump file describing all threads using
54  *		  the same mm and then kill all those threads
55  *   stop 	- stop all the threads in the group, i.e. TASK_STOPPED state
56  *
57  * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
58  * Other signals when not blocked and set to SIG_DFL behaves as follows.
59  * The job control signals also have other special effects.
60  *
61  *	+--------------------+------------------+
62  *	|  POSIX signal      |  default action  |
63  *	+--------------------+------------------+
64  *	|  SIGHUP            |  terminate	|
65  *	|  SIGINT            |	terminate	|
66  *	|  SIGQUIT           |	coredump 	|
67  *	|  SIGILL            |	coredump 	|
68  *	|  SIGTRAP           |	coredump 	|
69  *	|  SIGABRT/SIGIOT    |	coredump 	|
70  *	|  SIGBUS            |	coredump 	|
71  *	|  SIGFPE            |	coredump 	|
72  *	|  SIGKILL           |	terminate(+)	|
73  *	|  SIGUSR1           |	terminate	|
74  *	|  SIGSEGV           |	coredump 	|
75  *	|  SIGUSR2           |	terminate	|
76  *	|  SIGPIPE           |	terminate	|
77  *	|  SIGALRM           |	terminate	|
78  *	|  SIGTERM           |	terminate	|
79  *	|  SIGCHLD           |	ignore   	|
80  *	|  SIGCONT           |	ignore(*)	|
81  *	|  SIGSTOP           |	stop(*)(+)  	|
82  *	|  SIGTSTP           |	stop(*)  	|
83  *	|  SIGTTIN           |	stop(*)  	|
84  *	|  SIGTTOU           |	stop(*)  	|
85  *	|  SIGURG            |	ignore   	|
86  *	|  SIGXCPU           |	coredump 	|
87  *	|  SIGXFSZ           |	coredump 	|
88  *	|  SIGVTALRM         |	terminate	|
89  *	|  SIGPROF           |	terminate	|
90  *	|  SIGPOLL/SIGIO     |	terminate	|
91  *	|  SIGSYS/SIGUNUSED  |	coredump 	|
92  *	|  SIGSTKFLT         |	terminate	|
93  *	|  SIGWINCH          |	ignore   	|
94  *	|  SIGPWR            |	terminate	|
95  *	|  SIGRTMIN-SIGRTMAX |	terminate       |
96  *	+--------------------+------------------+
97  *	|  non-POSIX signal  |  default action  |
98  *	+--------------------+------------------+
99  *	|  SIGEMT            |  coredump	|
100  *	+--------------------+------------------+
101  *
102  * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
103  * (*) Special job control effects:
104  * When SIGCONT is sent, it resumes the process (all threads in the group)
105  * from TASK_STOPPED state and also clears any pending/queued stop signals
106  * (any of those marked with "stop(*)").  This happens regardless of blocking,
107  * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
108  * any pending/queued SIGCONT signals; this happens regardless of blocking,
109  * catching, or ignored the stop signal, though (except for SIGSTOP) the
110  * default action of stopping the process may happen later or never.
111  */
112 
113 #ifdef SIGEMT
114 #define M_SIGEMT	M(SIGEMT)
115 #else
116 #define M_SIGEMT	0
117 #endif
118 
119 #if SIGRTMIN > BITS_PER_LONG
120 #define M(sig) (1ULL << ((sig)-1))
121 #else
122 #define M(sig) (1UL << ((sig)-1))
123 #endif
124 #define T(sig, mask) (M(sig) & (mask))
125 
126 #define SIG_KERNEL_ONLY_MASK (\
127 	M(SIGKILL)   |  M(SIGSTOP)                                   )
128 
129 #define SIG_KERNEL_STOP_MASK (\
130 	M(SIGSTOP)   |  M(SIGTSTP)   |  M(SIGTTIN)   |  M(SIGTTOU)   )
131 
132 #define SIG_KERNEL_COREDUMP_MASK (\
133         M(SIGQUIT)   |  M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)   | \
134         M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGBUS)    |  M(SIGSYS)    | \
135         M(SIGXCPU)   |  M(SIGXFSZ)   |  M_SIGEMT                     )
136 
137 #define SIG_KERNEL_IGNORE_MASK (\
138         M(SIGCONT)   |  M(SIGCHLD)   |  M(SIGWINCH)  |  M(SIGURG)    )
139 
140 #define sig_kernel_only(sig) \
141 		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_ONLY_MASK))
142 #define sig_kernel_coredump(sig) \
143 		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_COREDUMP_MASK))
144 #define sig_kernel_ignore(sig) \
145 		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_IGNORE_MASK))
146 #define sig_kernel_stop(sig) \
147 		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK))
148 
149 #define sig_user_defined(t, signr) \
150 	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
151 	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
152 
153 #define sig_fatal(t, signr) \
154 	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
155 	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
156 
157 static int sig_ignored(struct task_struct *t, int sig)
158 {
159 	void __user * handler;
160 
161 	/*
162 	 * Tracers always want to know about signals..
163 	 */
164 	if (t->ptrace & PT_PTRACED)
165 		return 0;
166 
167 	/*
168 	 * Blocked signals are never ignored, since the
169 	 * signal handler may change by the time it is
170 	 * unblocked.
171 	 */
172 	if (sigismember(&t->blocked, sig))
173 		return 0;
174 
175 	/* Is it explicitly or implicitly ignored? */
176 	handler = t->sighand->action[sig-1].sa.sa_handler;
177 	return   handler == SIG_IGN ||
178 		(handler == SIG_DFL && sig_kernel_ignore(sig));
179 }
180 
181 /*
182  * Re-calculate pending state from the set of locally pending
183  * signals, globally pending signals, and blocked signals.
184  */
185 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
186 {
187 	unsigned long ready;
188 	long i;
189 
190 	switch (_NSIG_WORDS) {
191 	default:
192 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
193 			ready |= signal->sig[i] &~ blocked->sig[i];
194 		break;
195 
196 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
197 		ready |= signal->sig[2] &~ blocked->sig[2];
198 		ready |= signal->sig[1] &~ blocked->sig[1];
199 		ready |= signal->sig[0] &~ blocked->sig[0];
200 		break;
201 
202 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
203 		ready |= signal->sig[0] &~ blocked->sig[0];
204 		break;
205 
206 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
207 	}
208 	return ready !=	0;
209 }
210 
211 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
212 
213 fastcall void recalc_sigpending_tsk(struct task_struct *t)
214 {
215 	if (t->signal->group_stop_count > 0 ||
216 	    (freezing(t)) ||
217 	    PENDING(&t->pending, &t->blocked) ||
218 	    PENDING(&t->signal->shared_pending, &t->blocked))
219 		set_tsk_thread_flag(t, TIF_SIGPENDING);
220 	else
221 		clear_tsk_thread_flag(t, TIF_SIGPENDING);
222 }
223 
224 void recalc_sigpending(void)
225 {
226 	recalc_sigpending_tsk(current);
227 }
228 
229 /* Given the mask, find the first available signal that should be serviced. */
230 
231 static int
232 next_signal(struct sigpending *pending, sigset_t *mask)
233 {
234 	unsigned long i, *s, *m, x;
235 	int sig = 0;
236 
237 	s = pending->signal.sig;
238 	m = mask->sig;
239 	switch (_NSIG_WORDS) {
240 	default:
241 		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
242 			if ((x = *s &~ *m) != 0) {
243 				sig = ffz(~x) + i*_NSIG_BPW + 1;
244 				break;
245 			}
246 		break;
247 
248 	case 2: if ((x = s[0] &~ m[0]) != 0)
249 			sig = 1;
250 		else if ((x = s[1] &~ m[1]) != 0)
251 			sig = _NSIG_BPW + 1;
252 		else
253 			break;
254 		sig += ffz(~x);
255 		break;
256 
257 	case 1: if ((x = *s &~ *m) != 0)
258 			sig = ffz(~x) + 1;
259 		break;
260 	}
261 
262 	return sig;
263 }
264 
265 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, unsigned int __nocast flags,
266 					 int override_rlimit)
267 {
268 	struct sigqueue *q = NULL;
269 
270 	atomic_inc(&t->user->sigpending);
271 	if (override_rlimit ||
272 	    atomic_read(&t->user->sigpending) <=
273 			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
274 		q = kmem_cache_alloc(sigqueue_cachep, flags);
275 	if (unlikely(q == NULL)) {
276 		atomic_dec(&t->user->sigpending);
277 	} else {
278 		INIT_LIST_HEAD(&q->list);
279 		q->flags = 0;
280 		q->lock = NULL;
281 		q->user = get_uid(t->user);
282 	}
283 	return(q);
284 }
285 
286 static inline void __sigqueue_free(struct sigqueue *q)
287 {
288 	if (q->flags & SIGQUEUE_PREALLOC)
289 		return;
290 	atomic_dec(&q->user->sigpending);
291 	free_uid(q->user);
292 	kmem_cache_free(sigqueue_cachep, q);
293 }
294 
295 static void flush_sigqueue(struct sigpending *queue)
296 {
297 	struct sigqueue *q;
298 
299 	sigemptyset(&queue->signal);
300 	while (!list_empty(&queue->list)) {
301 		q = list_entry(queue->list.next, struct sigqueue , list);
302 		list_del_init(&q->list);
303 		__sigqueue_free(q);
304 	}
305 }
306 
307 /*
308  * Flush all pending signals for a task.
309  */
310 
311 void
312 flush_signals(struct task_struct *t)
313 {
314 	unsigned long flags;
315 
316 	spin_lock_irqsave(&t->sighand->siglock, flags);
317 	clear_tsk_thread_flag(t,TIF_SIGPENDING);
318 	flush_sigqueue(&t->pending);
319 	flush_sigqueue(&t->signal->shared_pending);
320 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
321 }
322 
323 /*
324  * This function expects the tasklist_lock write-locked.
325  */
326 void __exit_sighand(struct task_struct *tsk)
327 {
328 	struct sighand_struct * sighand = tsk->sighand;
329 
330 	/* Ok, we're done with the signal handlers */
331 	tsk->sighand = NULL;
332 	if (atomic_dec_and_test(&sighand->count))
333 		kmem_cache_free(sighand_cachep, sighand);
334 }
335 
336 void exit_sighand(struct task_struct *tsk)
337 {
338 	write_lock_irq(&tasklist_lock);
339 	__exit_sighand(tsk);
340 	write_unlock_irq(&tasklist_lock);
341 }
342 
343 /*
344  * This function expects the tasklist_lock write-locked.
345  */
346 void __exit_signal(struct task_struct *tsk)
347 {
348 	struct signal_struct * sig = tsk->signal;
349 	struct sighand_struct * sighand = tsk->sighand;
350 
351 	if (!sig)
352 		BUG();
353 	if (!atomic_read(&sig->count))
354 		BUG();
355 	spin_lock(&sighand->siglock);
356 	posix_cpu_timers_exit(tsk);
357 	if (atomic_dec_and_test(&sig->count)) {
358 		posix_cpu_timers_exit_group(tsk);
359 		if (tsk == sig->curr_target)
360 			sig->curr_target = next_thread(tsk);
361 		tsk->signal = NULL;
362 		spin_unlock(&sighand->siglock);
363 		flush_sigqueue(&sig->shared_pending);
364 	} else {
365 		/*
366 		 * If there is any task waiting for the group exit
367 		 * then notify it:
368 		 */
369 		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
370 			wake_up_process(sig->group_exit_task);
371 			sig->group_exit_task = NULL;
372 		}
373 		if (tsk == sig->curr_target)
374 			sig->curr_target = next_thread(tsk);
375 		tsk->signal = NULL;
376 		/*
377 		 * Accumulate here the counters for all threads but the
378 		 * group leader as they die, so they can be added into
379 		 * the process-wide totals when those are taken.
380 		 * The group leader stays around as a zombie as long
381 		 * as there are other threads.  When it gets reaped,
382 		 * the exit.c code will add its counts into these totals.
383 		 * We won't ever get here for the group leader, since it
384 		 * will have been the last reference on the signal_struct.
385 		 */
386 		sig->utime = cputime_add(sig->utime, tsk->utime);
387 		sig->stime = cputime_add(sig->stime, tsk->stime);
388 		sig->min_flt += tsk->min_flt;
389 		sig->maj_flt += tsk->maj_flt;
390 		sig->nvcsw += tsk->nvcsw;
391 		sig->nivcsw += tsk->nivcsw;
392 		sig->sched_time += tsk->sched_time;
393 		spin_unlock(&sighand->siglock);
394 		sig = NULL;	/* Marker for below.  */
395 	}
396 	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
397 	flush_sigqueue(&tsk->pending);
398 	if (sig) {
399 		/*
400 		 * We are cleaning up the signal_struct here.  We delayed
401 		 * calling exit_itimers until after flush_sigqueue, just in
402 		 * case our thread-local pending queue contained a queued
403 		 * timer signal that would have been cleared in
404 		 * exit_itimers.  When that called sigqueue_free, it would
405 		 * attempt to re-take the tasklist_lock and deadlock.  This
406 		 * can never happen if we ensure that all queues the
407 		 * timer's signal might be queued on have been flushed
408 		 * first.  The shared_pending queue, and our own pending
409 		 * queue are the only queues the timer could be on, since
410 		 * there are no other threads left in the group and timer
411 		 * signals are constrained to threads inside the group.
412 		 */
413 		exit_itimers(sig);
414 		exit_thread_group_keys(sig);
415 		kmem_cache_free(signal_cachep, sig);
416 	}
417 }
418 
419 void exit_signal(struct task_struct *tsk)
420 {
421 	write_lock_irq(&tasklist_lock);
422 	__exit_signal(tsk);
423 	write_unlock_irq(&tasklist_lock);
424 }
425 
426 /*
427  * Flush all handlers for a task.
428  */
429 
430 void
431 flush_signal_handlers(struct task_struct *t, int force_default)
432 {
433 	int i;
434 	struct k_sigaction *ka = &t->sighand->action[0];
435 	for (i = _NSIG ; i != 0 ; i--) {
436 		if (force_default || ka->sa.sa_handler != SIG_IGN)
437 			ka->sa.sa_handler = SIG_DFL;
438 		ka->sa.sa_flags = 0;
439 		sigemptyset(&ka->sa.sa_mask);
440 		ka++;
441 	}
442 }
443 
444 
445 /* Notify the system that a driver wants to block all signals for this
446  * process, and wants to be notified if any signals at all were to be
447  * sent/acted upon.  If the notifier routine returns non-zero, then the
448  * signal will be acted upon after all.  If the notifier routine returns 0,
449  * then then signal will be blocked.  Only one block per process is
450  * allowed.  priv is a pointer to private data that the notifier routine
451  * can use to determine if the signal should be blocked or not.  */
452 
453 void
454 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
455 {
456 	unsigned long flags;
457 
458 	spin_lock_irqsave(&current->sighand->siglock, flags);
459 	current->notifier_mask = mask;
460 	current->notifier_data = priv;
461 	current->notifier = notifier;
462 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
463 }
464 
465 /* Notify the system that blocking has ended. */
466 
467 void
468 unblock_all_signals(void)
469 {
470 	unsigned long flags;
471 
472 	spin_lock_irqsave(&current->sighand->siglock, flags);
473 	current->notifier = NULL;
474 	current->notifier_data = NULL;
475 	recalc_sigpending();
476 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
477 }
478 
479 static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
480 {
481 	struct sigqueue *q, *first = NULL;
482 	int still_pending = 0;
483 
484 	if (unlikely(!sigismember(&list->signal, sig)))
485 		return 0;
486 
487 	/*
488 	 * Collect the siginfo appropriate to this signal.  Check if
489 	 * there is another siginfo for the same signal.
490 	*/
491 	list_for_each_entry(q, &list->list, list) {
492 		if (q->info.si_signo == sig) {
493 			if (first) {
494 				still_pending = 1;
495 				break;
496 			}
497 			first = q;
498 		}
499 	}
500 	if (first) {
501 		list_del_init(&first->list);
502 		copy_siginfo(info, &first->info);
503 		__sigqueue_free(first);
504 		if (!still_pending)
505 			sigdelset(&list->signal, sig);
506 	} else {
507 
508 		/* Ok, it wasn't in the queue.  This must be
509 		   a fast-pathed signal or we must have been
510 		   out of queue space.  So zero out the info.
511 		 */
512 		sigdelset(&list->signal, sig);
513 		info->si_signo = sig;
514 		info->si_errno = 0;
515 		info->si_code = 0;
516 		info->si_pid = 0;
517 		info->si_uid = 0;
518 	}
519 	return 1;
520 }
521 
522 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
523 			siginfo_t *info)
524 {
525 	int sig = 0;
526 
527 	/* SIGKILL must have priority, otherwise it is quite easy
528 	 * to create an unkillable process, sending sig < SIGKILL
529 	 * to self */
530 	if (unlikely(sigismember(&pending->signal, SIGKILL))) {
531 		if (!sigismember(mask, SIGKILL))
532 			sig = SIGKILL;
533 	}
534 
535 	if (likely(!sig))
536 		sig = next_signal(pending, mask);
537 	if (sig) {
538 		if (current->notifier) {
539 			if (sigismember(current->notifier_mask, sig)) {
540 				if (!(current->notifier)(current->notifier_data)) {
541 					clear_thread_flag(TIF_SIGPENDING);
542 					return 0;
543 				}
544 			}
545 		}
546 
547 		if (!collect_signal(sig, pending, info))
548 			sig = 0;
549 
550 	}
551 	recalc_sigpending();
552 
553 	return sig;
554 }
555 
556 /*
557  * Dequeue a signal and return the element to the caller, which is
558  * expected to free it.
559  *
560  * All callers have to hold the siglock.
561  */
562 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
563 {
564 	int signr = __dequeue_signal(&tsk->pending, mask, info);
565 	if (!signr)
566 		signr = __dequeue_signal(&tsk->signal->shared_pending,
567 					 mask, info);
568  	if (signr && unlikely(sig_kernel_stop(signr))) {
569  		/*
570  		 * Set a marker that we have dequeued a stop signal.  Our
571  		 * caller might release the siglock and then the pending
572  		 * stop signal it is about to process is no longer in the
573  		 * pending bitmasks, but must still be cleared by a SIGCONT
574  		 * (and overruled by a SIGKILL).  So those cases clear this
575  		 * shared flag after we've set it.  Note that this flag may
576  		 * remain set after the signal we return is ignored or
577  		 * handled.  That doesn't matter because its only purpose
578  		 * is to alert stop-signal processing code when another
579  		 * processor has come along and cleared the flag.
580  		 */
581  		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
582  	}
583 	if ( signr &&
584 	     ((info->si_code & __SI_MASK) == __SI_TIMER) &&
585 	     info->si_sys_private){
586 		/*
587 		 * Release the siglock to ensure proper locking order
588 		 * of timer locks outside of siglocks.  Note, we leave
589 		 * irqs disabled here, since the posix-timers code is
590 		 * about to disable them again anyway.
591 		 */
592 		spin_unlock(&tsk->sighand->siglock);
593 		do_schedule_next_timer(info);
594 		spin_lock(&tsk->sighand->siglock);
595 	}
596 	return signr;
597 }
598 
599 /*
600  * Tell a process that it has a new active signal..
601  *
602  * NOTE! we rely on the previous spin_lock to
603  * lock interrupts for us! We can only be called with
604  * "siglock" held, and the local interrupt must
605  * have been disabled when that got acquired!
606  *
607  * No need to set need_resched since signal event passing
608  * goes through ->blocked
609  */
610 void signal_wake_up(struct task_struct *t, int resume)
611 {
612 	unsigned int mask;
613 
614 	set_tsk_thread_flag(t, TIF_SIGPENDING);
615 
616 	/*
617 	 * For SIGKILL, we want to wake it up in the stopped/traced case.
618 	 * We don't check t->state here because there is a race with it
619 	 * executing another processor and just now entering stopped state.
620 	 * By using wake_up_state, we ensure the process will wake up and
621 	 * handle its death signal.
622 	 */
623 	mask = TASK_INTERRUPTIBLE;
624 	if (resume)
625 		mask |= TASK_STOPPED | TASK_TRACED;
626 	if (!wake_up_state(t, mask))
627 		kick_process(t);
628 }
629 
630 /*
631  * Remove signals in mask from the pending set and queue.
632  * Returns 1 if any signals were found.
633  *
634  * All callers must be holding the siglock.
635  */
636 static int rm_from_queue(unsigned long mask, struct sigpending *s)
637 {
638 	struct sigqueue *q, *n;
639 
640 	if (!sigtestsetmask(&s->signal, mask))
641 		return 0;
642 
643 	sigdelsetmask(&s->signal, mask);
644 	list_for_each_entry_safe(q, n, &s->list, list) {
645 		if (q->info.si_signo < SIGRTMIN &&
646 		    (mask & sigmask(q->info.si_signo))) {
647 			list_del_init(&q->list);
648 			__sigqueue_free(q);
649 		}
650 	}
651 	return 1;
652 }
653 
654 /*
655  * Bad permissions for sending the signal
656  */
657 static int check_kill_permission(int sig, struct siginfo *info,
658 				 struct task_struct *t)
659 {
660 	int error = -EINVAL;
661 	if (!valid_signal(sig))
662 		return error;
663 	error = -EPERM;
664 	if ((!info || ((unsigned long)info != 1 &&
665 			(unsigned long)info != 2 && SI_FROMUSER(info)))
666 	    && ((sig != SIGCONT) ||
667 		(current->signal->session != t->signal->session))
668 	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
669 	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
670 	    && !capable(CAP_KILL))
671 		return error;
672 
673 	error = security_task_kill(t, info, sig);
674 	if (!error)
675 		audit_signal_info(sig, t); /* Let audit system see the signal */
676 	return error;
677 }
678 
679 /* forward decl */
680 static void do_notify_parent_cldstop(struct task_struct *tsk,
681 				     int to_self,
682 				     int why);
683 
684 /*
685  * Handle magic process-wide effects of stop/continue signals.
686  * Unlike the signal actions, these happen immediately at signal-generation
687  * time regardless of blocking, ignoring, or handling.  This does the
688  * actual continuing for SIGCONT, but not the actual stopping for stop
689  * signals.  The process stop is done as a signal action for SIG_DFL.
690  */
691 static void handle_stop_signal(int sig, struct task_struct *p)
692 {
693 	struct task_struct *t;
694 
695 	if (p->signal->flags & SIGNAL_GROUP_EXIT)
696 		/*
697 		 * The process is in the middle of dying already.
698 		 */
699 		return;
700 
701 	if (sig_kernel_stop(sig)) {
702 		/*
703 		 * This is a stop signal.  Remove SIGCONT from all queues.
704 		 */
705 		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
706 		t = p;
707 		do {
708 			rm_from_queue(sigmask(SIGCONT), &t->pending);
709 			t = next_thread(t);
710 		} while (t != p);
711 	} else if (sig == SIGCONT) {
712 		/*
713 		 * Remove all stop signals from all queues,
714 		 * and wake all threads.
715 		 */
716 		if (unlikely(p->signal->group_stop_count > 0)) {
717 			/*
718 			 * There was a group stop in progress.  We'll
719 			 * pretend it finished before we got here.  We are
720 			 * obliged to report it to the parent: if the
721 			 * SIGSTOP happened "after" this SIGCONT, then it
722 			 * would have cleared this pending SIGCONT.  If it
723 			 * happened "before" this SIGCONT, then the parent
724 			 * got the SIGCHLD about the stop finishing before
725 			 * the continue happened.  We do the notification
726 			 * now, and it's as if the stop had finished and
727 			 * the SIGCHLD was pending on entry to this kill.
728 			 */
729 			p->signal->group_stop_count = 0;
730 			p->signal->flags = SIGNAL_STOP_CONTINUED;
731 			spin_unlock(&p->sighand->siglock);
732 			do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
733 			spin_lock(&p->sighand->siglock);
734 		}
735 		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
736 		t = p;
737 		do {
738 			unsigned int state;
739 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
740 
741 			/*
742 			 * If there is a handler for SIGCONT, we must make
743 			 * sure that no thread returns to user mode before
744 			 * we post the signal, in case it was the only
745 			 * thread eligible to run the signal handler--then
746 			 * it must not do anything between resuming and
747 			 * running the handler.  With the TIF_SIGPENDING
748 			 * flag set, the thread will pause and acquire the
749 			 * siglock that we hold now and until we've queued
750 			 * the pending signal.
751 			 *
752 			 * Wake up the stopped thread _after_ setting
753 			 * TIF_SIGPENDING
754 			 */
755 			state = TASK_STOPPED;
756 			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
757 				set_tsk_thread_flag(t, TIF_SIGPENDING);
758 				state |= TASK_INTERRUPTIBLE;
759 			}
760 			wake_up_state(t, state);
761 
762 			t = next_thread(t);
763 		} while (t != p);
764 
765 		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
766 			/*
767 			 * We were in fact stopped, and are now continued.
768 			 * Notify the parent with CLD_CONTINUED.
769 			 */
770 			p->signal->flags = SIGNAL_STOP_CONTINUED;
771 			p->signal->group_exit_code = 0;
772 			spin_unlock(&p->sighand->siglock);
773 			do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
774 			spin_lock(&p->sighand->siglock);
775 		} else {
776 			/*
777 			 * We are not stopped, but there could be a stop
778 			 * signal in the middle of being processed after
779 			 * being removed from the queue.  Clear that too.
780 			 */
781 			p->signal->flags = 0;
782 		}
783 	} else if (sig == SIGKILL) {
784 		/*
785 		 * Make sure that any pending stop signal already dequeued
786 		 * is undone by the wakeup for SIGKILL.
787 		 */
788 		p->signal->flags = 0;
789 	}
790 }
791 
792 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
793 			struct sigpending *signals)
794 {
795 	struct sigqueue * q = NULL;
796 	int ret = 0;
797 
798 	/*
799 	 * fast-pathed signals for kernel-internal things like SIGSTOP
800 	 * or SIGKILL.
801 	 */
802 	if ((unsigned long)info == 2)
803 		goto out_set;
804 
805 	/* Real-time signals must be queued if sent by sigqueue, or
806 	   some other real-time mechanism.  It is implementation
807 	   defined whether kill() does so.  We attempt to do so, on
808 	   the principle of least surprise, but since kill is not
809 	   allowed to fail with EAGAIN when low on memory we just
810 	   make sure at least one signal gets delivered and don't
811 	   pass on the info struct.  */
812 
813 	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
814 					     ((unsigned long) info < 2 ||
815 					      info->si_code >= 0)));
816 	if (q) {
817 		list_add_tail(&q->list, &signals->list);
818 		switch ((unsigned long) info) {
819 		case 0:
820 			q->info.si_signo = sig;
821 			q->info.si_errno = 0;
822 			q->info.si_code = SI_USER;
823 			q->info.si_pid = current->pid;
824 			q->info.si_uid = current->uid;
825 			break;
826 		case 1:
827 			q->info.si_signo = sig;
828 			q->info.si_errno = 0;
829 			q->info.si_code = SI_KERNEL;
830 			q->info.si_pid = 0;
831 			q->info.si_uid = 0;
832 			break;
833 		default:
834 			copy_siginfo(&q->info, info);
835 			break;
836 		}
837 	} else {
838 		if (sig >= SIGRTMIN && info && (unsigned long)info != 1
839 		   && info->si_code != SI_USER)
840 		/*
841 		 * Queue overflow, abort.  We may abort if the signal was rt
842 		 * and sent by user using something other than kill().
843 		 */
844 			return -EAGAIN;
845 		if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
846 			/*
847 			 * Set up a return to indicate that we dropped
848 			 * the signal.
849 			 */
850 			ret = info->si_sys_private;
851 	}
852 
853 out_set:
854 	sigaddset(&signals->signal, sig);
855 	return ret;
856 }
857 
858 #define LEGACY_QUEUE(sigptr, sig) \
859 	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
860 
861 
862 static int
863 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
864 {
865 	int ret = 0;
866 
867 	if (!irqs_disabled())
868 		BUG();
869 	assert_spin_locked(&t->sighand->siglock);
870 
871 	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
872 		/*
873 		 * Set up a return to indicate that we dropped the signal.
874 		 */
875 		ret = info->si_sys_private;
876 
877 	/* Short-circuit ignored signals.  */
878 	if (sig_ignored(t, sig))
879 		goto out;
880 
881 	/* Support queueing exactly one non-rt signal, so that we
882 	   can get more detailed information about the cause of
883 	   the signal. */
884 	if (LEGACY_QUEUE(&t->pending, sig))
885 		goto out;
886 
887 	ret = send_signal(sig, info, t, &t->pending);
888 	if (!ret && !sigismember(&t->blocked, sig))
889 		signal_wake_up(t, sig == SIGKILL);
890 out:
891 	return ret;
892 }
893 
894 /*
895  * Force a signal that the process can't ignore: if necessary
896  * we unblock the signal and change any SIG_IGN to SIG_DFL.
897  */
898 
899 int
900 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
901 {
902 	unsigned long int flags;
903 	int ret;
904 
905 	spin_lock_irqsave(&t->sighand->siglock, flags);
906 	if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
907 		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
908 		sigdelset(&t->blocked, sig);
909 		recalc_sigpending_tsk(t);
910 	}
911 	ret = specific_send_sig_info(sig, info, t);
912 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
913 
914 	return ret;
915 }
916 
917 void
918 force_sig_specific(int sig, struct task_struct *t)
919 {
920 	unsigned long int flags;
921 
922 	spin_lock_irqsave(&t->sighand->siglock, flags);
923 	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
924 		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
925 	sigdelset(&t->blocked, sig);
926 	recalc_sigpending_tsk(t);
927 	specific_send_sig_info(sig, (void *)2, t);
928 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
929 }
930 
931 /*
932  * Test if P wants to take SIG.  After we've checked all threads with this,
933  * it's equivalent to finding no threads not blocking SIG.  Any threads not
934  * blocking SIG were ruled out because they are not running and already
935  * have pending signals.  Such threads will dequeue from the shared queue
936  * as soon as they're available, so putting the signal on the shared queue
937  * will be equivalent to sending it to one such thread.
938  */
939 #define wants_signal(sig, p, mask) 			\
940 	(!sigismember(&(p)->blocked, sig)		\
941 	 && !((p)->state & mask)			\
942 	 && !((p)->flags & PF_EXITING)			\
943 	 && (task_curr(p) || !signal_pending(p)))
944 
945 
946 static void
947 __group_complete_signal(int sig, struct task_struct *p)
948 {
949 	unsigned int mask;
950 	struct task_struct *t;
951 
952 	/*
953 	 * Don't bother traced and stopped tasks (but
954 	 * SIGKILL will punch through that).
955 	 */
956 	mask = TASK_STOPPED | TASK_TRACED;
957 	if (sig == SIGKILL)
958 		mask = 0;
959 
960 	/*
961 	 * Now find a thread we can wake up to take the signal off the queue.
962 	 *
963 	 * If the main thread wants the signal, it gets first crack.
964 	 * Probably the least surprising to the average bear.
965 	 */
966 	if (wants_signal(sig, p, mask))
967 		t = p;
968 	else if (thread_group_empty(p))
969 		/*
970 		 * There is just one thread and it does not need to be woken.
971 		 * It will dequeue unblocked signals before it runs again.
972 		 */
973 		return;
974 	else {
975 		/*
976 		 * Otherwise try to find a suitable thread.
977 		 */
978 		t = p->signal->curr_target;
979 		if (t == NULL)
980 			/* restart balancing at this thread */
981 			t = p->signal->curr_target = p;
982 		BUG_ON(t->tgid != p->tgid);
983 
984 		while (!wants_signal(sig, t, mask)) {
985 			t = next_thread(t);
986 			if (t == p->signal->curr_target)
987 				/*
988 				 * No thread needs to be woken.
989 				 * Any eligible threads will see
990 				 * the signal in the queue soon.
991 				 */
992 				return;
993 		}
994 		p->signal->curr_target = t;
995 	}
996 
997 	/*
998 	 * Found a killable thread.  If the signal will be fatal,
999 	 * then start taking the whole group down immediately.
1000 	 */
1001 	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
1002 	    !sigismember(&t->real_blocked, sig) &&
1003 	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
1004 		/*
1005 		 * This signal will be fatal to the whole group.
1006 		 */
1007 		if (!sig_kernel_coredump(sig)) {
1008 			/*
1009 			 * Start a group exit and wake everybody up.
1010 			 * This way we don't have other threads
1011 			 * running and doing things after a slower
1012 			 * thread has the fatal signal pending.
1013 			 */
1014 			p->signal->flags = SIGNAL_GROUP_EXIT;
1015 			p->signal->group_exit_code = sig;
1016 			p->signal->group_stop_count = 0;
1017 			t = p;
1018 			do {
1019 				sigaddset(&t->pending.signal, SIGKILL);
1020 				signal_wake_up(t, 1);
1021 				t = next_thread(t);
1022 			} while (t != p);
1023 			return;
1024 		}
1025 
1026 		/*
1027 		 * There will be a core dump.  We make all threads other
1028 		 * than the chosen one go into a group stop so that nothing
1029 		 * happens until it gets scheduled, takes the signal off
1030 		 * the shared queue, and does the core dump.  This is a
1031 		 * little more complicated than strictly necessary, but it
1032 		 * keeps the signal state that winds up in the core dump
1033 		 * unchanged from the death state, e.g. which thread had
1034 		 * the core-dump signal unblocked.
1035 		 */
1036 		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
1037 		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
1038 		p->signal->group_stop_count = 0;
1039 		p->signal->group_exit_task = t;
1040 		t = p;
1041 		do {
1042 			p->signal->group_stop_count++;
1043 			signal_wake_up(t, 0);
1044 			t = next_thread(t);
1045 		} while (t != p);
1046 		wake_up_process(p->signal->group_exit_task);
1047 		return;
1048 	}
1049 
1050 	/*
1051 	 * The signal is already in the shared-pending queue.
1052 	 * Tell the chosen thread to wake up and dequeue it.
1053 	 */
1054 	signal_wake_up(t, sig == SIGKILL);
1055 	return;
1056 }
1057 
1058 int
1059 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1060 {
1061 	int ret = 0;
1062 
1063 	assert_spin_locked(&p->sighand->siglock);
1064 	handle_stop_signal(sig, p);
1065 
1066 	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
1067 		/*
1068 		 * Set up a return to indicate that we dropped the signal.
1069 		 */
1070 		ret = info->si_sys_private;
1071 
1072 	/* Short-circuit ignored signals.  */
1073 	if (sig_ignored(p, sig))
1074 		return ret;
1075 
1076 	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
1077 		/* This is a non-RT signal and we already have one queued.  */
1078 		return ret;
1079 
1080 	/*
1081 	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
1082 	 * We always use the shared queue for process-wide signals,
1083 	 * to avoid several races.
1084 	 */
1085 	ret = send_signal(sig, info, p, &p->signal->shared_pending);
1086 	if (unlikely(ret))
1087 		return ret;
1088 
1089 	__group_complete_signal(sig, p);
1090 	return 0;
1091 }
1092 
1093 /*
1094  * Nuke all other threads in the group.
1095  */
1096 void zap_other_threads(struct task_struct *p)
1097 {
1098 	struct task_struct *t;
1099 
1100 	p->signal->flags = SIGNAL_GROUP_EXIT;
1101 	p->signal->group_stop_count = 0;
1102 
1103 	if (thread_group_empty(p))
1104 		return;
1105 
1106 	for (t = next_thread(p); t != p; t = next_thread(t)) {
1107 		/*
1108 		 * Don't bother with already dead threads
1109 		 */
1110 		if (t->exit_state)
1111 			continue;
1112 
1113 		/*
1114 		 * We don't want to notify the parent, since we are
1115 		 * killed as part of a thread group due to another
1116 		 * thread doing an execve() or similar. So set the
1117 		 * exit signal to -1 to allow immediate reaping of
1118 		 * the process.  But don't detach the thread group
1119 		 * leader.
1120 		 */
1121 		if (t != p->group_leader)
1122 			t->exit_signal = -1;
1123 
1124 		sigaddset(&t->pending.signal, SIGKILL);
1125 		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
1126 		signal_wake_up(t, 1);
1127 	}
1128 }
1129 
1130 /*
1131  * Must be called with the tasklist_lock held for reading!
1132  */
1133 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1134 {
1135 	unsigned long flags;
1136 	int ret;
1137 
1138 	ret = check_kill_permission(sig, info, p);
1139 	if (!ret && sig && p->sighand) {
1140 		spin_lock_irqsave(&p->sighand->siglock, flags);
1141 		ret = __group_send_sig_info(sig, info, p);
1142 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1143 	}
1144 
1145 	return ret;
1146 }
1147 
1148 /*
1149  * kill_pg_info() sends a signal to a process group: this is what the tty
1150  * control characters do (^C, ^Z etc)
1151  */
1152 
1153 int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1154 {
1155 	struct task_struct *p = NULL;
1156 	int retval, success;
1157 
1158 	if (pgrp <= 0)
1159 		return -EINVAL;
1160 
1161 	success = 0;
1162 	retval = -ESRCH;
1163 	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
1164 		int err = group_send_sig_info(sig, info, p);
1165 		success |= !err;
1166 		retval = err;
1167 	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
1168 	return success ? 0 : retval;
1169 }
1170 
1171 int
1172 kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1173 {
1174 	int retval;
1175 
1176 	read_lock(&tasklist_lock);
1177 	retval = __kill_pg_info(sig, info, pgrp);
1178 	read_unlock(&tasklist_lock);
1179 
1180 	return retval;
1181 }
1182 
1183 int
1184 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1185 {
1186 	int error;
1187 	struct task_struct *p;
1188 
1189 	read_lock(&tasklist_lock);
1190 	p = find_task_by_pid(pid);
1191 	error = -ESRCH;
1192 	if (p)
1193 		error = group_send_sig_info(sig, info, p);
1194 	read_unlock(&tasklist_lock);
1195 	return error;
1196 }
1197 
1198 
1199 /*
1200  * kill_something_info() interprets pid in interesting ways just like kill(2).
1201  *
1202  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1203  * is probably wrong.  Should make it like BSD or SYSV.
1204  */
1205 
1206 static int kill_something_info(int sig, struct siginfo *info, int pid)
1207 {
1208 	if (!pid) {
1209 		return kill_pg_info(sig, info, process_group(current));
1210 	} else if (pid == -1) {
1211 		int retval = 0, count = 0;
1212 		struct task_struct * p;
1213 
1214 		read_lock(&tasklist_lock);
1215 		for_each_process(p) {
1216 			if (p->pid > 1 && p->tgid != current->tgid) {
1217 				int err = group_send_sig_info(sig, info, p);
1218 				++count;
1219 				if (err != -EPERM)
1220 					retval = err;
1221 			}
1222 		}
1223 		read_unlock(&tasklist_lock);
1224 		return count ? retval : -ESRCH;
1225 	} else if (pid < 0) {
1226 		return kill_pg_info(sig, info, -pid);
1227 	} else {
1228 		return kill_proc_info(sig, info, pid);
1229 	}
1230 }
1231 
1232 /*
1233  * These are for backward compatibility with the rest of the kernel source.
1234  */
1235 
1236 /*
1237  * These two are the most common entry points.  They send a signal
1238  * just to the specific thread.
1239  */
1240 int
1241 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1242 {
1243 	int ret;
1244 	unsigned long flags;
1245 
1246 	/*
1247 	 * Make sure legacy kernel users don't send in bad values
1248 	 * (normal paths check this in check_kill_permission).
1249 	 */
1250 	if (!valid_signal(sig))
1251 		return -EINVAL;
1252 
1253 	/*
1254 	 * We need the tasklist lock even for the specific
1255 	 * thread case (when we don't need to follow the group
1256 	 * lists) in order to avoid races with "p->sighand"
1257 	 * going away or changing from under us.
1258 	 */
1259 	read_lock(&tasklist_lock);
1260 	spin_lock_irqsave(&p->sighand->siglock, flags);
1261 	ret = specific_send_sig_info(sig, info, p);
1262 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
1263 	read_unlock(&tasklist_lock);
1264 	return ret;
1265 }
1266 
1267 int
1268 send_sig(int sig, struct task_struct *p, int priv)
1269 {
1270 	return send_sig_info(sig, (void*)(long)(priv != 0), p);
1271 }
1272 
1273 /*
1274  * This is the entry point for "process-wide" signals.
1275  * They will go to an appropriate thread in the thread group.
1276  */
1277 int
1278 send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1279 {
1280 	int ret;
1281 	read_lock(&tasklist_lock);
1282 	ret = group_send_sig_info(sig, info, p);
1283 	read_unlock(&tasklist_lock);
1284 	return ret;
1285 }
1286 
1287 void
1288 force_sig(int sig, struct task_struct *p)
1289 {
1290 	force_sig_info(sig, (void*)1L, p);
1291 }
1292 
1293 /*
1294  * When things go south during signal handling, we
1295  * will force a SIGSEGV. And if the signal that caused
1296  * the problem was already a SIGSEGV, we'll want to
1297  * make sure we don't even try to deliver the signal..
1298  */
1299 int
1300 force_sigsegv(int sig, struct task_struct *p)
1301 {
1302 	if (sig == SIGSEGV) {
1303 		unsigned long flags;
1304 		spin_lock_irqsave(&p->sighand->siglock, flags);
1305 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1306 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1307 	}
1308 	force_sig(SIGSEGV, p);
1309 	return 0;
1310 }
1311 
1312 int
1313 kill_pg(pid_t pgrp, int sig, int priv)
1314 {
1315 	return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
1316 }
1317 
1318 int
1319 kill_proc(pid_t pid, int sig, int priv)
1320 {
1321 	return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
1322 }
1323 
1324 /*
1325  * These functions support sending signals using preallocated sigqueue
1326  * structures.  This is needed "because realtime applications cannot
1327  * afford to lose notifications of asynchronous events, like timer
1328  * expirations or I/O completions".  In the case of Posix Timers
1329  * we allocate the sigqueue structure from the timer_create.  If this
1330  * allocation fails we are able to report the failure to the application
1331  * with an EAGAIN error.
1332  */
1333 
1334 struct sigqueue *sigqueue_alloc(void)
1335 {
1336 	struct sigqueue *q;
1337 
1338 	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1339 		q->flags |= SIGQUEUE_PREALLOC;
1340 	return(q);
1341 }
1342 
1343 void sigqueue_free(struct sigqueue *q)
1344 {
1345 	unsigned long flags;
1346 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1347 	/*
1348 	 * If the signal is still pending remove it from the
1349 	 * pending queue.
1350 	 */
1351 	if (unlikely(!list_empty(&q->list))) {
1352 		read_lock(&tasklist_lock);
1353 		spin_lock_irqsave(q->lock, flags);
1354 		if (!list_empty(&q->list))
1355 			list_del_init(&q->list);
1356 		spin_unlock_irqrestore(q->lock, flags);
1357 		read_unlock(&tasklist_lock);
1358 	}
1359 	q->flags &= ~SIGQUEUE_PREALLOC;
1360 	__sigqueue_free(q);
1361 }
1362 
1363 int
1364 send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1365 {
1366 	unsigned long flags;
1367 	int ret = 0;
1368 
1369 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1370 	read_lock(&tasklist_lock);
1371 
1372 	if (unlikely(p->flags & PF_EXITING)) {
1373 		ret = -1;
1374 		goto out_err;
1375 	}
1376 
1377 	spin_lock_irqsave(&p->sighand->siglock, flags);
1378 
1379 	if (unlikely(!list_empty(&q->list))) {
1380 		/*
1381 		 * If an SI_TIMER entry is already queue just increment
1382 		 * the overrun count.
1383 		 */
1384 		if (q->info.si_code != SI_TIMER)
1385 			BUG();
1386 		q->info.si_overrun++;
1387 		goto out;
1388 	}
1389 	/* Short-circuit ignored signals.  */
1390 	if (sig_ignored(p, sig)) {
1391 		ret = 1;
1392 		goto out;
1393 	}
1394 
1395 	q->lock = &p->sighand->siglock;
1396 	list_add_tail(&q->list, &p->pending.list);
1397 	sigaddset(&p->pending.signal, sig);
1398 	if (!sigismember(&p->blocked, sig))
1399 		signal_wake_up(p, sig == SIGKILL);
1400 
1401 out:
1402 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
1403 out_err:
1404 	read_unlock(&tasklist_lock);
1405 
1406 	return ret;
1407 }
1408 
1409 int
1410 send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1411 {
1412 	unsigned long flags;
1413 	int ret = 0;
1414 
1415 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1416 	read_lock(&tasklist_lock);
1417 	spin_lock_irqsave(&p->sighand->siglock, flags);
1418 	handle_stop_signal(sig, p);
1419 
1420 	/* Short-circuit ignored signals.  */
1421 	if (sig_ignored(p, sig)) {
1422 		ret = 1;
1423 		goto out;
1424 	}
1425 
1426 	if (unlikely(!list_empty(&q->list))) {
1427 		/*
1428 		 * If an SI_TIMER entry is already queue just increment
1429 		 * the overrun count.  Other uses should not try to
1430 		 * send the signal multiple times.
1431 		 */
1432 		if (q->info.si_code != SI_TIMER)
1433 			BUG();
1434 		q->info.si_overrun++;
1435 		goto out;
1436 	}
1437 
1438 	/*
1439 	 * Put this signal on the shared-pending queue.
1440 	 * We always use the shared queue for process-wide signals,
1441 	 * to avoid several races.
1442 	 */
1443 	q->lock = &p->sighand->siglock;
1444 	list_add_tail(&q->list, &p->signal->shared_pending.list);
1445 	sigaddset(&p->signal->shared_pending.signal, sig);
1446 
1447 	__group_complete_signal(sig, p);
1448 out:
1449 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
1450 	read_unlock(&tasklist_lock);
1451 	return(ret);
1452 }
1453 
1454 /*
1455  * Wake up any threads in the parent blocked in wait* syscalls.
1456  */
1457 static inline void __wake_up_parent(struct task_struct *p,
1458 				    struct task_struct *parent)
1459 {
1460 	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1461 }
1462 
1463 /*
1464  * Let a parent know about the death of a child.
1465  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1466  */
1467 
1468 void do_notify_parent(struct task_struct *tsk, int sig)
1469 {
1470 	struct siginfo info;
1471 	unsigned long flags;
1472 	struct sighand_struct *psig;
1473 
1474 	BUG_ON(sig == -1);
1475 
1476  	/* do_notify_parent_cldstop should have been called instead.  */
1477  	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1478 
1479 	BUG_ON(!tsk->ptrace &&
1480 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1481 
1482 	info.si_signo = sig;
1483 	info.si_errno = 0;
1484 	info.si_pid = tsk->pid;
1485 	info.si_uid = tsk->uid;
1486 
1487 	/* FIXME: find out whether or not this is supposed to be c*time. */
1488 	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1489 						       tsk->signal->utime));
1490 	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1491 						       tsk->signal->stime));
1492 
1493 	info.si_status = tsk->exit_code & 0x7f;
1494 	if (tsk->exit_code & 0x80)
1495 		info.si_code = CLD_DUMPED;
1496 	else if (tsk->exit_code & 0x7f)
1497 		info.si_code = CLD_KILLED;
1498 	else {
1499 		info.si_code = CLD_EXITED;
1500 		info.si_status = tsk->exit_code >> 8;
1501 	}
1502 
1503 	psig = tsk->parent->sighand;
1504 	spin_lock_irqsave(&psig->siglock, flags);
1505 	if (sig == SIGCHLD &&
1506 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1507 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1508 		/*
1509 		 * We are exiting and our parent doesn't care.  POSIX.1
1510 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1511 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1512 		 * automatically and not left for our parent's wait4 call.
1513 		 * Rather than having the parent do it as a magic kind of
1514 		 * signal handler, we just set this to tell do_exit that we
1515 		 * can be cleaned up without becoming a zombie.  Note that
1516 		 * we still call __wake_up_parent in this case, because a
1517 		 * blocked sys_wait4 might now return -ECHILD.
1518 		 *
1519 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1520 		 * is implementation-defined: we do (if you don't want
1521 		 * it, just use SIG_IGN instead).
1522 		 */
1523 		tsk->exit_signal = -1;
1524 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1525 			sig = 0;
1526 	}
1527 	if (valid_signal(sig) && sig > 0)
1528 		__group_send_sig_info(sig, &info, tsk->parent);
1529 	__wake_up_parent(tsk, tsk->parent);
1530 	spin_unlock_irqrestore(&psig->siglock, flags);
1531 }
1532 
1533 static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
1534 {
1535 	struct siginfo info;
1536 	unsigned long flags;
1537 	struct task_struct *parent;
1538 	struct sighand_struct *sighand;
1539 
1540 	if (to_self)
1541 		parent = tsk->parent;
1542 	else {
1543 		tsk = tsk->group_leader;
1544 		parent = tsk->real_parent;
1545 	}
1546 
1547 	info.si_signo = SIGCHLD;
1548 	info.si_errno = 0;
1549 	info.si_pid = tsk->pid;
1550 	info.si_uid = tsk->uid;
1551 
1552 	/* FIXME: find out whether or not this is supposed to be c*time. */
1553 	info.si_utime = cputime_to_jiffies(tsk->utime);
1554 	info.si_stime = cputime_to_jiffies(tsk->stime);
1555 
1556  	info.si_code = why;
1557  	switch (why) {
1558  	case CLD_CONTINUED:
1559  		info.si_status = SIGCONT;
1560  		break;
1561  	case CLD_STOPPED:
1562  		info.si_status = tsk->signal->group_exit_code & 0x7f;
1563  		break;
1564  	case CLD_TRAPPED:
1565  		info.si_status = tsk->exit_code & 0x7f;
1566  		break;
1567  	default:
1568  		BUG();
1569  	}
1570 
1571 	sighand = parent->sighand;
1572 	spin_lock_irqsave(&sighand->siglock, flags);
1573 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1574 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1575 		__group_send_sig_info(SIGCHLD, &info, parent);
1576 	/*
1577 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1578 	 */
1579 	__wake_up_parent(tsk, parent);
1580 	spin_unlock_irqrestore(&sighand->siglock, flags);
1581 }
1582 
1583 /*
1584  * This must be called with current->sighand->siglock held.
1585  *
1586  * This should be the path for all ptrace stops.
1587  * We always set current->last_siginfo while stopped here.
1588  * That makes it a way to test a stopped process for
1589  * being ptrace-stopped vs being job-control-stopped.
1590  *
1591  * If we actually decide not to stop at all because the tracer is gone,
1592  * we leave nostop_code in current->exit_code.
1593  */
1594 static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1595 {
1596 	/*
1597 	 * If there is a group stop in progress,
1598 	 * we must participate in the bookkeeping.
1599 	 */
1600 	if (current->signal->group_stop_count > 0)
1601 		--current->signal->group_stop_count;
1602 
1603 	current->last_siginfo = info;
1604 	current->exit_code = exit_code;
1605 
1606 	/* Let the debugger run.  */
1607 	set_current_state(TASK_TRACED);
1608 	spin_unlock_irq(&current->sighand->siglock);
1609 	read_lock(&tasklist_lock);
1610 	if (likely(current->ptrace & PT_PTRACED) &&
1611 	    likely(current->parent != current->real_parent ||
1612 		   !(current->ptrace & PT_ATTACHED)) &&
1613 	    (likely(current->parent->signal != current->signal) ||
1614 	     !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
1615 		do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
1616 		read_unlock(&tasklist_lock);
1617 		schedule();
1618 	} else {
1619 		/*
1620 		 * By the time we got the lock, our tracer went away.
1621 		 * Don't stop here.
1622 		 */
1623 		read_unlock(&tasklist_lock);
1624 		set_current_state(TASK_RUNNING);
1625 		current->exit_code = nostop_code;
1626 	}
1627 
1628 	/*
1629 	 * We are back.  Now reacquire the siglock before touching
1630 	 * last_siginfo, so that we are sure to have synchronized with
1631 	 * any signal-sending on another CPU that wants to examine it.
1632 	 */
1633 	spin_lock_irq(&current->sighand->siglock);
1634 	current->last_siginfo = NULL;
1635 
1636 	/*
1637 	 * Queued signals ignored us while we were stopped for tracing.
1638 	 * So check for any that we should take before resuming user mode.
1639 	 */
1640 	recalc_sigpending();
1641 }
1642 
1643 void ptrace_notify(int exit_code)
1644 {
1645 	siginfo_t info;
1646 
1647 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1648 
1649 	memset(&info, 0, sizeof info);
1650 	info.si_signo = SIGTRAP;
1651 	info.si_code = exit_code;
1652 	info.si_pid = current->pid;
1653 	info.si_uid = current->uid;
1654 
1655 	/* Let the debugger run.  */
1656 	spin_lock_irq(&current->sighand->siglock);
1657 	ptrace_stop(exit_code, 0, &info);
1658 	spin_unlock_irq(&current->sighand->siglock);
1659 }
1660 
1661 static void
1662 finish_stop(int stop_count)
1663 {
1664 	int to_self;
1665 
1666 	/*
1667 	 * If there are no other threads in the group, or if there is
1668 	 * a group stop in progress and we are the last to stop,
1669 	 * report to the parent.  When ptraced, every thread reports itself.
1670 	 */
1671 	if (stop_count < 0 || (current->ptrace & PT_PTRACED))
1672 		to_self = 1;
1673 	else if (stop_count == 0)
1674 		to_self = 0;
1675 	else
1676 		goto out;
1677 
1678 	read_lock(&tasklist_lock);
1679 	do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
1680 	read_unlock(&tasklist_lock);
1681 
1682 out:
1683 	schedule();
1684 	/*
1685 	 * Now we don't run again until continued.
1686 	 */
1687 	current->exit_code = 0;
1688 }
1689 
1690 /*
1691  * This performs the stopping for SIGSTOP and other stop signals.
1692  * We have to stop all threads in the thread group.
1693  * Returns nonzero if we've actually stopped and released the siglock.
1694  * Returns zero if we didn't stop and still hold the siglock.
1695  */
1696 static int
1697 do_signal_stop(int signr)
1698 {
1699 	struct signal_struct *sig = current->signal;
1700 	struct sighand_struct *sighand = current->sighand;
1701 	int stop_count = -1;
1702 
1703 	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1704 		return 0;
1705 
1706 	if (sig->group_stop_count > 0) {
1707 		/*
1708 		 * There is a group stop in progress.  We don't need to
1709 		 * start another one.
1710 		 */
1711 		signr = sig->group_exit_code;
1712 		stop_count = --sig->group_stop_count;
1713 		current->exit_code = signr;
1714 		set_current_state(TASK_STOPPED);
1715 		if (stop_count == 0)
1716 			sig->flags = SIGNAL_STOP_STOPPED;
1717 		spin_unlock_irq(&sighand->siglock);
1718 	}
1719 	else if (thread_group_empty(current)) {
1720 		/*
1721 		 * Lock must be held through transition to stopped state.
1722 		 */
1723 		current->exit_code = current->signal->group_exit_code = signr;
1724 		set_current_state(TASK_STOPPED);
1725 		sig->flags = SIGNAL_STOP_STOPPED;
1726 		spin_unlock_irq(&sighand->siglock);
1727 	}
1728 	else {
1729 		/*
1730 		 * There is no group stop already in progress.
1731 		 * We must initiate one now, but that requires
1732 		 * dropping siglock to get both the tasklist lock
1733 		 * and siglock again in the proper order.  Note that
1734 		 * this allows an intervening SIGCONT to be posted.
1735 		 * We need to check for that and bail out if necessary.
1736 		 */
1737 		struct task_struct *t;
1738 
1739 		spin_unlock_irq(&sighand->siglock);
1740 
1741 		/* signals can be posted during this window */
1742 
1743 		read_lock(&tasklist_lock);
1744 		spin_lock_irq(&sighand->siglock);
1745 
1746 		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
1747 			/*
1748 			 * Another stop or continue happened while we
1749 			 * didn't have the lock.  We can just swallow this
1750 			 * signal now.  If we raced with a SIGCONT, that
1751 			 * should have just cleared it now.  If we raced
1752 			 * with another processor delivering a stop signal,
1753 			 * then the SIGCONT that wakes us up should clear it.
1754 			 */
1755 			read_unlock(&tasklist_lock);
1756 			return 0;
1757 		}
1758 
1759 		if (sig->group_stop_count == 0) {
1760 			sig->group_exit_code = signr;
1761 			stop_count = 0;
1762 			for (t = next_thread(current); t != current;
1763 			     t = next_thread(t))
1764 				/*
1765 				 * Setting state to TASK_STOPPED for a group
1766 				 * stop is always done with the siglock held,
1767 				 * so this check has no races.
1768 				 */
1769 				if (t->state < TASK_STOPPED) {
1770 					stop_count++;
1771 					signal_wake_up(t, 0);
1772 				}
1773 			sig->group_stop_count = stop_count;
1774 		}
1775 		else {
1776 			/* A race with another thread while unlocked.  */
1777 			signr = sig->group_exit_code;
1778 			stop_count = --sig->group_stop_count;
1779 		}
1780 
1781 		current->exit_code = signr;
1782 		set_current_state(TASK_STOPPED);
1783 		if (stop_count == 0)
1784 			sig->flags = SIGNAL_STOP_STOPPED;
1785 
1786 		spin_unlock_irq(&sighand->siglock);
1787 		read_unlock(&tasklist_lock);
1788 	}
1789 
1790 	finish_stop(stop_count);
1791 	return 1;
1792 }
1793 
1794 /*
1795  * Do appropriate magic when group_stop_count > 0.
1796  * We return nonzero if we stopped, after releasing the siglock.
1797  * We return zero if we still hold the siglock and should look
1798  * for another signal without checking group_stop_count again.
1799  */
1800 static inline int handle_group_stop(void)
1801 {
1802 	int stop_count;
1803 
1804 	if (current->signal->group_exit_task == current) {
1805 		/*
1806 		 * Group stop is so we can do a core dump,
1807 		 * We are the initiating thread, so get on with it.
1808 		 */
1809 		current->signal->group_exit_task = NULL;
1810 		return 0;
1811 	}
1812 
1813 	if (current->signal->flags & SIGNAL_GROUP_EXIT)
1814 		/*
1815 		 * Group stop is so another thread can do a core dump,
1816 		 * or else we are racing against a death signal.
1817 		 * Just punt the stop so we can get the next signal.
1818 		 */
1819 		return 0;
1820 
1821 	/*
1822 	 * There is a group stop in progress.  We stop
1823 	 * without any associated signal being in our queue.
1824 	 */
1825 	stop_count = --current->signal->group_stop_count;
1826 	if (stop_count == 0)
1827 		current->signal->flags = SIGNAL_STOP_STOPPED;
1828 	current->exit_code = current->signal->group_exit_code;
1829 	set_current_state(TASK_STOPPED);
1830 	spin_unlock_irq(&current->sighand->siglock);
1831 	finish_stop(stop_count);
1832 	return 1;
1833 }
1834 
1835 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1836 			  struct pt_regs *regs, void *cookie)
1837 {
1838 	sigset_t *mask = &current->blocked;
1839 	int signr = 0;
1840 
1841 relock:
1842 	spin_lock_irq(&current->sighand->siglock);
1843 	for (;;) {
1844 		struct k_sigaction *ka;
1845 
1846 		if (unlikely(current->signal->group_stop_count > 0) &&
1847 		    handle_group_stop())
1848 			goto relock;
1849 
1850 		signr = dequeue_signal(current, mask, info);
1851 
1852 		if (!signr)
1853 			break; /* will return 0 */
1854 
1855 		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1856 			ptrace_signal_deliver(regs, cookie);
1857 
1858 			/* Let the debugger run.  */
1859 			ptrace_stop(signr, signr, info);
1860 
1861 			/* We're back.  Did the debugger cancel the sig?  */
1862 			signr = current->exit_code;
1863 			if (signr == 0)
1864 				continue;
1865 
1866 			current->exit_code = 0;
1867 
1868 			/* Update the siginfo structure if the signal has
1869 			   changed.  If the debugger wanted something
1870 			   specific in the siginfo structure then it should
1871 			   have updated *info via PTRACE_SETSIGINFO.  */
1872 			if (signr != info->si_signo) {
1873 				info->si_signo = signr;
1874 				info->si_errno = 0;
1875 				info->si_code = SI_USER;
1876 				info->si_pid = current->parent->pid;
1877 				info->si_uid = current->parent->uid;
1878 			}
1879 
1880 			/* If the (new) signal is now blocked, requeue it.  */
1881 			if (sigismember(&current->blocked, signr)) {
1882 				specific_send_sig_info(signr, info, current);
1883 				continue;
1884 			}
1885 		}
1886 
1887 		ka = &current->sighand->action[signr-1];
1888 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1889 			continue;
1890 		if (ka->sa.sa_handler != SIG_DFL) {
1891 			/* Run the handler.  */
1892 			*return_ka = *ka;
1893 
1894 			if (ka->sa.sa_flags & SA_ONESHOT)
1895 				ka->sa.sa_handler = SIG_DFL;
1896 
1897 			break; /* will return non-zero "signr" value */
1898 		}
1899 
1900 		/*
1901 		 * Now we are doing the default action for this signal.
1902 		 */
1903 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
1904 			continue;
1905 
1906 		/* Init gets no signals it doesn't want.  */
1907 		if (current->pid == 1)
1908 			continue;
1909 
1910 		if (sig_kernel_stop(signr)) {
1911 			/*
1912 			 * The default action is to stop all threads in
1913 			 * the thread group.  The job control signals
1914 			 * do nothing in an orphaned pgrp, but SIGSTOP
1915 			 * always works.  Note that siglock needs to be
1916 			 * dropped during the call to is_orphaned_pgrp()
1917 			 * because of lock ordering with tasklist_lock.
1918 			 * This allows an intervening SIGCONT to be posted.
1919 			 * We need to check for that and bail out if necessary.
1920 			 */
1921 			if (signr != SIGSTOP) {
1922 				spin_unlock_irq(&current->sighand->siglock);
1923 
1924 				/* signals can be posted during this window */
1925 
1926 				if (is_orphaned_pgrp(process_group(current)))
1927 					goto relock;
1928 
1929 				spin_lock_irq(&current->sighand->siglock);
1930 			}
1931 
1932 			if (likely(do_signal_stop(signr))) {
1933 				/* It released the siglock.  */
1934 				goto relock;
1935 			}
1936 
1937 			/*
1938 			 * We didn't actually stop, due to a race
1939 			 * with SIGCONT or something like that.
1940 			 */
1941 			continue;
1942 		}
1943 
1944 		spin_unlock_irq(&current->sighand->siglock);
1945 
1946 		/*
1947 		 * Anything else is fatal, maybe with a core dump.
1948 		 */
1949 		current->flags |= PF_SIGNALED;
1950 		if (sig_kernel_coredump(signr)) {
1951 			/*
1952 			 * If it was able to dump core, this kills all
1953 			 * other threads in the group and synchronizes with
1954 			 * their demise.  If we lost the race with another
1955 			 * thread getting here, it set group_exit_code
1956 			 * first and our do_group_exit call below will use
1957 			 * that value and ignore the one we pass it.
1958 			 */
1959 			do_coredump((long)signr, signr, regs);
1960 		}
1961 
1962 		/*
1963 		 * Death signals, no core dump.
1964 		 */
1965 		do_group_exit(signr);
1966 		/* NOTREACHED */
1967 	}
1968 	spin_unlock_irq(&current->sighand->siglock);
1969 	return signr;
1970 }
1971 
1972 EXPORT_SYMBOL(recalc_sigpending);
1973 EXPORT_SYMBOL_GPL(dequeue_signal);
1974 EXPORT_SYMBOL(flush_signals);
1975 EXPORT_SYMBOL(force_sig);
1976 EXPORT_SYMBOL(kill_pg);
1977 EXPORT_SYMBOL(kill_proc);
1978 EXPORT_SYMBOL(ptrace_notify);
1979 EXPORT_SYMBOL(send_sig);
1980 EXPORT_SYMBOL(send_sig_info);
1981 EXPORT_SYMBOL(sigprocmask);
1982 EXPORT_SYMBOL(block_all_signals);
1983 EXPORT_SYMBOL(unblock_all_signals);
1984 
1985 
1986 /*
1987  * System call entry points.
1988  */
1989 
1990 asmlinkage long sys_restart_syscall(void)
1991 {
1992 	struct restart_block *restart = &current_thread_info()->restart_block;
1993 	return restart->fn(restart);
1994 }
1995 
1996 long do_no_restart_syscall(struct restart_block *param)
1997 {
1998 	return -EINTR;
1999 }
2000 
2001 /*
2002  * We don't need to get the kernel lock - this is all local to this
2003  * particular thread.. (and that's good, because this is _heavily_
2004  * used by various programs)
2005  */
2006 
2007 /*
2008  * This is also useful for kernel threads that want to temporarily
2009  * (or permanently) block certain signals.
2010  *
2011  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2012  * interface happily blocks "unblockable" signals like SIGKILL
2013  * and friends.
2014  */
2015 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2016 {
2017 	int error;
2018 	sigset_t old_block;
2019 
2020 	spin_lock_irq(&current->sighand->siglock);
2021 	old_block = current->blocked;
2022 	error = 0;
2023 	switch (how) {
2024 	case SIG_BLOCK:
2025 		sigorsets(&current->blocked, &current->blocked, set);
2026 		break;
2027 	case SIG_UNBLOCK:
2028 		signandsets(&current->blocked, &current->blocked, set);
2029 		break;
2030 	case SIG_SETMASK:
2031 		current->blocked = *set;
2032 		break;
2033 	default:
2034 		error = -EINVAL;
2035 	}
2036 	recalc_sigpending();
2037 	spin_unlock_irq(&current->sighand->siglock);
2038 	if (oldset)
2039 		*oldset = old_block;
2040 	return error;
2041 }
2042 
2043 asmlinkage long
2044 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
2045 {
2046 	int error = -EINVAL;
2047 	sigset_t old_set, new_set;
2048 
2049 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2050 	if (sigsetsize != sizeof(sigset_t))
2051 		goto out;
2052 
2053 	if (set) {
2054 		error = -EFAULT;
2055 		if (copy_from_user(&new_set, set, sizeof(*set)))
2056 			goto out;
2057 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2058 
2059 		error = sigprocmask(how, &new_set, &old_set);
2060 		if (error)
2061 			goto out;
2062 		if (oset)
2063 			goto set_old;
2064 	} else if (oset) {
2065 		spin_lock_irq(&current->sighand->siglock);
2066 		old_set = current->blocked;
2067 		spin_unlock_irq(&current->sighand->siglock);
2068 
2069 	set_old:
2070 		error = -EFAULT;
2071 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2072 			goto out;
2073 	}
2074 	error = 0;
2075 out:
2076 	return error;
2077 }
2078 
2079 long do_sigpending(void __user *set, unsigned long sigsetsize)
2080 {
2081 	long error = -EINVAL;
2082 	sigset_t pending;
2083 
2084 	if (sigsetsize > sizeof(sigset_t))
2085 		goto out;
2086 
2087 	spin_lock_irq(&current->sighand->siglock);
2088 	sigorsets(&pending, &current->pending.signal,
2089 		  &current->signal->shared_pending.signal);
2090 	spin_unlock_irq(&current->sighand->siglock);
2091 
2092 	/* Outside the lock because only this thread touches it.  */
2093 	sigandsets(&pending, &current->blocked, &pending);
2094 
2095 	error = -EFAULT;
2096 	if (!copy_to_user(set, &pending, sigsetsize))
2097 		error = 0;
2098 
2099 out:
2100 	return error;
2101 }
2102 
2103 asmlinkage long
2104 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2105 {
2106 	return do_sigpending(set, sigsetsize);
2107 }
2108 
2109 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2110 
2111 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2112 {
2113 	int err;
2114 
2115 	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2116 		return -EFAULT;
2117 	if (from->si_code < 0)
2118 		return __copy_to_user(to, from, sizeof(siginfo_t))
2119 			? -EFAULT : 0;
2120 	/*
2121 	 * If you change siginfo_t structure, please be sure
2122 	 * this code is fixed accordingly.
2123 	 * It should never copy any pad contained in the structure
2124 	 * to avoid security leaks, but must copy the generic
2125 	 * 3 ints plus the relevant union member.
2126 	 */
2127 	err = __put_user(from->si_signo, &to->si_signo);
2128 	err |= __put_user(from->si_errno, &to->si_errno);
2129 	err |= __put_user((short)from->si_code, &to->si_code);
2130 	switch (from->si_code & __SI_MASK) {
2131 	case __SI_KILL:
2132 		err |= __put_user(from->si_pid, &to->si_pid);
2133 		err |= __put_user(from->si_uid, &to->si_uid);
2134 		break;
2135 	case __SI_TIMER:
2136 		 err |= __put_user(from->si_tid, &to->si_tid);
2137 		 err |= __put_user(from->si_overrun, &to->si_overrun);
2138 		 err |= __put_user(from->si_ptr, &to->si_ptr);
2139 		break;
2140 	case __SI_POLL:
2141 		err |= __put_user(from->si_band, &to->si_band);
2142 		err |= __put_user(from->si_fd, &to->si_fd);
2143 		break;
2144 	case __SI_FAULT:
2145 		err |= __put_user(from->si_addr, &to->si_addr);
2146 #ifdef __ARCH_SI_TRAPNO
2147 		err |= __put_user(from->si_trapno, &to->si_trapno);
2148 #endif
2149 		break;
2150 	case __SI_CHLD:
2151 		err |= __put_user(from->si_pid, &to->si_pid);
2152 		err |= __put_user(from->si_uid, &to->si_uid);
2153 		err |= __put_user(from->si_status, &to->si_status);
2154 		err |= __put_user(from->si_utime, &to->si_utime);
2155 		err |= __put_user(from->si_stime, &to->si_stime);
2156 		break;
2157 	case __SI_RT: /* This is not generated by the kernel as of now. */
2158 	case __SI_MESGQ: /* But this is */
2159 		err |= __put_user(from->si_pid, &to->si_pid);
2160 		err |= __put_user(from->si_uid, &to->si_uid);
2161 		err |= __put_user(from->si_ptr, &to->si_ptr);
2162 		break;
2163 	default: /* this is just in case for now ... */
2164 		err |= __put_user(from->si_pid, &to->si_pid);
2165 		err |= __put_user(from->si_uid, &to->si_uid);
2166 		break;
2167 	}
2168 	return err;
2169 }
2170 
2171 #endif
2172 
2173 asmlinkage long
2174 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2175 		    siginfo_t __user *uinfo,
2176 		    const struct timespec __user *uts,
2177 		    size_t sigsetsize)
2178 {
2179 	int ret, sig;
2180 	sigset_t these;
2181 	struct timespec ts;
2182 	siginfo_t info;
2183 	long timeout = 0;
2184 
2185 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2186 	if (sigsetsize != sizeof(sigset_t))
2187 		return -EINVAL;
2188 
2189 	if (copy_from_user(&these, uthese, sizeof(these)))
2190 		return -EFAULT;
2191 
2192 	/*
2193 	 * Invert the set of allowed signals to get those we
2194 	 * want to block.
2195 	 */
2196 	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2197 	signotset(&these);
2198 
2199 	if (uts) {
2200 		if (copy_from_user(&ts, uts, sizeof(ts)))
2201 			return -EFAULT;
2202 		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2203 		    || ts.tv_sec < 0)
2204 			return -EINVAL;
2205 	}
2206 
2207 	spin_lock_irq(&current->sighand->siglock);
2208 	sig = dequeue_signal(current, &these, &info);
2209 	if (!sig) {
2210 		timeout = MAX_SCHEDULE_TIMEOUT;
2211 		if (uts)
2212 			timeout = (timespec_to_jiffies(&ts)
2213 				   + (ts.tv_sec || ts.tv_nsec));
2214 
2215 		if (timeout) {
2216 			/* None ready -- temporarily unblock those we're
2217 			 * interested while we are sleeping in so that we'll
2218 			 * be awakened when they arrive.  */
2219 			current->real_blocked = current->blocked;
2220 			sigandsets(&current->blocked, &current->blocked, &these);
2221 			recalc_sigpending();
2222 			spin_unlock_irq(&current->sighand->siglock);
2223 
2224 			current->state = TASK_INTERRUPTIBLE;
2225 			timeout = schedule_timeout(timeout);
2226 
2227 			try_to_freeze();
2228 			spin_lock_irq(&current->sighand->siglock);
2229 			sig = dequeue_signal(current, &these, &info);
2230 			current->blocked = current->real_blocked;
2231 			siginitset(&current->real_blocked, 0);
2232 			recalc_sigpending();
2233 		}
2234 	}
2235 	spin_unlock_irq(&current->sighand->siglock);
2236 
2237 	if (sig) {
2238 		ret = sig;
2239 		if (uinfo) {
2240 			if (copy_siginfo_to_user(uinfo, &info))
2241 				ret = -EFAULT;
2242 		}
2243 	} else {
2244 		ret = -EAGAIN;
2245 		if (timeout)
2246 			ret = -EINTR;
2247 	}
2248 
2249 	return ret;
2250 }
2251 
2252 asmlinkage long
2253 sys_kill(int pid, int sig)
2254 {
2255 	struct siginfo info;
2256 
2257 	info.si_signo = sig;
2258 	info.si_errno = 0;
2259 	info.si_code = SI_USER;
2260 	info.si_pid = current->tgid;
2261 	info.si_uid = current->uid;
2262 
2263 	return kill_something_info(sig, &info, pid);
2264 }
2265 
2266 /**
2267  *  sys_tgkill - send signal to one specific thread
2268  *  @tgid: the thread group ID of the thread
2269  *  @pid: the PID of the thread
2270  *  @sig: signal to be sent
2271  *
2272  *  This syscall also checks the tgid and returns -ESRCH even if the PID
2273  *  exists but it's not belonging to the target process anymore. This
2274  *  method solves the problem of threads exiting and PIDs getting reused.
2275  */
2276 asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2277 {
2278 	struct siginfo info;
2279 	int error;
2280 	struct task_struct *p;
2281 
2282 	/* This is only valid for single tasks */
2283 	if (pid <= 0 || tgid <= 0)
2284 		return -EINVAL;
2285 
2286 	info.si_signo = sig;
2287 	info.si_errno = 0;
2288 	info.si_code = SI_TKILL;
2289 	info.si_pid = current->tgid;
2290 	info.si_uid = current->uid;
2291 
2292 	read_lock(&tasklist_lock);
2293 	p = find_task_by_pid(pid);
2294 	error = -ESRCH;
2295 	if (p && (p->tgid == tgid)) {
2296 		error = check_kill_permission(sig, &info, p);
2297 		/*
2298 		 * The null signal is a permissions and process existence
2299 		 * probe.  No signal is actually delivered.
2300 		 */
2301 		if (!error && sig && p->sighand) {
2302 			spin_lock_irq(&p->sighand->siglock);
2303 			handle_stop_signal(sig, p);
2304 			error = specific_send_sig_info(sig, &info, p);
2305 			spin_unlock_irq(&p->sighand->siglock);
2306 		}
2307 	}
2308 	read_unlock(&tasklist_lock);
2309 	return error;
2310 }
2311 
2312 /*
2313  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2314  */
2315 asmlinkage long
2316 sys_tkill(int pid, int sig)
2317 {
2318 	struct siginfo info;
2319 	int error;
2320 	struct task_struct *p;
2321 
2322 	/* This is only valid for single tasks */
2323 	if (pid <= 0)
2324 		return -EINVAL;
2325 
2326 	info.si_signo = sig;
2327 	info.si_errno = 0;
2328 	info.si_code = SI_TKILL;
2329 	info.si_pid = current->tgid;
2330 	info.si_uid = current->uid;
2331 
2332 	read_lock(&tasklist_lock);
2333 	p = find_task_by_pid(pid);
2334 	error = -ESRCH;
2335 	if (p) {
2336 		error = check_kill_permission(sig, &info, p);
2337 		/*
2338 		 * The null signal is a permissions and process existence
2339 		 * probe.  No signal is actually delivered.
2340 		 */
2341 		if (!error && sig && p->sighand) {
2342 			spin_lock_irq(&p->sighand->siglock);
2343 			handle_stop_signal(sig, p);
2344 			error = specific_send_sig_info(sig, &info, p);
2345 			spin_unlock_irq(&p->sighand->siglock);
2346 		}
2347 	}
2348 	read_unlock(&tasklist_lock);
2349 	return error;
2350 }
2351 
2352 asmlinkage long
2353 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2354 {
2355 	siginfo_t info;
2356 
2357 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2358 		return -EFAULT;
2359 
2360 	/* Not even root can pretend to send signals from the kernel.
2361 	   Nor can they impersonate a kill(), which adds source info.  */
2362 	if (info.si_code >= 0)
2363 		return -EPERM;
2364 	info.si_signo = sig;
2365 
2366 	/* POSIX.1b doesn't mention process groups.  */
2367 	return kill_proc_info(sig, &info, pid);
2368 }
2369 
2370 int
2371 do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
2372 {
2373 	struct k_sigaction *k;
2374 
2375 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2376 		return -EINVAL;
2377 
2378 	k = &current->sighand->action[sig-1];
2379 
2380 	spin_lock_irq(&current->sighand->siglock);
2381 	if (signal_pending(current)) {
2382 		/*
2383 		 * If there might be a fatal signal pending on multiple
2384 		 * threads, make sure we take it before changing the action.
2385 		 */
2386 		spin_unlock_irq(&current->sighand->siglock);
2387 		return -ERESTARTNOINTR;
2388 	}
2389 
2390 	if (oact)
2391 		*oact = *k;
2392 
2393 	if (act) {
2394 		/*
2395 		 * POSIX 3.3.1.3:
2396 		 *  "Setting a signal action to SIG_IGN for a signal that is
2397 		 *   pending shall cause the pending signal to be discarded,
2398 		 *   whether or not it is blocked."
2399 		 *
2400 		 *  "Setting a signal action to SIG_DFL for a signal that is
2401 		 *   pending and whose default action is to ignore the signal
2402 		 *   (for example, SIGCHLD), shall cause the pending signal to
2403 		 *   be discarded, whether or not it is blocked"
2404 		 */
2405 		if (act->sa.sa_handler == SIG_IGN ||
2406 		    (act->sa.sa_handler == SIG_DFL &&
2407 		     sig_kernel_ignore(sig))) {
2408 			/*
2409 			 * This is a fairly rare case, so we only take the
2410 			 * tasklist_lock once we're sure we'll need it.
2411 			 * Now we must do this little unlock and relock
2412 			 * dance to maintain the lock hierarchy.
2413 			 */
2414 			struct task_struct *t = current;
2415 			spin_unlock_irq(&t->sighand->siglock);
2416 			read_lock(&tasklist_lock);
2417 			spin_lock_irq(&t->sighand->siglock);
2418 			*k = *act;
2419 			sigdelsetmask(&k->sa.sa_mask,
2420 				      sigmask(SIGKILL) | sigmask(SIGSTOP));
2421 			rm_from_queue(sigmask(sig), &t->signal->shared_pending);
2422 			do {
2423 				rm_from_queue(sigmask(sig), &t->pending);
2424 				recalc_sigpending_tsk(t);
2425 				t = next_thread(t);
2426 			} while (t != current);
2427 			spin_unlock_irq(&current->sighand->siglock);
2428 			read_unlock(&tasklist_lock);
2429 			return 0;
2430 		}
2431 
2432 		*k = *act;
2433 		sigdelsetmask(&k->sa.sa_mask,
2434 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
2435 	}
2436 
2437 	spin_unlock_irq(&current->sighand->siglock);
2438 	return 0;
2439 }
2440 
2441 int
2442 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2443 {
2444 	stack_t oss;
2445 	int error;
2446 
2447 	if (uoss) {
2448 		oss.ss_sp = (void __user *) current->sas_ss_sp;
2449 		oss.ss_size = current->sas_ss_size;
2450 		oss.ss_flags = sas_ss_flags(sp);
2451 	}
2452 
2453 	if (uss) {
2454 		void __user *ss_sp;
2455 		size_t ss_size;
2456 		int ss_flags;
2457 
2458 		error = -EFAULT;
2459 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2460 		    || __get_user(ss_sp, &uss->ss_sp)
2461 		    || __get_user(ss_flags, &uss->ss_flags)
2462 		    || __get_user(ss_size, &uss->ss_size))
2463 			goto out;
2464 
2465 		error = -EPERM;
2466 		if (on_sig_stack(sp))
2467 			goto out;
2468 
2469 		error = -EINVAL;
2470 		/*
2471 		 *
2472 		 * Note - this code used to test ss_flags incorrectly
2473 		 *  	  old code may have been written using ss_flags==0
2474 		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
2475 		 *	  way that worked) - this fix preserves that older
2476 		 *	  mechanism
2477 		 */
2478 		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2479 			goto out;
2480 
2481 		if (ss_flags == SS_DISABLE) {
2482 			ss_size = 0;
2483 			ss_sp = NULL;
2484 		} else {
2485 			error = -ENOMEM;
2486 			if (ss_size < MINSIGSTKSZ)
2487 				goto out;
2488 		}
2489 
2490 		current->sas_ss_sp = (unsigned long) ss_sp;
2491 		current->sas_ss_size = ss_size;
2492 	}
2493 
2494 	if (uoss) {
2495 		error = -EFAULT;
2496 		if (copy_to_user(uoss, &oss, sizeof(oss)))
2497 			goto out;
2498 	}
2499 
2500 	error = 0;
2501 out:
2502 	return error;
2503 }
2504 
2505 #ifdef __ARCH_WANT_SYS_SIGPENDING
2506 
2507 asmlinkage long
2508 sys_sigpending(old_sigset_t __user *set)
2509 {
2510 	return do_sigpending(set, sizeof(*set));
2511 }
2512 
2513 #endif
2514 
2515 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2516 /* Some platforms have their own version with special arguments others
2517    support only sys_rt_sigprocmask.  */
2518 
2519 asmlinkage long
2520 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2521 {
2522 	int error;
2523 	old_sigset_t old_set, new_set;
2524 
2525 	if (set) {
2526 		error = -EFAULT;
2527 		if (copy_from_user(&new_set, set, sizeof(*set)))
2528 			goto out;
2529 		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2530 
2531 		spin_lock_irq(&current->sighand->siglock);
2532 		old_set = current->blocked.sig[0];
2533 
2534 		error = 0;
2535 		switch (how) {
2536 		default:
2537 			error = -EINVAL;
2538 			break;
2539 		case SIG_BLOCK:
2540 			sigaddsetmask(&current->blocked, new_set);
2541 			break;
2542 		case SIG_UNBLOCK:
2543 			sigdelsetmask(&current->blocked, new_set);
2544 			break;
2545 		case SIG_SETMASK:
2546 			current->blocked.sig[0] = new_set;
2547 			break;
2548 		}
2549 
2550 		recalc_sigpending();
2551 		spin_unlock_irq(&current->sighand->siglock);
2552 		if (error)
2553 			goto out;
2554 		if (oset)
2555 			goto set_old;
2556 	} else if (oset) {
2557 		old_set = current->blocked.sig[0];
2558 	set_old:
2559 		error = -EFAULT;
2560 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2561 			goto out;
2562 	}
2563 	error = 0;
2564 out:
2565 	return error;
2566 }
2567 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2568 
2569 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2570 asmlinkage long
2571 sys_rt_sigaction(int sig,
2572 		 const struct sigaction __user *act,
2573 		 struct sigaction __user *oact,
2574 		 size_t sigsetsize)
2575 {
2576 	struct k_sigaction new_sa, old_sa;
2577 	int ret = -EINVAL;
2578 
2579 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2580 	if (sigsetsize != sizeof(sigset_t))
2581 		goto out;
2582 
2583 	if (act) {
2584 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2585 			return -EFAULT;
2586 	}
2587 
2588 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2589 
2590 	if (!ret && oact) {
2591 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2592 			return -EFAULT;
2593 	}
2594 out:
2595 	return ret;
2596 }
2597 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2598 
2599 #ifdef __ARCH_WANT_SYS_SGETMASK
2600 
2601 /*
2602  * For backwards compatibility.  Functionality superseded by sigprocmask.
2603  */
2604 asmlinkage long
2605 sys_sgetmask(void)
2606 {
2607 	/* SMP safe */
2608 	return current->blocked.sig[0];
2609 }
2610 
2611 asmlinkage long
2612 sys_ssetmask(int newmask)
2613 {
2614 	int old;
2615 
2616 	spin_lock_irq(&current->sighand->siglock);
2617 	old = current->blocked.sig[0];
2618 
2619 	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2620 						  sigmask(SIGSTOP)));
2621 	recalc_sigpending();
2622 	spin_unlock_irq(&current->sighand->siglock);
2623 
2624 	return old;
2625 }
2626 #endif /* __ARCH_WANT_SGETMASK */
2627 
2628 #ifdef __ARCH_WANT_SYS_SIGNAL
2629 /*
2630  * For backwards compatibility.  Functionality superseded by sigaction.
2631  */
2632 asmlinkage unsigned long
2633 sys_signal(int sig, __sighandler_t handler)
2634 {
2635 	struct k_sigaction new_sa, old_sa;
2636 	int ret;
2637 
2638 	new_sa.sa.sa_handler = handler;
2639 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2640 
2641 	ret = do_sigaction(sig, &new_sa, &old_sa);
2642 
2643 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2644 }
2645 #endif /* __ARCH_WANT_SYS_SIGNAL */
2646 
2647 #ifdef __ARCH_WANT_SYS_PAUSE
2648 
2649 asmlinkage long
2650 sys_pause(void)
2651 {
2652 	current->state = TASK_INTERRUPTIBLE;
2653 	schedule();
2654 	return -ERESTARTNOHAND;
2655 }
2656 
2657 #endif
2658 
2659 void __init signals_init(void)
2660 {
2661 	sigqueue_cachep =
2662 		kmem_cache_create("sigqueue",
2663 				  sizeof(struct sigqueue),
2664 				  __alignof__(struct sigqueue),
2665 				  SLAB_PANIC, NULL, NULL);
2666 }
2667