xref: /linux/kernel/signal.c (revision d8327c784b51b57dac2c26cfad87dce0d68dfd98)
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *		Changes to use preallocated sigqueue structures
10  *		to allow signals to be sent reliably.
11  */
12 
13 #include <linux/config.h>
14 #include <linux/slab.h>
15 #include <linux/module.h>
16 #include <linux/smp_lock.h>
17 #include <linux/init.h>
18 #include <linux/sched.h>
19 #include <linux/fs.h>
20 #include <linux/tty.h>
21 #include <linux/binfmts.h>
22 #include <linux/security.h>
23 #include <linux/syscalls.h>
24 #include <linux/ptrace.h>
25 #include <linux/posix-timers.h>
26 #include <linux/signal.h>
27 #include <linux/audit.h>
28 #include <linux/capability.h>
29 #include <asm/param.h>
30 #include <asm/uaccess.h>
31 #include <asm/unistd.h>
32 #include <asm/siginfo.h>
33 
34 /*
35  * SLAB caches for signal bits.
36  */
37 
38 static kmem_cache_t *sigqueue_cachep;
39 
40 /*
41  * In POSIX a signal is sent either to a specific thread (Linux task)
42  * or to the process as a whole (Linux thread group).  How the signal
43  * is sent determines whether it's to one thread or the whole group,
44  * which determines which signal mask(s) are involved in blocking it
45  * from being delivered until later.  When the signal is delivered,
46  * either it's caught or ignored by a user handler or it has a default
47  * effect that applies to the whole thread group (POSIX process).
48  *
49  * The possible effects an unblocked signal set to SIG_DFL can have are:
50  *   ignore	- Nothing Happens
51  *   terminate	- kill the process, i.e. all threads in the group,
52  * 		  similar to exit_group.  The group leader (only) reports
53  *		  WIFSIGNALED status to its parent.
54  *   coredump	- write a core dump file describing all threads using
55  *		  the same mm and then kill all those threads
56  *   stop 	- stop all the threads in the group, i.e. TASK_STOPPED state
57  *
58  * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
59  * Other signals when not blocked and set to SIG_DFL behave as follows.
60  * The job control signals also have other special effects.
61  *
62  *	+--------------------+------------------+
63  *	|  POSIX signal      |  default action  |
64  *	+--------------------+------------------+
65  *	|  SIGHUP            |  terminate	|
66  *	|  SIGINT            |	terminate	|
67  *	|  SIGQUIT           |	coredump 	|
68  *	|  SIGILL            |	coredump 	|
69  *	|  SIGTRAP           |	coredump 	|
70  *	|  SIGABRT/SIGIOT    |	coredump 	|
71  *	|  SIGBUS            |	coredump 	|
72  *	|  SIGFPE            |	coredump 	|
73  *	|  SIGKILL           |	terminate(+)	|
74  *	|  SIGUSR1           |	terminate	|
75  *	|  SIGSEGV           |	coredump 	|
76  *	|  SIGUSR2           |	terminate	|
77  *	|  SIGPIPE           |	terminate	|
78  *	|  SIGALRM           |	terminate	|
79  *	|  SIGTERM           |	terminate	|
80  *	|  SIGCHLD           |	ignore   	|
81  *	|  SIGCONT           |	ignore(*)	|
82  *	|  SIGSTOP           |	stop(*)(+)  	|
83  *	|  SIGTSTP           |	stop(*)  	|
84  *	|  SIGTTIN           |	stop(*)  	|
85  *	|  SIGTTOU           |	stop(*)  	|
86  *	|  SIGURG            |	ignore   	|
87  *	|  SIGXCPU           |	coredump 	|
88  *	|  SIGXFSZ           |	coredump 	|
89  *	|  SIGVTALRM         |	terminate	|
90  *	|  SIGPROF           |	terminate	|
91  *	|  SIGPOLL/SIGIO     |	terminate	|
92  *	|  SIGSYS/SIGUNUSED  |	coredump 	|
93  *	|  SIGSTKFLT         |	terminate	|
94  *	|  SIGWINCH          |	ignore   	|
95  *	|  SIGPWR            |	terminate	|
96  *	|  SIGRTMIN-SIGRTMAX |	terminate       |
97  *	+--------------------+------------------+
98  *	|  non-POSIX signal  |  default action  |
99  *	+--------------------+------------------+
100  *	|  SIGEMT            |  coredump	|
101  *	+--------------------+------------------+
102  *
103  * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
104  * (*) Special job control effects:
105  * When SIGCONT is sent, it resumes the process (all threads in the group)
106  * from TASK_STOPPED state and also clears any pending/queued stop signals
107  * (any of those marked with "stop(*)").  This happens regardless of blocking,
108  * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
109  * any pending/queued SIGCONT signals; this happens regardless of blocking,
110  * catching, or ignoring the stop signal, though (except for SIGSTOP) the
111  * default action of stopping the process may happen later or never.
112  */
113 
114 #ifdef SIGEMT
115 #define M_SIGEMT	M(SIGEMT)
116 #else
117 #define M_SIGEMT	0
118 #endif
119 
120 #if SIGRTMIN > BITS_PER_LONG
121 #define M(sig) (1ULL << ((sig)-1))
122 #else
123 #define M(sig) (1UL << ((sig)-1))
124 #endif
125 #define T(sig, mask) (M(sig) & (mask))
126 
127 #define SIG_KERNEL_ONLY_MASK (\
128 	M(SIGKILL)   |  M(SIGSTOP)                                   )
129 
130 #define SIG_KERNEL_STOP_MASK (\
131 	M(SIGSTOP)   |  M(SIGTSTP)   |  M(SIGTTIN)   |  M(SIGTTOU)   )
132 
133 #define SIG_KERNEL_COREDUMP_MASK (\
134         M(SIGQUIT)   |  M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)   | \
135         M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGBUS)    |  M(SIGSYS)    | \
136         M(SIGXCPU)   |  M(SIGXFSZ)   |  M_SIGEMT                     )
137 
138 #define SIG_KERNEL_IGNORE_MASK (\
139         M(SIGCONT)   |  M(SIGCHLD)   |  M(SIGWINCH)  |  M(SIGURG)    )
140 
141 #define sig_kernel_only(sig) \
142 		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_ONLY_MASK))
143 #define sig_kernel_coredump(sig) \
144 		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_COREDUMP_MASK))
145 #define sig_kernel_ignore(sig) \
146 		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_IGNORE_MASK))
147 #define sig_kernel_stop(sig) \
148 		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK))
149 
150 #define sig_user_defined(t, signr) \
151 	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
152 	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
153 
154 #define sig_fatal(t, signr) \
155 	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
156 	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
157 
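/*
 * Editorial sketch (not part of kernel/signal.c): a standalone, minimal
 * illustration of the bitmask classification performed by the M()/T()
 * macros above.  It assumes ordinary userspace <signal.h> signal numbers
 * and a single unsigned long mask word; the kernel's real masks depend
 * on _NSIG, SIGRTMIN and the architecture.
 */
#include <signal.h>
#include <stdio.h>

#define DEMO_M(sig)		(1UL << ((sig) - 1))
#define DEMO_T(sig, mask)	(DEMO_M(sig) & (mask))

#define DEMO_STOP_MASK (DEMO_M(SIGSTOP) | DEMO_M(SIGTSTP) | \
			DEMO_M(SIGTTIN) | DEMO_M(SIGTTOU))

int main(void)
{
	/* SIGTSTP is in the stop mask, SIGTERM is not. */
	printf("SIGTSTP is a stop signal: %d\n",
	       DEMO_T(SIGTSTP, DEMO_STOP_MASK) != 0);
	printf("SIGTERM is a stop signal: %d\n",
	       DEMO_T(SIGTERM, DEMO_STOP_MASK) != 0);
	return 0;
}
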
158 static int sig_ignored(struct task_struct *t, int sig)
159 {
160 	void __user * handler;
161 
162 	/*
163 	 * Tracers always want to know about signals..
164 	 */
165 	if (t->ptrace & PT_PTRACED)
166 		return 0;
167 
168 	/*
169 	 * Blocked signals are never ignored, since the
170 	 * signal handler may change by the time it is
171 	 * unblocked.
172 	 */
173 	if (sigismember(&t->blocked, sig))
174 		return 0;
175 
176 	/* Is it explicitly or implicitly ignored? */
177 	handler = t->sighand->action[sig-1].sa.sa_handler;
178 	return   handler == SIG_IGN ||
179 		(handler == SIG_DFL && sig_kernel_ignore(sig));
180 }
181 
182 /*
183  * Re-calculate pending state from the set of locally pending
184  * signals, globally pending signals, and blocked signals.
185  */
186 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
187 {
188 	unsigned long ready;
189 	long i;
190 
191 	switch (_NSIG_WORDS) {
192 	default:
193 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
194 			ready |= signal->sig[i] &~ blocked->sig[i];
195 		break;
196 
197 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
198 		ready |= signal->sig[2] &~ blocked->sig[2];
199 		ready |= signal->sig[1] &~ blocked->sig[1];
200 		ready |= signal->sig[0] &~ blocked->sig[0];
201 		break;
202 
203 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
204 		ready |= signal->sig[0] &~ blocked->sig[0];
205 		break;
206 
207 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
208 	}
209 	return ready !=	0;
210 }
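
/*
 * Editorial sketch (not part of kernel/signal.c): the word-by-word
 * "pending & ~blocked" reduction that has_pending_signals() performs,
 * restated against a hypothetical two-word signal set so it can be
 * compiled and run on its own.
 */
#include <stdio.h>

#define DEMO_NSIG_WORDS 2

struct demo_sigset { unsigned long sig[DEMO_NSIG_WORDS]; };

static int demo_has_pending(const struct demo_sigset *pending,
			    const struct demo_sigset *blocked)
{
	unsigned long ready = 0;
	int i;

	for (i = 0; i < DEMO_NSIG_WORDS; i++)
		ready |= pending->sig[i] & ~blocked->sig[i];
	return ready != 0;
}

int main(void)
{
	struct demo_sigset pending = { { 0x4UL, 0x0UL } };	/* signal 3 pending */
	struct demo_sigset blocked = { { 0x4UL, 0x0UL } };	/* signal 3 blocked */

	printf("deliverable: %d\n", demo_has_pending(&pending, &blocked));	/* 0 */
	blocked.sig[0] = 0;
	printf("deliverable: %d\n", demo_has_pending(&pending, &blocked));	/* 1 */
	return 0;
}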
211 
212 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
213 
214 fastcall void recalc_sigpending_tsk(struct task_struct *t)
215 {
216 	if (t->signal->group_stop_count > 0 ||
217 	    (freezing(t)) ||
218 	    PENDING(&t->pending, &t->blocked) ||
219 	    PENDING(&t->signal->shared_pending, &t->blocked))
220 		set_tsk_thread_flag(t, TIF_SIGPENDING);
221 	else
222 		clear_tsk_thread_flag(t, TIF_SIGPENDING);
223 }
224 
225 void recalc_sigpending(void)
226 {
227 	recalc_sigpending_tsk(current);
228 }
229 
230 /* Given the mask, find the first available signal that should be serviced. */
231 
232 static int
233 next_signal(struct sigpending *pending, sigset_t *mask)
234 {
235 	unsigned long i, *s, *m, x;
236 	int sig = 0;
237 
238 	s = pending->signal.sig;
239 	m = mask->sig;
240 	switch (_NSIG_WORDS) {
241 	default:
242 		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
243 			if ((x = *s &~ *m) != 0) {
244 				sig = ffz(~x) + i*_NSIG_BPW + 1;
245 				break;
246 			}
247 		break;
248 
249 	case 2: if ((x = s[0] &~ m[0]) != 0)
250 			sig = 1;
251 		else if ((x = s[1] &~ m[1]) != 0)
252 			sig = _NSIG_BPW + 1;
253 		else
254 			break;
255 		sig += ffz(~x);
256 		break;
257 
258 	case 1: if ((x = *s &~ *m) != 0)
259 			sig = ffz(~x) + 1;
260 		break;
261 	}
262 
263 	return sig;
264 }
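
/*
 * Editorial sketch (not part of kernel/signal.c): next_signal() scans the
 * pending set word by word for the lowest-numbered signal that is not
 * masked; ffz(~x) is "index of the first set bit in x".  This standalone
 * version expresses the same scan with the GCC/Clang __builtin_ctzl().
 */
static int demo_next_signal(const unsigned long *pending,
			    const unsigned long *mask, int nwords)
{
	int i;

	for (i = 0; i < nwords; i++) {
		unsigned long x = pending[i] & ~mask[i];

		if (x)	/* bit 0 of word 0 corresponds to signal 1 */
			return __builtin_ctzl(x) + i * 8 * (int)sizeof(long) + 1;
	}
	return 0;	/* nothing deliverable */
}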
265 
266 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
267 					 int override_rlimit)
268 {
269 	struct sigqueue *q = NULL;
270 
271 	atomic_inc(&t->user->sigpending);
272 	if (override_rlimit ||
273 	    atomic_read(&t->user->sigpending) <=
274 			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
275 		q = kmem_cache_alloc(sigqueue_cachep, flags);
276 	if (unlikely(q == NULL)) {
277 		atomic_dec(&t->user->sigpending);
278 	} else {
279 		INIT_LIST_HEAD(&q->list);
280 		q->flags = 0;
281 		q->user = get_uid(t->user);
282 	}
283 	return(q);
284 }
285 
286 static void __sigqueue_free(struct sigqueue *q)
287 {
288 	if (q->flags & SIGQUEUE_PREALLOC)
289 		return;
290 	atomic_dec(&q->user->sigpending);
291 	free_uid(q->user);
292 	kmem_cache_free(sigqueue_cachep, q);
293 }
294 
295 static void flush_sigqueue(struct sigpending *queue)
296 {
297 	struct sigqueue *q;
298 
299 	sigemptyset(&queue->signal);
300 	while (!list_empty(&queue->list)) {
301 		q = list_entry(queue->list.next, struct sigqueue , list);
302 		list_del_init(&q->list);
303 		__sigqueue_free(q);
304 	}
305 }
306 
307 /*
308  * Flush all pending signals for a task.
309  */
310 
311 void
312 flush_signals(struct task_struct *t)
313 {
314 	unsigned long flags;
315 
316 	spin_lock_irqsave(&t->sighand->siglock, flags);
317 	clear_tsk_thread_flag(t,TIF_SIGPENDING);
318 	flush_sigqueue(&t->pending);
319 	flush_sigqueue(&t->signal->shared_pending);
320 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
321 }
322 
323 /*
324  * This function expects the tasklist_lock write-locked.
325  */
326 void __exit_sighand(struct task_struct *tsk)
327 {
328 	struct sighand_struct * sighand = tsk->sighand;
329 
330 	/* Ok, we're done with the signal handlers */
331 	tsk->sighand = NULL;
332 	if (atomic_dec_and_test(&sighand->count))
333 		sighand_free(sighand);
334 }
335 
336 void exit_sighand(struct task_struct *tsk)
337 {
338 	write_lock_irq(&tasklist_lock);
339 	rcu_read_lock();
340 	if (tsk->sighand != NULL) {
341 		struct sighand_struct *sighand = rcu_dereference(tsk->sighand);
342 		spin_lock(&sighand->siglock);
343 		__exit_sighand(tsk);
344 		spin_unlock(&sighand->siglock);
345 	}
346 	rcu_read_unlock();
347 	write_unlock_irq(&tasklist_lock);
348 }
349 
350 /*
351  * This function expects the tasklist_lock write-locked.
352  */
353 void __exit_signal(struct task_struct *tsk)
354 {
355 	struct signal_struct * sig = tsk->signal;
356 	struct sighand_struct * sighand;
357 
358 	if (!sig)
359 		BUG();
360 	if (!atomic_read(&sig->count))
361 		BUG();
362 	rcu_read_lock();
363 	sighand = rcu_dereference(tsk->sighand);
364 	spin_lock(&sighand->siglock);
365 	posix_cpu_timers_exit(tsk);
366 	if (atomic_dec_and_test(&sig->count)) {
367 		posix_cpu_timers_exit_group(tsk);
368 		tsk->signal = NULL;
369 		__exit_sighand(tsk);
370 		spin_unlock(&sighand->siglock);
371 		flush_sigqueue(&sig->shared_pending);
372 	} else {
373 		/*
374 		 * If there is any task waiting for the group exit
375 		 * then notify it:
376 		 */
377 		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
378 			wake_up_process(sig->group_exit_task);
379 			sig->group_exit_task = NULL;
380 		}
381 		if (tsk == sig->curr_target)
382 			sig->curr_target = next_thread(tsk);
383 		tsk->signal = NULL;
384 		/*
385 		 * Accumulate here the counters for all threads but the
386 		 * group leader as they die, so they can be added into
387 		 * the process-wide totals when those are taken.
388 		 * The group leader stays around as a zombie as long
389 		 * as there are other threads.  When it gets reaped,
390 		 * the exit.c code will add its counts into these totals.
391 		 * We won't ever get here for the group leader, since it
392 		 * will have been the last reference on the signal_struct.
393 		 */
394 		sig->utime = cputime_add(sig->utime, tsk->utime);
395 		sig->stime = cputime_add(sig->stime, tsk->stime);
396 		sig->min_flt += tsk->min_flt;
397 		sig->maj_flt += tsk->maj_flt;
398 		sig->nvcsw += tsk->nvcsw;
399 		sig->nivcsw += tsk->nivcsw;
400 		sig->sched_time += tsk->sched_time;
401 		__exit_sighand(tsk);
402 		spin_unlock(&sighand->siglock);
403 		sig = NULL;	/* Marker for below.  */
404 	}
405 	rcu_read_unlock();
406 	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
407 	flush_sigqueue(&tsk->pending);
408 	if (sig) {
409 		/*
410 		 * We are cleaning up the signal_struct here.
411 		 */
412 		exit_thread_group_keys(sig);
413 		kmem_cache_free(signal_cachep, sig);
414 	}
415 }
416 
417 void exit_signal(struct task_struct *tsk)
418 {
419 	atomic_dec(&tsk->signal->live);
420 
421 	write_lock_irq(&tasklist_lock);
422 	__exit_signal(tsk);
423 	write_unlock_irq(&tasklist_lock);
424 }
425 
426 /*
427  * Flush all handlers for a task.
428  */
429 
430 void
431 flush_signal_handlers(struct task_struct *t, int force_default)
432 {
433 	int i;
434 	struct k_sigaction *ka = &t->sighand->action[0];
435 	for (i = _NSIG ; i != 0 ; i--) {
436 		if (force_default || ka->sa.sa_handler != SIG_IGN)
437 			ka->sa.sa_handler = SIG_DFL;
438 		ka->sa.sa_flags = 0;
439 		sigemptyset(&ka->sa.sa_mask);
440 		ka++;
441 	}
442 }
443 
444 
445 /* Notify the system that a driver wants to block all signals for this
446  * process, and wants to be notified if any signals at all were to be
447  * sent/acted upon.  If the notifier routine returns non-zero, then the
448  * signal will be acted upon after all.  If the notifier routine returns 0,
449  * then the signal will be blocked.  Only one block per process is
450  * allowed.  priv is a pointer to private data that the notifier routine
451  * can use to determine if the signal should be blocked or not.  */
452 
453 void
454 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
455 {
456 	unsigned long flags;
457 
458 	spin_lock_irqsave(&current->sighand->siglock, flags);
459 	current->notifier_mask = mask;
460 	current->notifier_data = priv;
461 	current->notifier = notifier;
462 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
463 }
464 
465 /* Notify the system that blocking has ended. */
466 
467 void
468 unblock_all_signals(void)
469 {
470 	unsigned long flags;
471 
472 	spin_lock_irqsave(&current->sighand->siglock, flags);
473 	current->notifier = NULL;
474 	current->notifier_data = NULL;
475 	recalc_sigpending();
476 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
477 }
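
/*
 * Editorial sketch (not part of kernel/signal.c): how a driver might use
 * the notifier interface above.  The demo_device structure, its fields
 * and the choice of SIGTTOU are hypothetical; historically the DRM lock
 * code was the main in-tree user of block_all_signals().
 */
struct demo_device {
	int locked;			/* hypothetical hardware lock flag */
	sigset_t held_back;		/* signals to hold while locked */
};

static int demo_signal_notifier(void *priv)
{
	struct demo_device *dev = priv;

	/* Returning 0 blocks the signal for as long as the lock is held. */
	return !dev->locked;
}

static void demo_take_lock(struct demo_device *dev)
{
	dev->locked = 1;
	sigemptyset(&dev->held_back);
	sigaddset(&dev->held_back, SIGTTOU);
	block_all_signals(demo_signal_notifier, dev, &dev->held_back);
}

static void demo_drop_lock(struct demo_device *dev)
{
	dev->locked = 0;
	unblock_all_signals();
}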
478 
479 static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
480 {
481 	struct sigqueue *q, *first = NULL;
482 	int still_pending = 0;
483 
484 	if (unlikely(!sigismember(&list->signal, sig)))
485 		return 0;
486 
487 	/*
488 	 * Collect the siginfo appropriate to this signal.  Check if
489 	 * there is another siginfo for the same signal.
490 	 */
491 	list_for_each_entry(q, &list->list, list) {
492 		if (q->info.si_signo == sig) {
493 			if (first) {
494 				still_pending = 1;
495 				break;
496 			}
497 			first = q;
498 		}
499 	}
500 	if (first) {
501 		list_del_init(&first->list);
502 		copy_siginfo(info, &first->info);
503 		__sigqueue_free(first);
504 		if (!still_pending)
505 			sigdelset(&list->signal, sig);
506 	} else {
507 
508 		/* Ok, it wasn't in the queue.  This must be
509 		   a fast-pathed signal or we must have been
510 		   out of queue space.  So zero out the info.
511 		 */
512 		sigdelset(&list->signal, sig);
513 		info->si_signo = sig;
514 		info->si_errno = 0;
515 		info->si_code = 0;
516 		info->si_pid = 0;
517 		info->si_uid = 0;
518 	}
519 	return 1;
520 }
521 
522 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
523 			siginfo_t *info)
524 {
525 	int sig = 0;
526 
527 	sig = next_signal(pending, mask);
528 	if (sig) {
529 		if (current->notifier) {
530 			if (sigismember(current->notifier_mask, sig)) {
531 				if (!(current->notifier)(current->notifier_data)) {
532 					clear_thread_flag(TIF_SIGPENDING);
533 					return 0;
534 				}
535 			}
536 		}
537 
538 		if (!collect_signal(sig, pending, info))
539 			sig = 0;
540 
541 	}
542 	recalc_sigpending();
543 
544 	return sig;
545 }
546 
547 /*
548  * Dequeue a signal and return the element to the caller, which is
549  * expected to free it.
550  *
551  * All callers have to hold the siglock.
552  */
553 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
554 {
555 	int signr = __dequeue_signal(&tsk->pending, mask, info);
556 	if (!signr)
557 		signr = __dequeue_signal(&tsk->signal->shared_pending,
558 					 mask, info);
559  	if (signr && unlikely(sig_kernel_stop(signr))) {
560  		/*
561  		 * Set a marker that we have dequeued a stop signal.  Our
562  		 * caller might release the siglock and then the pending
563  		 * stop signal it is about to process is no longer in the
564  		 * pending bitmasks, but must still be cleared by a SIGCONT
565  		 * (and overruled by a SIGKILL).  So those cases clear this
566  		 * shared flag after we've set it.  Note that this flag may
567  		 * remain set after the signal we return is ignored or
568  		 * handled.  That doesn't matter because its only purpose
569  		 * is to alert stop-signal processing code when another
570  		 * processor has come along and cleared the flag.
571  		 */
572  		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
573  			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
574  	}
575 	if (signr &&
576 	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
577 	    info->si_sys_private) {
578 		/*
579 		 * Release the siglock to ensure proper locking order
580 		 * of timer locks outside of siglocks.  Note, we leave
581 		 * irqs disabled here, since the posix-timers code is
582 		 * about to disable them again anyway.
583 		 */
584 		spin_unlock(&tsk->sighand->siglock);
585 		do_schedule_next_timer(info);
586 		spin_lock(&tsk->sighand->siglock);
587 	}
588 	return signr;
589 }
590 
591 /*
592  * Tell a process that it has a new active signal..
593  *
594  * NOTE! we rely on the previous spin_lock to
595  * lock interrupts for us! We can only be called with
596  * "siglock" held, and local interrupts must
597  * have been disabled when it was acquired!
598  *
599  * No need to set need_resched since signal event passing
600  * goes through ->blocked
601  */
602 void signal_wake_up(struct task_struct *t, int resume)
603 {
604 	unsigned int mask;
605 
606 	set_tsk_thread_flag(t, TIF_SIGPENDING);
607 
608 	/*
609 	 * For SIGKILL, we want to wake it up in the stopped/traced case.
610 	 * We don't check t->state here because there is a race with it
611  * executing on another processor and just now entering stopped state.
612 	 * By using wake_up_state, we ensure the process will wake up and
613 	 * handle its death signal.
614 	 */
615 	mask = TASK_INTERRUPTIBLE;
616 	if (resume)
617 		mask |= TASK_STOPPED | TASK_TRACED;
618 	if (!wake_up_state(t, mask))
619 		kick_process(t);
620 }
621 
622 /*
623  * Remove signals in mask from the pending set and queue.
624  * Returns 1 if any signals were found.
625  *
626  * All callers must be holding the siglock.
627  *
628  * This version takes a sigset mask and looks at all signals,
629  * not just those in the first mask word.
630  */
631 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
632 {
633 	struct sigqueue *q, *n;
634 	sigset_t m;
635 
636 	sigandsets(&m, mask, &s->signal);
637 	if (sigisemptyset(&m))
638 		return 0;
639 
640 	signandsets(&s->signal, &s->signal, mask);
641 	list_for_each_entry_safe(q, n, &s->list, list) {
642 		if (sigismember(mask, q->info.si_signo)) {
643 			list_del_init(&q->list);
644 			__sigqueue_free(q);
645 		}
646 	}
647 	return 1;
648 }
649 /*
650  * Remove signals in mask from the pending set and queue.
651  * Returns 1 if any signals were found.
652  *
653  * All callers must be holding the siglock.
654  */
655 static int rm_from_queue(unsigned long mask, struct sigpending *s)
656 {
657 	struct sigqueue *q, *n;
658 
659 	if (!sigtestsetmask(&s->signal, mask))
660 		return 0;
661 
662 	sigdelsetmask(&s->signal, mask);
663 	list_for_each_entry_safe(q, n, &s->list, list) {
664 		if (q->info.si_signo < SIGRTMIN &&
665 		    (mask & sigmask(q->info.si_signo))) {
666 			list_del_init(&q->list);
667 			__sigqueue_free(q);
668 		}
669 	}
670 	return 1;
671 }
672 
673 /*
674  * Bad permissions for sending the signal
675  */
676 static int check_kill_permission(int sig, struct siginfo *info,
677 				 struct task_struct *t)
678 {
679 	int error = -EINVAL;
680 	if (!valid_signal(sig))
681 		return error;
682 	error = -EPERM;
683 	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
684 	    && ((sig != SIGCONT) ||
685 		(current->signal->session != t->signal->session))
686 	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
687 	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
688 	    && !capable(CAP_KILL))
689 		return error;
690 
691 	error = security_task_kill(t, info, sig);
692 	if (!error)
693 		audit_signal_info(sig, t); /* Let audit system see the signal */
694 	return error;
695 }
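
/*
 * Editorial sketch (not part of kernel/signal.c): the XOR chain in
 * check_kill_permission() is a compact way of saying "none of the
 * sender's euid/uid match the target's suid/uid".  The helper below
 * restates that test with explicit comparisons; the uid_t parameters
 * are hypothetical stand-ins for the task credentials.
 */
static int demo_uids_allow_signal(uid_t sender_euid, uid_t sender_uid,
				  uid_t target_suid, uid_t target_uid)
{
	return sender_euid == target_suid || sender_euid == target_uid ||
	       sender_uid  == target_suid || sender_uid  == target_uid;
}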
696 
697 /* forward decl */
698 static void do_notify_parent_cldstop(struct task_struct *tsk,
699 				     int to_self,
700 				     int why);
701 
702 /*
703  * Handle magic process-wide effects of stop/continue signals.
704  * Unlike the signal actions, these happen immediately at signal-generation
705  * time regardless of blocking, ignoring, or handling.  This does the
706  * actual continuing for SIGCONT, but not the actual stopping for stop
707  * signals.  The process stop is done as a signal action for SIG_DFL.
708  */
709 static void handle_stop_signal(int sig, struct task_struct *p)
710 {
711 	struct task_struct *t;
712 
713 	if (p->signal->flags & SIGNAL_GROUP_EXIT)
714 		/*
715 		 * The process is in the middle of dying already.
716 		 */
717 		return;
718 
719 	if (sig_kernel_stop(sig)) {
720 		/*
721 		 * This is a stop signal.  Remove SIGCONT from all queues.
722 		 */
723 		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
724 		t = p;
725 		do {
726 			rm_from_queue(sigmask(SIGCONT), &t->pending);
727 			t = next_thread(t);
728 		} while (t != p);
729 	} else if (sig == SIGCONT) {
730 		/*
731 		 * Remove all stop signals from all queues,
732 		 * and wake all threads.
733 		 */
734 		if (unlikely(p->signal->group_stop_count > 0)) {
735 			/*
736 			 * There was a group stop in progress.  We'll
737 			 * pretend it finished before we got here.  We are
738 			 * obliged to report it to the parent: if the
739 			 * SIGSTOP happened "after" this SIGCONT, then it
740 			 * would have cleared this pending SIGCONT.  If it
741 			 * happened "before" this SIGCONT, then the parent
742 			 * got the SIGCHLD about the stop finishing before
743 			 * the continue happened.  We do the notification
744 			 * now, and it's as if the stop had finished and
745 			 * the SIGCHLD was pending on entry to this kill.
746 			 */
747 			p->signal->group_stop_count = 0;
748 			p->signal->flags = SIGNAL_STOP_CONTINUED;
749 			spin_unlock(&p->sighand->siglock);
750 			do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
751 			spin_lock(&p->sighand->siglock);
752 		}
753 		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
754 		t = p;
755 		do {
756 			unsigned int state;
757 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
758 
759 			/*
760 			 * If there is a handler for SIGCONT, we must make
761 			 * sure that no thread returns to user mode before
762 			 * we post the signal, in case it was the only
763 			 * thread eligible to run the signal handler--then
764 			 * it must not do anything between resuming and
765 			 * running the handler.  With the TIF_SIGPENDING
766 			 * flag set, the thread will pause and acquire the
767 			 * siglock that we hold now and until we've queued
768 			 * the pending signal.
769 			 *
770 			 * Wake up the stopped thread _after_ setting
771 			 * TIF_SIGPENDING
772 			 */
773 			state = TASK_STOPPED;
774 			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
775 				set_tsk_thread_flag(t, TIF_SIGPENDING);
776 				state |= TASK_INTERRUPTIBLE;
777 			}
778 			wake_up_state(t, state);
779 
780 			t = next_thread(t);
781 		} while (t != p);
782 
783 		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
784 			/*
785 			 * We were in fact stopped, and are now continued.
786 			 * Notify the parent with CLD_CONTINUED.
787 			 */
788 			p->signal->flags = SIGNAL_STOP_CONTINUED;
789 			p->signal->group_exit_code = 0;
790 			spin_unlock(&p->sighand->siglock);
791 			do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
792 			spin_lock(&p->sighand->siglock);
793 		} else {
794 			/*
795 			 * We are not stopped, but there could be a stop
796 			 * signal in the middle of being processed after
797 			 * being removed from the queue.  Clear that too.
798 			 */
799 			p->signal->flags = 0;
800 		}
801 	} else if (sig == SIGKILL) {
802 		/*
803 		 * Make sure that any pending stop signal already dequeued
804 		 * is undone by the wakeup for SIGKILL.
805 		 */
806 		p->signal->flags = 0;
807 	}
808 }
809 
810 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
811 			struct sigpending *signals)
812 {
813 	struct sigqueue * q = NULL;
814 	int ret = 0;
815 
816 	/*
817 	 * fast-pathed signals for kernel-internal things like SIGSTOP
818 	 * or SIGKILL.
819 	 */
820 	if (info == SEND_SIG_FORCED)
821 		goto out_set;
822 
823 	/* Real-time signals must be queued if sent by sigqueue, or
824 	   some other real-time mechanism.  It is implementation
825 	   defined whether kill() does so.  We attempt to do so, on
826 	   the principle of least surprise, but since kill is not
827 	   allowed to fail with EAGAIN when low on memory, we just
828 	   make sure at least one signal gets delivered and don't
829 	   pass on the info struct.  */
830 
831 	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
832 					     (is_si_special(info) ||
833 					      info->si_code >= 0)));
834 	if (q) {
835 		list_add_tail(&q->list, &signals->list);
836 		switch ((unsigned long) info) {
837 		case (unsigned long) SEND_SIG_NOINFO:
838 			q->info.si_signo = sig;
839 			q->info.si_errno = 0;
840 			q->info.si_code = SI_USER;
841 			q->info.si_pid = current->pid;
842 			q->info.si_uid = current->uid;
843 			break;
844 		case (unsigned long) SEND_SIG_PRIV:
845 			q->info.si_signo = sig;
846 			q->info.si_errno = 0;
847 			q->info.si_code = SI_KERNEL;
848 			q->info.si_pid = 0;
849 			q->info.si_uid = 0;
850 			break;
851 		default:
852 			copy_siginfo(&q->info, info);
853 			break;
854 		}
855 	} else if (!is_si_special(info)) {
856 		if (sig >= SIGRTMIN && info->si_code != SI_USER)
857 		/*
858 		 * Queue overflow, abort.  We only abort if the signal was rt
859 		 * and sent by a user using something other than kill().
860 		 */
861 			return -EAGAIN;
862 	}
863 
864 out_set:
865 	sigaddset(&signals->signal, sig);
866 	return ret;
867 }
868 
869 #define LEGACY_QUEUE(sigptr, sig) \
870 	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
871 
872 
873 static int
874 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
875 {
876 	int ret = 0;
877 
878 	if (!irqs_disabled())
879 		BUG();
880 	assert_spin_locked(&t->sighand->siglock);
881 
882 	/* Short-circuit ignored signals.  */
883 	if (sig_ignored(t, sig))
884 		goto out;
885 
886 	/* Support queueing exactly one non-rt signal, so that we
887 	   can get more detailed information about the cause of
888 	   the signal. */
889 	if (LEGACY_QUEUE(&t->pending, sig))
890 		goto out;
891 
892 	ret = send_signal(sig, info, t, &t->pending);
893 	if (!ret && !sigismember(&t->blocked, sig))
894 		signal_wake_up(t, sig == SIGKILL);
895 out:
896 	return ret;
897 }
898 
899 /*
900  * Force a signal that the process can't ignore: if necessary
901  * we unblock the signal and change any SIG_IGN to SIG_DFL.
902  */
903 
904 int
905 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
906 {
907 	unsigned long int flags;
908 	int ret;
909 
910 	spin_lock_irqsave(&t->sighand->siglock, flags);
911 	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
912 		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
913 	}
914 	if (sigismember(&t->blocked, sig)) {
915 		sigdelset(&t->blocked, sig);
916 	}
917 	recalc_sigpending_tsk(t);
918 	ret = specific_send_sig_info(sig, info, t);
919 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
920 
921 	return ret;
922 }
923 
924 void
925 force_sig_specific(int sig, struct task_struct *t)
926 {
927 	force_sig_info(sig, SEND_SIG_FORCED, t);
928 }
929 
930 /*
931  * Test if P wants to take SIG.  After we've checked all threads with this,
932  * it's equivalent to finding no threads not blocking SIG.  Any threads not
933  * blocking SIG were ruled out because they are not running and already
934  * have pending signals.  Such threads will dequeue from the shared queue
935  * as soon as they're available, so putting the signal on the shared queue
936  * will be equivalent to sending it to one such thread.
937  */
938 static inline int wants_signal(int sig, struct task_struct *p)
939 {
940 	if (sigismember(&p->blocked, sig))
941 		return 0;
942 	if (p->flags & PF_EXITING)
943 		return 0;
944 	if (sig == SIGKILL)
945 		return 1;
946 	if (p->state & (TASK_STOPPED | TASK_TRACED))
947 		return 0;
948 	return task_curr(p) || !signal_pending(p);
949 }
950 
951 static void
952 __group_complete_signal(int sig, struct task_struct *p)
953 {
954 	struct task_struct *t;
955 
956 	/*
957 	 * Now find a thread we can wake up to take the signal off the queue.
958 	 *
959 	 * If the main thread wants the signal, it gets first crack.
960 	 * Probably the least surprising to the average bear.
961 	 */
962 	if (wants_signal(sig, p))
963 		t = p;
964 	else if (thread_group_empty(p))
965 		/*
966 		 * There is just one thread and it does not need to be woken.
967 		 * It will dequeue unblocked signals before it runs again.
968 		 */
969 		return;
970 	else {
971 		/*
972 		 * Otherwise try to find a suitable thread.
973 		 */
974 		t = p->signal->curr_target;
975 		if (t == NULL)
976 			/* restart balancing at this thread */
977 			t = p->signal->curr_target = p;
978 		BUG_ON(t->tgid != p->tgid);
979 
980 		while (!wants_signal(sig, t)) {
981 			t = next_thread(t);
982 			if (t == p->signal->curr_target)
983 				/*
984 				 * No thread needs to be woken.
985 				 * Any eligible threads will see
986 				 * the signal in the queue soon.
987 				 */
988 				return;
989 		}
990 		p->signal->curr_target = t;
991 	}
992 
993 	/*
994 	 * Found a killable thread.  If the signal will be fatal,
995 	 * then start taking the whole group down immediately.
996 	 */
997 	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
998 	    !sigismember(&t->real_blocked, sig) &&
999 	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
1000 		/*
1001 		 * This signal will be fatal to the whole group.
1002 		 */
1003 		if (!sig_kernel_coredump(sig)) {
1004 			/*
1005 			 * Start a group exit and wake everybody up.
1006 			 * This way we don't have other threads
1007 			 * running and doing things after a slower
1008 			 * thread has the fatal signal pending.
1009 			 */
1010 			p->signal->flags = SIGNAL_GROUP_EXIT;
1011 			p->signal->group_exit_code = sig;
1012 			p->signal->group_stop_count = 0;
1013 			t = p;
1014 			do {
1015 				sigaddset(&t->pending.signal, SIGKILL);
1016 				signal_wake_up(t, 1);
1017 				t = next_thread(t);
1018 			} while (t != p);
1019 			return;
1020 		}
1021 
1022 		/*
1023 		 * There will be a core dump.  We make all threads other
1024 		 * than the chosen one go into a group stop so that nothing
1025 		 * happens until it gets scheduled, takes the signal off
1026 		 * the shared queue, and does the core dump.  This is a
1027 		 * little more complicated than strictly necessary, but it
1028 		 * keeps the signal state that winds up in the core dump
1029 		 * unchanged from the death state, e.g. which thread had
1030 		 * the core-dump signal unblocked.
1031 		 */
1032 		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
1033 		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
1034 		p->signal->group_stop_count = 0;
1035 		p->signal->group_exit_task = t;
1036 		t = p;
1037 		do {
1038 			p->signal->group_stop_count++;
1039 			signal_wake_up(t, 0);
1040 			t = next_thread(t);
1041 		} while (t != p);
1042 		wake_up_process(p->signal->group_exit_task);
1043 		return;
1044 	}
1045 
1046 	/*
1047 	 * The signal is already in the shared-pending queue.
1048 	 * Tell the chosen thread to wake up and dequeue it.
1049 	 */
1050 	signal_wake_up(t, sig == SIGKILL);
1051 	return;
1052 }
1053 
1054 int
1055 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1056 {
1057 	int ret = 0;
1058 
1059 	assert_spin_locked(&p->sighand->siglock);
1060 	handle_stop_signal(sig, p);
1061 
1062 	/* Short-circuit ignored signals.  */
1063 	if (sig_ignored(p, sig))
1064 		return ret;
1065 
1066 	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
1067 		/* This is a non-RT signal and we already have one queued.  */
1068 		return ret;
1069 
1070 	/*
1071 	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
1072 	 * We always use the shared queue for process-wide signals,
1073 	 * to avoid several races.
1074 	 */
1075 	ret = send_signal(sig, info, p, &p->signal->shared_pending);
1076 	if (unlikely(ret))
1077 		return ret;
1078 
1079 	__group_complete_signal(sig, p);
1080 	return 0;
1081 }
1082 
1083 /*
1084  * Nuke all other threads in the group.
1085  */
1086 void zap_other_threads(struct task_struct *p)
1087 {
1088 	struct task_struct *t;
1089 
1090 	p->signal->flags = SIGNAL_GROUP_EXIT;
1091 	p->signal->group_stop_count = 0;
1092 
1093 	if (thread_group_empty(p))
1094 		return;
1095 
1096 	for (t = next_thread(p); t != p; t = next_thread(t)) {
1097 		/*
1098 		 * Don't bother with already dead threads
1099 		 */
1100 		if (t->exit_state)
1101 			continue;
1102 
1103 		/*
1104 		 * We don't want to notify the parent, since we are
1105 		 * killed as part of a thread group due to another
1106 		 * thread doing an execve() or similar. So set the
1107 		 * exit signal to -1 to allow immediate reaping of
1108 		 * the process.  But don't detach the thread group
1109 		 * leader.
1110 		 */
1111 		if (t != p->group_leader)
1112 			t->exit_signal = -1;
1113 
1114 		/* SIGKILL will be handled before any pending SIGSTOP */
1115 		sigaddset(&t->pending.signal, SIGKILL);
1116 		signal_wake_up(t, 1);
1117 	}
1118 }
1119 
1120 /*
1121  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
1122  */
1123 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1124 {
1125 	unsigned long flags;
1126 	struct sighand_struct *sp;
1127 	int ret;
1128 
1129 retry:
1130 	ret = check_kill_permission(sig, info, p);
1131 	if (!ret && sig && (sp = rcu_dereference(p->sighand))) {
1132 		spin_lock_irqsave(&sp->siglock, flags);
1133 		if (p->sighand != sp) {
1134 			spin_unlock_irqrestore(&sp->siglock, flags);
1135 			goto retry;
1136 		}
1137 		if ((atomic_read(&sp->count) == 0) ||
1138 				(atomic_read(&p->usage) == 0)) {
1139 			spin_unlock_irqrestore(&sp->siglock, flags);
1140 			return -ESRCH;
1141 		}
1142 		ret = __group_send_sig_info(sig, info, p);
1143 		spin_unlock_irqrestore(&sp->siglock, flags);
1144 	}
1145 
1146 	return ret;
1147 }
1148 
1149 /*
1150  * kill_pg_info() sends a signal to a process group: this is what the tty
1151  * control characters do (^C, ^Z etc)
1152  */
1153 
1154 int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1155 {
1156 	struct task_struct *p = NULL;
1157 	int retval, success;
1158 
1159 	if (pgrp <= 0)
1160 		return -EINVAL;
1161 
1162 	success = 0;
1163 	retval = -ESRCH;
1164 	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
1165 		int err = group_send_sig_info(sig, info, p);
1166 		success |= !err;
1167 		retval = err;
1168 	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
1169 	return success ? 0 : retval;
1170 }
1171 
1172 int
1173 kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1174 {
1175 	int retval;
1176 
1177 	read_lock(&tasklist_lock);
1178 	retval = __kill_pg_info(sig, info, pgrp);
1179 	read_unlock(&tasklist_lock);
1180 
1181 	return retval;
1182 }
1183 
1184 int
1185 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1186 {
1187 	int error;
1188 	int acquired_tasklist_lock = 0;
1189 	struct task_struct *p;
1190 
1191 	rcu_read_lock();
1192 	if (unlikely(sig_kernel_stop(sig) || sig == SIGCONT)) {
1193 		read_lock(&tasklist_lock);
1194 		acquired_tasklist_lock = 1;
1195 	}
1196 	p = find_task_by_pid(pid);
1197 	error = -ESRCH;
1198 	if (p)
1199 		error = group_send_sig_info(sig, info, p);
1200 	if (unlikely(acquired_tasklist_lock))
1201 		read_unlock(&tasklist_lock);
1202 	rcu_read_unlock();
1203 	return error;
1204 }
1205 
1206 /* like kill_proc_info(), but doesn't use uid/euid of "current" */
1207 int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
1208 		      uid_t uid, uid_t euid)
1209 {
1210 	int ret = -EINVAL;
1211 	struct task_struct *p;
1212 
1213 	if (!valid_signal(sig))
1214 		return ret;
1215 
1216 	read_lock(&tasklist_lock);
1217 	p = find_task_by_pid(pid);
1218 	if (!p) {
1219 		ret = -ESRCH;
1220 		goto out_unlock;
1221 	}
1222 	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1223 	    && (euid != p->suid) && (euid != p->uid)
1224 	    && (uid != p->suid) && (uid != p->uid)) {
1225 		ret = -EPERM;
1226 		goto out_unlock;
1227 	}
1228 	if (sig && p->sighand) {
1229 		unsigned long flags;
1230 		spin_lock_irqsave(&p->sighand->siglock, flags);
1231 		ret = __group_send_sig_info(sig, info, p);
1232 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1233 	}
1234 out_unlock:
1235 	read_unlock(&tasklist_lock);
1236 	return ret;
1237 }
1238 EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
1239 
1240 /*
1241  * kill_something_info() interprets pid in interesting ways just like kill(2).
1242  *
1243  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1244  * is probably wrong.  Should make it like BSD or SYSV.
1245  */
1246 
1247 static int kill_something_info(int sig, struct siginfo *info, int pid)
1248 {
1249 	if (!pid) {
1250 		return kill_pg_info(sig, info, process_group(current));
1251 	} else if (pid == -1) {
1252 		int retval = 0, count = 0;
1253 		struct task_struct * p;
1254 
1255 		read_lock(&tasklist_lock);
1256 		for_each_process(p) {
1257 			if (p->pid > 1 && p->tgid != current->tgid) {
1258 				int err = group_send_sig_info(sig, info, p);
1259 				++count;
1260 				if (err != -EPERM)
1261 					retval = err;
1262 			}
1263 		}
1264 		read_unlock(&tasklist_lock);
1265 		return count ? retval : -ESRCH;
1266 	} else if (pid < 0) {
1267 		return kill_pg_info(sig, info, -pid);
1268 	} else {
1269 		return kill_proc_info(sig, info, pid);
1270 	}
1271 }
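
/*
 * Editorial sketch (not part of kernel/signal.c): the pid encoding that
 * kill_something_info() decodes above is the same one userspace passes
 * to kill(2).  The target pid, pgrp and signal below are arbitrary
 * example values.
 */
#include <signal.h>
#include <sys/types.h>

static void demo_kill_targets(pid_t pid, pid_t pgrp)
{
	kill(pid, SIGTERM);	/* pid > 0: that process only           */
	kill(0, SIGTERM);	/* pid == 0: the caller's process group  */
	kill(-pgrp, SIGTERM);	/* pid < -1: process group |pid|         */
	kill(-1, SIGTERM);	/* pid == -1: everything we may signal   */
}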
1272 
1273 /*
1274  * These are for backward compatibility with the rest of the kernel source.
1275  */
1276 
1277 /*
1278  * These two are the most common entry points.  They send a signal
1279  * just to the specific thread.
1280  */
1281 int
1282 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1283 {
1284 	int ret;
1285 	unsigned long flags;
1286 
1287 	/*
1288 	 * Make sure legacy kernel users don't send in bad values
1289 	 * (normal paths check this in check_kill_permission).
1290 	 */
1291 	if (!valid_signal(sig))
1292 		return -EINVAL;
1293 
1294 	/*
1295 	 * We need the tasklist lock even for the specific
1296 	 * thread case (when we don't need to follow the group
1297 	 * lists) in order to avoid races with "p->sighand"
1298 	 * going away or changing from under us.
1299 	 */
1300 	read_lock(&tasklist_lock);
1301 	spin_lock_irqsave(&p->sighand->siglock, flags);
1302 	ret = specific_send_sig_info(sig, info, p);
1303 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
1304 	read_unlock(&tasklist_lock);
1305 	return ret;
1306 }
1307 
1308 #define __si_special(priv) \
1309 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1310 
1311 int
1312 send_sig(int sig, struct task_struct *p, int priv)
1313 {
1314 	return send_sig_info(sig, __si_special(priv), p);
1315 }
1316 
1317 /*
1318  * This is the entry point for "process-wide" signals.
1319  * They will go to an appropriate thread in the thread group.
1320  */
1321 int
1322 send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1323 {
1324 	int ret;
1325 	read_lock(&tasklist_lock);
1326 	ret = group_send_sig_info(sig, info, p);
1327 	read_unlock(&tasklist_lock);
1328 	return ret;
1329 }
1330 
1331 void
1332 force_sig(int sig, struct task_struct *p)
1333 {
1334 	force_sig_info(sig, SEND_SIG_PRIV, p);
1335 }
1336 
1337 /*
1338  * When things go south during signal handling, we
1339  * will force a SIGSEGV. And if the signal that caused
1340  * the problem was already a SIGSEGV, we'll want to
1341  * make sure we don't even try to deliver the signal..
1342  */
1343 int
1344 force_sigsegv(int sig, struct task_struct *p)
1345 {
1346 	if (sig == SIGSEGV) {
1347 		unsigned long flags;
1348 		spin_lock_irqsave(&p->sighand->siglock, flags);
1349 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1350 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1351 	}
1352 	force_sig(SIGSEGV, p);
1353 	return 0;
1354 }
1355 
1356 int
1357 kill_pg(pid_t pgrp, int sig, int priv)
1358 {
1359 	return kill_pg_info(sig, __si_special(priv), pgrp);
1360 }
1361 
1362 int
1363 kill_proc(pid_t pid, int sig, int priv)
1364 {
1365 	return kill_proc_info(sig, __si_special(priv), pid);
1366 }
1367 
1368 /*
1369  * These functions support sending signals using preallocated sigqueue
1370  * structures.  This is needed "because realtime applications cannot
1371  * afford to lose notifications of asynchronous events, like timer
1372  * expirations or I/O completions".  In the case of POSIX timers,
1373  * we allocate the sigqueue structure in timer_create().  If this
1374  * allocation fails, we are able to report the failure to the application
1375  * with an EAGAIN error.
1376  */
1377 
1378 struct sigqueue *sigqueue_alloc(void)
1379 {
1380 	struct sigqueue *q;
1381 
1382 	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1383 		q->flags |= SIGQUEUE_PREALLOC;
1384 	return(q);
1385 }
1386 
1387 void sigqueue_free(struct sigqueue *q)
1388 {
1389 	unsigned long flags;
1390 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1391 	/*
1392 	 * If the signal is still pending remove it from the
1393 	 * pending queue.
1394 	 */
1395 	if (unlikely(!list_empty(&q->list))) {
1396 		spinlock_t *lock = &current->sighand->siglock;
1397 		read_lock(&tasklist_lock);
1398 		spin_lock_irqsave(lock, flags);
1399 		if (!list_empty(&q->list))
1400 			list_del_init(&q->list);
1401 		spin_unlock_irqrestore(lock, flags);
1402 		read_unlock(&tasklist_lock);
1403 	}
1404 	q->flags &= ~SIGQUEUE_PREALLOC;
1405 	__sigqueue_free(q);
1406 }
1407 
1408 int
1409 send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1410 {
1411 	unsigned long flags;
1412 	int ret = 0;
1413 	struct sighand_struct *sh;
1414 
1415 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1416 
1417 	/*
1418 	 * The RCU-based delayed sighand destroy makes it possible to
1419 	 * run this without the tasklist lock held. The task struct itself
1420 	 * cannot go away as create_timer did get_task_struct().
1421 	 *
1422 	 * We return -1 when the task is marked exiting, so
1423 	 * posix_timer_event can redirect it to the group leader.
1424 	 */
1425 	rcu_read_lock();
1426 
1427 	if (unlikely(p->flags & PF_EXITING)) {
1428 		ret = -1;
1429 		goto out_err;
1430 	}
1431 
1432 retry:
1433 	sh = rcu_dereference(p->sighand);
1434 
1435 	spin_lock_irqsave(&sh->siglock, flags);
1436 	if (p->sighand != sh) {
1437 		/* We raced with exec() in a multithreaded process... */
1438 		spin_unlock_irqrestore(&sh->siglock, flags);
1439 		goto retry;
1440 	}
1441 
1442 	/*
1443 	 * We do the check here again to handle the following scenario:
1444 	 *
1445 	 * CPU 0		CPU 1
1446 	 * send_sigqueue
1447 	 * check PF_EXITING
1448 	 * interrupt		exit code running
1449 	 *			__exit_signal
1450 	 *			lock sighand->siglock
1451 	 *			unlock sighand->siglock
1452 	 * lock sh->siglock
1453 	 * add(tsk->pending) 	flush_sigqueue(tsk->pending)
1454 	 *
1455 	 */
1456 
1457 	if (unlikely(p->flags & PF_EXITING)) {
1458 		ret = -1;
1459 		goto out;
1460 	}
1461 
1462 	if (unlikely(!list_empty(&q->list))) {
1463 		/*
1464 		 * If an SI_TIMER entry is already queued, just increment
1465 		 * the overrun count.
1466 		 */
1467 		if (q->info.si_code != SI_TIMER)
1468 			BUG();
1469 		q->info.si_overrun++;
1470 		goto out;
1471 	}
1472 	/* Short-circuit ignored signals.  */
1473 	if (sig_ignored(p, sig)) {
1474 		ret = 1;
1475 		goto out;
1476 	}
1477 
1478 	list_add_tail(&q->list, &p->pending.list);
1479 	sigaddset(&p->pending.signal, sig);
1480 	if (!sigismember(&p->blocked, sig))
1481 		signal_wake_up(p, sig == SIGKILL);
1482 
1483 out:
1484 	spin_unlock_irqrestore(&sh->siglock, flags);
1485 out_err:
1486 	rcu_read_unlock();
1487 
1488 	return ret;
1489 }
1490 
1491 int
1492 send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1493 {
1494 	unsigned long flags;
1495 	int ret = 0;
1496 
1497 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1498 
1499 	read_lock(&tasklist_lock);
1500 	/* Since it_lock is held, p->sighand cannot be NULL. */
1501 	spin_lock_irqsave(&p->sighand->siglock, flags);
1502 	handle_stop_signal(sig, p);
1503 
1504 	/* Short-circuit ignored signals.  */
1505 	if (sig_ignored(p, sig)) {
1506 		ret = 1;
1507 		goto out;
1508 	}
1509 
1510 	if (unlikely(!list_empty(&q->list))) {
1511 		/*
1512 		 * If an SI_TIMER entry is already queued, just increment
1513 		 * the overrun count.  Other uses should not try to
1514 		 * send the signal multiple times.
1515 		 */
1516 		if (q->info.si_code != SI_TIMER)
1517 			BUG();
1518 		q->info.si_overrun++;
1519 		goto out;
1520 	}
1521 
1522 	/*
1523 	 * Put this signal on the shared-pending queue.
1524 	 * We always use the shared queue for process-wide signals,
1525 	 * to avoid several races.
1526 	 */
1527 	list_add_tail(&q->list, &p->signal->shared_pending.list);
1528 	sigaddset(&p->signal->shared_pending.signal, sig);
1529 
1530 	__group_complete_signal(sig, p);
1531 out:
1532 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
1533 	read_unlock(&tasklist_lock);
1534 	return ret;
1535 }
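
/*
 * Editorial sketch (not part of kernel/signal.c): the preallocated
 * sigqueue lifecycle as the POSIX timer code uses these functions.
 * The demo_timer structure and helpers are hypothetical and omit the
 * locking and error handling of the real posix-timers code.
 */
struct demo_timer {
	struct sigqueue *sigq;		/* preallocated at creation time */
	struct task_struct *target;	/* who receives the signal */
	int sig;			/* which signal to send */
};

static int demo_timer_create(struct demo_timer *tmr)
{
	tmr->sigq = sigqueue_alloc();	/* may fail: report -EAGAIN */
	return tmr->sigq ? 0 : -EAGAIN;
}

static void demo_timer_expire(struct demo_timer *tmr)
{
	/* Re-uses the preallocated entry, so expiry never needs memory. */
	send_group_sigqueue(tmr->sig, tmr->sigq, tmr->target);
}

static void demo_timer_delete(struct demo_timer *tmr)
{
	sigqueue_free(tmr->sigq);
	tmr->sigq = NULL;
}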
1536 
1537 /*
1538  * Wake up any threads in the parent blocked in wait* syscalls.
1539  */
1540 static inline void __wake_up_parent(struct task_struct *p,
1541 				    struct task_struct *parent)
1542 {
1543 	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1544 }
1545 
1546 /*
1547  * Let a parent know about the death of a child.
1548  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1549  */
1550 
1551 void do_notify_parent(struct task_struct *tsk, int sig)
1552 {
1553 	struct siginfo info;
1554 	unsigned long flags;
1555 	struct sighand_struct *psig;
1556 
1557 	BUG_ON(sig == -1);
1558 
1559  	/* do_notify_parent_cldstop should have been called instead.  */
1560  	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1561 
1562 	BUG_ON(!tsk->ptrace &&
1563 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1564 
1565 	info.si_signo = sig;
1566 	info.si_errno = 0;
1567 	info.si_pid = tsk->pid;
1568 	info.si_uid = tsk->uid;
1569 
1570 	/* FIXME: find out whether or not this is supposed to be c*time. */
1571 	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1572 						       tsk->signal->utime));
1573 	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1574 						       tsk->signal->stime));
1575 
1576 	info.si_status = tsk->exit_code & 0x7f;
1577 	if (tsk->exit_code & 0x80)
1578 		info.si_code = CLD_DUMPED;
1579 	else if (tsk->exit_code & 0x7f)
1580 		info.si_code = CLD_KILLED;
1581 	else {
1582 		info.si_code = CLD_EXITED;
1583 		info.si_status = tsk->exit_code >> 8;
1584 	}
1585 
1586 	psig = tsk->parent->sighand;
1587 	spin_lock_irqsave(&psig->siglock, flags);
1588 	if (!tsk->ptrace && sig == SIGCHLD &&
1589 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1590 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1591 		/*
1592 		 * We are exiting and our parent doesn't care.  POSIX.1
1593 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1594 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1595 		 * automatically and not left for our parent's wait4 call.
1596 		 * Rather than having the parent do it as a magic kind of
1597 		 * signal handler, we just set this to tell do_exit that we
1598 		 * can be cleaned up without becoming a zombie.  Note that
1599 		 * we still call __wake_up_parent in this case, because a
1600 		 * blocked sys_wait4 might now return -ECHILD.
1601 		 *
1602 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1603 		 * is implementation-defined: we do (if you don't want
1604 		 * it, just use SIG_IGN instead).
1605 		 */
1606 		tsk->exit_signal = -1;
1607 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1608 			sig = 0;
1609 	}
1610 	if (valid_signal(sig) && sig > 0)
1611 		__group_send_sig_info(sig, &info, tsk->parent);
1612 	__wake_up_parent(tsk, tsk->parent);
1613 	spin_unlock_irqrestore(&psig->siglock, flags);
1614 }
1615 
1616 static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
1617 {
1618 	struct siginfo info;
1619 	unsigned long flags;
1620 	struct task_struct *parent;
1621 	struct sighand_struct *sighand;
1622 
1623 	if (to_self)
1624 		parent = tsk->parent;
1625 	else {
1626 		tsk = tsk->group_leader;
1627 		parent = tsk->real_parent;
1628 	}
1629 
1630 	info.si_signo = SIGCHLD;
1631 	info.si_errno = 0;
1632 	info.si_pid = tsk->pid;
1633 	info.si_uid = tsk->uid;
1634 
1635 	/* FIXME: find out whether or not this is supposed to be c*time. */
1636 	info.si_utime = cputime_to_jiffies(tsk->utime);
1637 	info.si_stime = cputime_to_jiffies(tsk->stime);
1638 
1639  	info.si_code = why;
1640  	switch (why) {
1641  	case CLD_CONTINUED:
1642  		info.si_status = SIGCONT;
1643  		break;
1644  	case CLD_STOPPED:
1645  		info.si_status = tsk->signal->group_exit_code & 0x7f;
1646  		break;
1647  	case CLD_TRAPPED:
1648  		info.si_status = tsk->exit_code & 0x7f;
1649  		break;
1650  	default:
1651  		BUG();
1652  	}
1653 
1654 	sighand = parent->sighand;
1655 	spin_lock_irqsave(&sighand->siglock, flags);
1656 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1657 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1658 		__group_send_sig_info(SIGCHLD, &info, parent);
1659 	/*
1660 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1661 	 */
1662 	__wake_up_parent(tsk, parent);
1663 	spin_unlock_irqrestore(&sighand->siglock, flags);
1664 }
1665 
1666 /*
1667  * This must be called with current->sighand->siglock held.
1668  *
1669  * This should be the path for all ptrace stops.
1670  * We always set current->last_siginfo while stopped here.
1671  * That makes it a way to test a stopped process for
1672  * being ptrace-stopped vs being job-control-stopped.
1673  *
1674  * If we actually decide not to stop at all because the tracer is gone,
1675  * we leave nostop_code in current->exit_code.
1676  */
1677 static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1678 {
1679 	/*
1680 	 * If there is a group stop in progress,
1681 	 * we must participate in the bookkeeping.
1682 	 */
1683 	if (current->signal->group_stop_count > 0)
1684 		--current->signal->group_stop_count;
1685 
1686 	current->last_siginfo = info;
1687 	current->exit_code = exit_code;
1688 
1689 	/* Let the debugger run.  */
1690 	set_current_state(TASK_TRACED);
1691 	spin_unlock_irq(&current->sighand->siglock);
1692 	read_lock(&tasklist_lock);
1693 	if (likely(current->ptrace & PT_PTRACED) &&
1694 	    likely(current->parent != current->real_parent ||
1695 		   !(current->ptrace & PT_ATTACHED)) &&
1696 	    (likely(current->parent->signal != current->signal) ||
1697 	     !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
1698 		do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
1699 		read_unlock(&tasklist_lock);
1700 		schedule();
1701 	} else {
1702 		/*
1703 		 * By the time we got the lock, our tracer went away.
1704 		 * Don't stop here.
1705 		 */
1706 		read_unlock(&tasklist_lock);
1707 		set_current_state(TASK_RUNNING);
1708 		current->exit_code = nostop_code;
1709 	}
1710 
1711 	/*
1712 	 * We are back.  Now reacquire the siglock before touching
1713 	 * last_siginfo, so that we are sure to have synchronized with
1714 	 * any signal-sending on another CPU that wants to examine it.
1715 	 */
1716 	spin_lock_irq(&current->sighand->siglock);
1717 	current->last_siginfo = NULL;
1718 
1719 	/*
1720 	 * Queued signals ignored us while we were stopped for tracing.
1721 	 * So check for any that we should take before resuming user mode.
1722 	 */
1723 	recalc_sigpending();
1724 }
1725 
1726 void ptrace_notify(int exit_code)
1727 {
1728 	siginfo_t info;
1729 
1730 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1731 
1732 	memset(&info, 0, sizeof info);
1733 	info.si_signo = SIGTRAP;
1734 	info.si_code = exit_code;
1735 	info.si_pid = current->pid;
1736 	info.si_uid = current->uid;
1737 
1738 	/* Let the debugger run.  */
1739 	spin_lock_irq(&current->sighand->siglock);
1740 	ptrace_stop(exit_code, 0, &info);
1741 	spin_unlock_irq(&current->sighand->siglock);
1742 }
1743 
1744 static void
1745 finish_stop(int stop_count)
1746 {
1747 	int to_self;
1748 
1749 	/*
1750 	 * If there are no other threads in the group, or if there is
1751 	 * a group stop in progress and we are the last to stop,
1752 	 * report to the parent.  When ptraced, every thread reports itself.
1753 	 */
1754 	if (stop_count < 0 || (current->ptrace & PT_PTRACED))
1755 		to_self = 1;
1756 	else if (stop_count == 0)
1757 		to_self = 0;
1758 	else
1759 		goto out;
1760 
1761 	read_lock(&tasklist_lock);
1762 	do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
1763 	read_unlock(&tasklist_lock);
1764 
1765 out:
1766 	schedule();
1767 	/*
1768 	 * Now we don't run again until continued.
1769 	 */
1770 	current->exit_code = 0;
1771 }
1772 
1773 /*
1774  * This performs the stopping for SIGSTOP and other stop signals.
1775  * We have to stop all threads in the thread group.
1776  * Returns nonzero if we've actually stopped and released the siglock.
1777  * Returns zero if we didn't stop and still hold the siglock.
1778  */
1779 static int
1780 do_signal_stop(int signr)
1781 {
1782 	struct signal_struct *sig = current->signal;
1783 	struct sighand_struct *sighand = current->sighand;
1784 	int stop_count = -1;
1785 
1786 	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1787 		return 0;
1788 
1789 	if (sig->group_stop_count > 0) {
1790 		/*
1791 		 * There is a group stop in progress.  We don't need to
1792 		 * start another one.
1793 		 */
1794 		signr = sig->group_exit_code;
1795 		stop_count = --sig->group_stop_count;
1796 		current->exit_code = signr;
1797 		set_current_state(TASK_STOPPED);
1798 		if (stop_count == 0)
1799 			sig->flags = SIGNAL_STOP_STOPPED;
1800 		spin_unlock_irq(&sighand->siglock);
1801 	}
1802 	else if (thread_group_empty(current)) {
1803 		/*
1804 		 * Lock must be held through transition to stopped state.
1805 		 */
1806 		current->exit_code = current->signal->group_exit_code = signr;
1807 		set_current_state(TASK_STOPPED);
1808 		sig->flags = SIGNAL_STOP_STOPPED;
1809 		spin_unlock_irq(&sighand->siglock);
1810 	}
1811 	else {
1812 		/*
1813 		 * There is no group stop already in progress.
1814 		 * We must initiate one now, but that requires
1815 		 * dropping siglock to get both the tasklist lock
1816 		 * and siglock again in the proper order.  Note that
1817 		 * this allows an intervening SIGCONT to be posted.
1818 		 * We need to check for that and bail out if necessary.
1819 		 */
1820 		struct task_struct *t;
1821 
1822 		spin_unlock_irq(&sighand->siglock);
1823 
1824 		/* signals can be posted during this window */
1825 
1826 		read_lock(&tasklist_lock);
1827 		spin_lock_irq(&sighand->siglock);
1828 
1829 		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
1830 			/*
1831 			 * Another stop or continue happened while we
1832 			 * didn't have the lock.  We can just swallow this
1833 			 * signal now.  If we raced with a SIGCONT, that
1834 			 * should have just cleared it now.  If we raced
1835 			 * with another processor delivering a stop signal,
1836 			 * then the SIGCONT that wakes us up should clear it.
1837 			 */
1838 			read_unlock(&tasklist_lock);
1839 			return 0;
1840 		}
1841 
1842 		if (sig->group_stop_count == 0) {
1843 			sig->group_exit_code = signr;
1844 			stop_count = 0;
1845 			for (t = next_thread(current); t != current;
1846 			     t = next_thread(t))
1847 				/*
1848 				 * Setting state to TASK_STOPPED for a group
1849 				 * stop is always done with the siglock held,
1850 				 * so this check has no races.
1851 				 */
1852 				if (!t->exit_state &&
1853 				    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
1854 					stop_count++;
1855 					signal_wake_up(t, 0);
1856 				}
1857 			sig->group_stop_count = stop_count;
1858 		}
1859 		else {
1860 			/* A race with another thread while unlocked.  */
1861 			signr = sig->group_exit_code;
1862 			stop_count = --sig->group_stop_count;
1863 		}
1864 
1865 		current->exit_code = signr;
1866 		set_current_state(TASK_STOPPED);
1867 		if (stop_count == 0)
1868 			sig->flags = SIGNAL_STOP_STOPPED;
1869 
1870 		spin_unlock_irq(&sighand->siglock);
1871 		read_unlock(&tasklist_lock);
1872 	}
1873 
1874 	finish_stop(stop_count);
1875 	return 1;
1876 }
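
/*
 * A minimal user-space sketch (not part of this kernel file, helper name is
 * illustrative) of the job-control behaviour implemented here: a stop signal
 * stops the whole thread group, the parent observes it with WUNTRACED, and
 * SIGCONT wakes the stopped threads again.
 */
#if 0	/* illustrative user-space sketch only; not built with the kernel */
#include <stdio.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>

static void stop_and_continue(pid_t child)
{
	int status;

	kill(child, SIGTSTP);			/* request a group stop */
	waitpid(child, &status, WUNTRACED);
	if (WIFSTOPPED(status))
		printf("child stopped by signal %d\n", WSTOPSIG(status));

	kill(child, SIGCONT);			/* wakes every stopped thread */
}
#endif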
1877 
1878 /*
1879  * Do appropriate magic when group_stop_count > 0.
1880  * We return nonzero if we stopped, after releasing the siglock.
1881  * We return zero if we still hold the siglock and should look
1882  * for another signal without checking group_stop_count again.
1883  */
1884 static int handle_group_stop(void)
1885 {
1886 	int stop_count;
1887 
1888 	if (current->signal->group_exit_task == current) {
1889 		/*
1890 		 * Group stop is so we can do a core dump.
1891 		 * We are the initiating thread, so get on with it.
1892 		 */
1893 		current->signal->group_exit_task = NULL;
1894 		return 0;
1895 	}
1896 
1897 	if (current->signal->flags & SIGNAL_GROUP_EXIT)
1898 		/*
1899 		 * Group stop is so another thread can do a core dump,
1900 		 * or else we are racing against a death signal.
1901 		 * Just punt the stop so we can get the next signal.
1902 		 */
1903 		return 0;
1904 
1905 	/*
1906 	 * There is a group stop in progress.  We stop
1907 	 * without any associated signal being in our queue.
1908 	 */
1909 	stop_count = --current->signal->group_stop_count;
1910 	if (stop_count == 0)
1911 		current->signal->flags = SIGNAL_STOP_STOPPED;
1912 	current->exit_code = current->signal->group_exit_code;
1913 	set_current_state(TASK_STOPPED);
1914 	spin_unlock_irq(&current->sighand->siglock);
1915 	finish_stop(stop_count);
1916 	return 1;
1917 }
1918 
1919 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1920 			  struct pt_regs *regs, void *cookie)
1921 {
1922 	sigset_t *mask = &current->blocked;
1923 	int signr = 0;
1924 
1925 relock:
1926 	spin_lock_irq(&current->sighand->siglock);
1927 	for (;;) {
1928 		struct k_sigaction *ka;
1929 
1930 		if (unlikely(current->signal->group_stop_count > 0) &&
1931 		    handle_group_stop())
1932 			goto relock;
1933 
1934 		signr = dequeue_signal(current, mask, info);
1935 
1936 		if (!signr)
1937 			break; /* will return 0 */
1938 
1939 		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1940 			ptrace_signal_deliver(regs, cookie);
1941 
1942 			/* Let the debugger run.  */
1943 			ptrace_stop(signr, signr, info);
1944 
1945 			/* We're back.  Did the debugger cancel the sig or group_exit? */
1946 			signr = current->exit_code;
1947 			if (signr == 0 || current->signal->flags & SIGNAL_GROUP_EXIT)
1948 				continue;
1949 
1950 			current->exit_code = 0;
1951 
1952 			/* Update the siginfo structure if the signal has
1953 			   changed.  If the debugger wanted something
1954 			   specific in the siginfo structure then it should
1955 			   have updated *info via PTRACE_SETSIGINFO.  */
1956 			if (signr != info->si_signo) {
1957 				info->si_signo = signr;
1958 				info->si_errno = 0;
1959 				info->si_code = SI_USER;
1960 				info->si_pid = current->parent->pid;
1961 				info->si_uid = current->parent->uid;
1962 			}
1963 
1964 			/* If the (new) signal is now blocked, requeue it.  */
1965 			if (sigismember(&current->blocked, signr)) {
1966 				specific_send_sig_info(signr, info, current);
1967 				continue;
1968 			}
1969 		}
1970 
1971 		ka = &current->sighand->action[signr-1];
1972 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1973 			continue;
1974 		if (ka->sa.sa_handler != SIG_DFL) {
1975 			/* Run the handler.  */
1976 			*return_ka = *ka;
1977 
1978 			if (ka->sa.sa_flags & SA_ONESHOT)
1979 				ka->sa.sa_handler = SIG_DFL;
1980 
1981 			break; /* will return non-zero "signr" value */
1982 		}
1983 
1984 		/*
1985 		 * Now we are doing the default action for this signal.
1986 		 */
1987 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
1988 			continue;
1989 
1990 		/* Init gets no signals it doesn't want.  */
1991 		if (current->pid == 1)
1992 			continue;
1993 
1994 		if (sig_kernel_stop(signr)) {
1995 			/*
1996 			 * The default action is to stop all threads in
1997 			 * the thread group.  The job control signals
1998 			 * do nothing in an orphaned pgrp, but SIGSTOP
1999 			 * always works.  Note that siglock needs to be
2000 			 * dropped during the call to is_orphaned_pgrp()
2001 			 * because of lock ordering with tasklist_lock.
2002 			 * This allows an intervening SIGCONT to be posted.
2003 			 * We need to check for that and bail out if necessary.
2004 			 */
2005 			if (signr != SIGSTOP) {
2006 				spin_unlock_irq(&current->sighand->siglock);
2007 
2008 				/* signals can be posted during this window */
2009 
2010 				if (is_orphaned_pgrp(process_group(current)))
2011 					goto relock;
2012 
2013 				spin_lock_irq(&current->sighand->siglock);
2014 			}
2015 
2016 			if (likely(do_signal_stop(signr))) {
2017 				/* It released the siglock.  */
2018 				goto relock;
2019 			}
2020 
2021 			/*
2022 			 * We didn't actually stop, due to a race
2023 			 * with SIGCONT or something like that.
2024 			 */
2025 			continue;
2026 		}
2027 
2028 		spin_unlock_irq(&current->sighand->siglock);
2029 
2030 		/*
2031 		 * Anything else is fatal, maybe with a core dump.
2032 		 */
2033 		current->flags |= PF_SIGNALED;
2034 		if (sig_kernel_coredump(signr)) {
2035 			/*
2036 			 * If it was able to dump core, this kills all
2037 			 * other threads in the group and synchronizes with
2038 			 * their demise.  If we lost the race with another
2039 			 * thread getting here, it set group_exit_code
2040 			 * first and our do_group_exit call below will use
2041 			 * that value and ignore the one we pass it.
2042 			 */
2043 			do_coredump((long)signr, signr, regs);
2044 		}
2045 
2046 		/*
2047 		 * Death signals, no core dump.
2048 		 */
2049 		do_group_exit(signr);
2050 		/* NOTREACHED */
2051 	}
2052 	spin_unlock_irq(&current->sighand->siglock);
2053 	return signr;
2054 }
2055 
2056 EXPORT_SYMBOL(recalc_sigpending);
2057 EXPORT_SYMBOL_GPL(dequeue_signal);
2058 EXPORT_SYMBOL(flush_signals);
2059 EXPORT_SYMBOL(force_sig);
2060 EXPORT_SYMBOL(kill_pg);
2061 EXPORT_SYMBOL(kill_proc);
2062 EXPORT_SYMBOL(ptrace_notify);
2063 EXPORT_SYMBOL(send_sig);
2064 EXPORT_SYMBOL(send_sig_info);
2065 EXPORT_SYMBOL(sigprocmask);
2066 EXPORT_SYMBOL(block_all_signals);
2067 EXPORT_SYMBOL(unblock_all_signals);
2068 
2069 
2070 /*
2071  * System call entry points.
2072  */
2073 
2074 asmlinkage long sys_restart_syscall(void)
2075 {
2076 	struct restart_block *restart = &current_thread_info()->restart_block;
2077 	return restart->fn(restart);
2078 }
2079 
2080 long do_no_restart_syscall(struct restart_block *param)
2081 {
2082 	return -EINTR;
2083 }
2084 
2085 /*
2086  * We don't need to get the kernel lock - this is all local to this
2087  * particular thread. (and that's good, because this is _heavily_
2088  * used by various programs)
2089  */
2090 
2091 /*
2092  * This is also useful for kernel threads that want to temporarily
2093  * (or permanently) block certain signals.
2094  *
2095  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2096  * interface happily blocks "unblockable" signals like SIGKILL
2097  * and friends.
2098  */
2099 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2100 {
2101 	int error;
2102 	sigset_t old_block;
2103 
2104 	spin_lock_irq(&current->sighand->siglock);
2105 	old_block = current->blocked;
2106 	error = 0;
2107 	switch (how) {
2108 	case SIG_BLOCK:
2109 		sigorsets(&current->blocked, &current->blocked, set);
2110 		break;
2111 	case SIG_UNBLOCK:
2112 		signandsets(&current->blocked, &current->blocked, set);
2113 		break;
2114 	case SIG_SETMASK:
2115 		current->blocked = *set;
2116 		break;
2117 	default:
2118 		error = -EINVAL;
2119 	}
2120 	recalc_sigpending();
2121 	spin_unlock_irq(&current->sighand->siglock);
2122 	if (oldset)
2123 		*oldset = old_block;
2124 	return error;
2125 }
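
/*
 * A hedged, in-kernel sketch (not part of this file, function name is
 * illustrative) of the usage described in the comment above: a 2.6-era
 * kernel thread blocking every signal, including SIGKILL, through this
 * in-kernel sigprocmask().
 */
#if 0	/* illustrative sketch only; assumes kernel-thread context */
static void example_block_all_signals_in_kthread(void)
{
	sigset_t all;

	/* Unlike the user-mode interface, this blocks SIGKILL as well. */
	sigfillset(&all);
	sigprocmask(SIG_BLOCK, &all, NULL);
}
#endif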
2126 
2127 asmlinkage long
2128 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
2129 {
2130 	int error = -EINVAL;
2131 	sigset_t old_set, new_set;
2132 
2133 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2134 	if (sigsetsize != sizeof(sigset_t))
2135 		goto out;
2136 
2137 	if (set) {
2138 		error = -EFAULT;
2139 		if (copy_from_user(&new_set, set, sizeof(*set)))
2140 			goto out;
2141 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2142 
2143 		error = sigprocmask(how, &new_set, &old_set);
2144 		if (error)
2145 			goto out;
2146 		if (oset)
2147 			goto set_old;
2148 	} else if (oset) {
2149 		spin_lock_irq(&current->sighand->siglock);
2150 		old_set = current->blocked;
2151 		spin_unlock_irq(&current->sighand->siglock);
2152 
2153 	set_old:
2154 		error = -EFAULT;
2155 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2156 			goto out;
2157 	}
2158 	error = 0;
2159 out:
2160 	return error;
2161 }
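
/*
 * A minimal user-space sketch (not part of this kernel file; critical_section
 * and work are illustrative names) of the libc sigprocmask() call that ends
 * up in sys_rt_sigprocmask(): block SIGINT around a critical section, then
 * restore the previous mask.
 */
#if 0	/* illustrative user-space sketch only; not built with the kernel */
#include <signal.h>

static void critical_section(void (*work)(void))
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGINT);

	sigprocmask(SIG_BLOCK, &block, &old);
	work();					/* SIGINT stays pending here */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* pending SIGINT delivered now */
}
#endif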
2162 
2163 long do_sigpending(void __user *set, unsigned long sigsetsize)
2164 {
2165 	long error = -EINVAL;
2166 	sigset_t pending;
2167 
2168 	if (sigsetsize > sizeof(sigset_t))
2169 		goto out;
2170 
2171 	spin_lock_irq(&current->sighand->siglock);
2172 	sigorsets(&pending, &current->pending.signal,
2173 		  &current->signal->shared_pending.signal);
2174 	spin_unlock_irq(&current->sighand->siglock);
2175 
2176 	/* Outside the lock because only this thread touches it.  */
2177 	sigandsets(&pending, &current->blocked, &pending);
2178 
2179 	error = -EFAULT;
2180 	if (!copy_to_user(set, &pending, sigsetsize))
2181 		error = 0;
2182 
2183 out:
2184 	return error;
2185 }
2186 
2187 asmlinkage long
2188 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2189 {
2190 	return do_sigpending(set, sigsetsize);
2191 }
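
/*
 * A minimal user-space sketch (not part of this kernel file, helper name is
 * illustrative): sigpending() reports signals that arrived while blocked,
 * which is the per-thread plus shared pending union computed above, masked
 * by the blocked set.
 */
#if 0	/* illustrative user-space sketch only; not built with the kernel */
#include <signal.h>

static int sigterm_is_pending(void)
{
	sigset_t pending;

	if (sigpending(&pending) < 0)
		return 0;
	return sigismember(&pending, SIGTERM);
}
#endif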
2192 
2193 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2194 
2195 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2196 {
2197 	int err;
2198 
2199 	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2200 		return -EFAULT;
2201 	if (from->si_code < 0)
2202 		return __copy_to_user(to, from, sizeof(siginfo_t))
2203 			? -EFAULT : 0;
2204 	/*
2205 	 * If you change the siginfo_t structure, please be sure
2206 	 * this code is fixed accordingly.
2207 	 * It must never copy any padding contained in the structure
2208 	 * (to avoid leaking kernel memory), but must copy the generic
2209 	 * three ints plus the relevant union member.
2210 	 */
2211 	err = __put_user(from->si_signo, &to->si_signo);
2212 	err |= __put_user(from->si_errno, &to->si_errno);
2213 	err |= __put_user((short)from->si_code, &to->si_code);
2214 	switch (from->si_code & __SI_MASK) {
2215 	case __SI_KILL:
2216 		err |= __put_user(from->si_pid, &to->si_pid);
2217 		err |= __put_user(from->si_uid, &to->si_uid);
2218 		break;
2219 	case __SI_TIMER:
2220 		 err |= __put_user(from->si_tid, &to->si_tid);
2221 		 err |= __put_user(from->si_overrun, &to->si_overrun);
2222 		 err |= __put_user(from->si_ptr, &to->si_ptr);
2223 		break;
2224 	case __SI_POLL:
2225 		err |= __put_user(from->si_band, &to->si_band);
2226 		err |= __put_user(from->si_fd, &to->si_fd);
2227 		break;
2228 	case __SI_FAULT:
2229 		err |= __put_user(from->si_addr, &to->si_addr);
2230 #ifdef __ARCH_SI_TRAPNO
2231 		err |= __put_user(from->si_trapno, &to->si_trapno);
2232 #endif
2233 		break;
2234 	case __SI_CHLD:
2235 		err |= __put_user(from->si_pid, &to->si_pid);
2236 		err |= __put_user(from->si_uid, &to->si_uid);
2237 		err |= __put_user(from->si_status, &to->si_status);
2238 		err |= __put_user(from->si_utime, &to->si_utime);
2239 		err |= __put_user(from->si_stime, &to->si_stime);
2240 		break;
2241 	case __SI_RT: /* This is not generated by the kernel as of now. */
2242 	case __SI_MESGQ: /* But this is */
2243 		err |= __put_user(from->si_pid, &to->si_pid);
2244 		err |= __put_user(from->si_uid, &to->si_uid);
2245 		err |= __put_user(from->si_ptr, &to->si_ptr);
2246 		break;
2247 	default: /* this is just in case for now ... */
2248 		err |= __put_user(from->si_pid, &to->si_pid);
2249 		err |= __put_user(from->si_uid, &to->si_uid);
2250 		break;
2251 	}
2252 	return err;
2253 }
2254 
2255 #endif
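
/*
 * A minimal user-space sketch (not part of this kernel file, names are
 * illustrative) of the consumer of this copy: an SA_SIGINFO handler for
 * SIGCHLD reading the fields the __SI_CHLD case above copies out.
 */
#if 0	/* illustrative user-space sketch only; not built with the kernel */
#include <signal.h>
#include <stdio.h>
#include <string.h>

static void sigchld_handler(int sig, siginfo_t *info, void *ucontext)
{
	/* printf() is not async-signal-safe; fine for an illustration only. */
	printf("child %d changed state, si_code=%d status=%d\n",
	       (int)info->si_pid, info->si_code, info->si_status);
}

static void install_sigchld_handler(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = sigchld_handler;
	sa.sa_flags = SA_SIGINFO | SA_RESTART;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGCHLD, &sa, NULL);
}
#endif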
2256 
2257 asmlinkage long
2258 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2259 		    siginfo_t __user *uinfo,
2260 		    const struct timespec __user *uts,
2261 		    size_t sigsetsize)
2262 {
2263 	int ret, sig;
2264 	sigset_t these;
2265 	struct timespec ts;
2266 	siginfo_t info;
2267 	long timeout = 0;
2268 
2269 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2270 	if (sigsetsize != sizeof(sigset_t))
2271 		return -EINVAL;
2272 
2273 	if (copy_from_user(&these, uthese, sizeof(these)))
2274 		return -EFAULT;
2275 
2276 	/*
2277 	 * Invert the set of allowed signals to get those we
2278 	 * want to block.
2279 	 */
2280 	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2281 	signotset(&these);
2282 
2283 	if (uts) {
2284 		if (copy_from_user(&ts, uts, sizeof(ts)))
2285 			return -EFAULT;
2286 		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2287 		    || ts.tv_sec < 0)
2288 			return -EINVAL;
2289 	}
2290 
2291 	spin_lock_irq(&current->sighand->siglock);
2292 	sig = dequeue_signal(current, &these, &info);
2293 	if (!sig) {
2294 		timeout = MAX_SCHEDULE_TIMEOUT;
2295 		if (uts)
2296 			timeout = (timespec_to_jiffies(&ts)
2297 				   + (ts.tv_sec || ts.tv_nsec));
2298 
2299 		if (timeout) {
2300 			/* None ready -- temporarily unblock the signals we're
2301 			 * interested in while we sleep, so that we'll be
2302 			 * awakened when one of them arrives.  */
2303 			current->real_blocked = current->blocked;
2304 			sigandsets(&current->blocked, &current->blocked, &these);
2305 			recalc_sigpending();
2306 			spin_unlock_irq(&current->sighand->siglock);
2307 
2308 			timeout = schedule_timeout_interruptible(timeout);
2309 
2310 			try_to_freeze();
2311 			spin_lock_irq(&current->sighand->siglock);
2312 			sig = dequeue_signal(current, &these, &info);
2313 			current->blocked = current->real_blocked;
2314 			siginitset(&current->real_blocked, 0);
2315 			recalc_sigpending();
2316 		}
2317 	}
2318 	spin_unlock_irq(&current->sighand->siglock);
2319 
2320 	if (sig) {
2321 		ret = sig;
2322 		if (uinfo) {
2323 			if (copy_siginfo_to_user(uinfo, &info))
2324 				ret = -EFAULT;
2325 		}
2326 	} else {
2327 		ret = -EAGAIN;
2328 		if (timeout)
2329 			ret = -EINTR;
2330 	}
2331 
2332 	return ret;
2333 }
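
/*
 * A minimal user-space sketch (not part of this kernel file, helper name is
 * illustrative) of sigtimedwait(): block SIGINT first so it is not delivered
 * asynchronously, then wait for it synchronously with a timeout.  A timeout
 * surfaces as -1/EAGAIN, matching the -EAGAIN path above.
 */
#if 0	/* illustrative user-space sketch only; not built with the kernel */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

static void wait_for_sigint(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec timeout = { .tv_sec = 5, .tv_nsec = 0 };

	sigemptyset(&set);
	sigaddset(&set, SIGINT);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* must be blocked first */

	if (sigtimedwait(&set, &info, &timeout) < 0) {
		if (errno == EAGAIN)
			printf("timed out\n");
	} else {
		printf("got SIGINT from pid %d\n", (int)info.si_pid);
	}
}
#endif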
2334 
2335 asmlinkage long
2336 sys_kill(int pid, int sig)
2337 {
2338 	struct siginfo info;
2339 
2340 	info.si_signo = sig;
2341 	info.si_errno = 0;
2342 	info.si_code = SI_USER;
2343 	info.si_pid = current->tgid;
2344 	info.si_uid = current->uid;
2345 
2346 	return kill_something_info(sig, &info, pid);
2347 }
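
/*
 * A minimal user-space sketch (not part of this kernel file, helper names are
 * illustrative) of two classic kill() uses handled by this entry point:
 * signal 0 as an existence/permission probe that delivers nothing, and a
 * negative pid to signal a whole process group.
 */
#if 0	/* illustrative user-space sketch only; not built with the kernel */
#include <errno.h>
#include <signal.h>
#include <sys/types.h>

static int process_exists(pid_t pid)
{
	return kill(pid, 0) == 0 || errno == EPERM;
}

static void terminate_group(pid_t pgrp)
{
	kill(-pgrp, SIGTERM);		/* negative pid addresses the group */
}
#endif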
2348 
2349 static int do_tkill(int tgid, int pid, int sig)
2350 {
2351 	int error;
2352 	struct siginfo info;
2353 	struct task_struct *p;
2354 
2355 	error = -ESRCH;
2356 	info.si_signo = sig;
2357 	info.si_errno = 0;
2358 	info.si_code = SI_TKILL;
2359 	info.si_pid = current->tgid;
2360 	info.si_uid = current->uid;
2361 
2362 	read_lock(&tasklist_lock);
2363 	p = find_task_by_pid(pid);
2364 	if (p && (tgid <= 0 || p->tgid == tgid)) {
2365 		error = check_kill_permission(sig, &info, p);
2366 		/*
2367 		 * The null signal is a permissions and process existence
2368 		 * probe.  No signal is actually delivered.
2369 		 */
2370 		if (!error && sig && p->sighand) {
2371 			spin_lock_irq(&p->sighand->siglock);
2372 			handle_stop_signal(sig, p);
2373 			error = specific_send_sig_info(sig, &info, p);
2374 			spin_unlock_irq(&p->sighand->siglock);
2375 		}
2376 	}
2377 	read_unlock(&tasklist_lock);
2378 
2379 	return error;
2380 }
2381 
2382 /**
2383  *  sys_tgkill - send signal to one specific thread
2384  *  @tgid: the thread group ID of the thread
2385  *  @pid: the PID of the thread
2386  *  @sig: signal to be sent
2387  *
2388  *  This syscall also checks the tgid and returns -ESRCH even if the PID
2389  *  exists but no longer belongs to the target thread group.  This closes
2390  *  the race between a thread exiting and its PID being reused.
2391  */
2392 asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2393 {
2394 	/* This is only valid for single tasks */
2395 	if (pid <= 0 || tgid <= 0)
2396 		return -EINVAL;
2397 
2398 	return do_tkill(tgid, pid, sig);
2399 }
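
/*
 * A minimal user-space sketch (not part of this kernel file): libc of this
 * era has no tgkill() wrapper, so a threading library would invoke it via
 * syscall().  Assumes SYS_tgkill is provided by <sys/syscall.h> on the
 * platform; the helper name is illustrative.
 */
#if 0	/* illustrative user-space sketch only; not built with the kernel */
#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/* Send sig to one specific thread (tid) of process tgid; the tgid check
 * guards against the tid having been recycled by another process. */
static int send_to_thread(pid_t tgid, pid_t tid, int sig)
{
	return syscall(SYS_tgkill, tgid, tid, sig);
}
#endif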
2400 
2401 /*
2402  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2403  */
2404 asmlinkage long
2405 sys_tkill(int pid, int sig)
2406 {
2407 	/* This is only valid for single tasks */
2408 	if (pid <= 0)
2409 		return -EINVAL;
2410 
2411 	return do_tkill(0, pid, sig);
2412 }
2413 
2414 asmlinkage long
2415 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2416 {
2417 	siginfo_t info;
2418 
2419 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2420 		return -EFAULT;
2421 
2422 	/* Not even root can pretend to send signals from the kernel.
2423 	   Nor can they impersonate a kill(), which adds source info.  */
2424 	if (info.si_code >= 0)
2425 		return -EPERM;
2426 	info.si_signo = sig;
2427 
2428 	/* POSIX.1b doesn't mention process groups.  */
2429 	return kill_proc_info(sig, &info, pid);
2430 }
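
/*
 * A minimal user-space sketch (not part of this kernel file, helper name is
 * illustrative): sigqueue() is the usual libc front end for this syscall.
 * It fills in a negative si_code (SI_QUEUE) and a caller-chosen sigval
 * payload, which the receiver's SA_SIGINFO handler can read back.
 */
#if 0	/* illustrative user-space sketch only; not built with the kernel */
#include <signal.h>
#include <sys/types.h>

static int notify_with_value(pid_t pid, int value)
{
	union sigval sv;

	sv.sival_int = value;
	return sigqueue(pid, SIGUSR1, sv);	/* receiver sees info->si_value */
}
#endif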
2431 
2432 int
2433 do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2434 {
2435 	struct k_sigaction *k;
2436 	sigset_t mask;
2437 
2438 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2439 		return -EINVAL;
2440 
2441 	k = &current->sighand->action[sig-1];
2442 
2443 	spin_lock_irq(&current->sighand->siglock);
2444 	if (signal_pending(current)) {
2445 		/*
2446 		 * If there might be a fatal signal pending on multiple
2447 		 * threads, make sure we take it before changing the action.
2448 		 */
2449 		spin_unlock_irq(&current->sighand->siglock);
2450 		return -ERESTARTNOINTR;
2451 	}
2452 
2453 	if (oact)
2454 		*oact = *k;
2455 
2456 	if (act) {
2457 		sigdelsetmask(&act->sa.sa_mask,
2458 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
2459 		/*
2460 		 * POSIX 3.3.1.3:
2461 		 *  "Setting a signal action to SIG_IGN for a signal that is
2462 		 *   pending shall cause the pending signal to be discarded,
2463 		 *   whether or not it is blocked."
2464 		 *
2465 		 *  "Setting a signal action to SIG_DFL for a signal that is
2466 		 *   pending and whose default action is to ignore the signal
2467 		 *   (for example, SIGCHLD), shall cause the pending signal to
2468 		 *   be discarded, whether or not it is blocked"
2469 		 */
2470 		if (act->sa.sa_handler == SIG_IGN ||
2471 		    (act->sa.sa_handler == SIG_DFL &&
2472 		     sig_kernel_ignore(sig))) {
2473 			/*
2474 			 * This is a fairly rare case, so we only take the
2475 			 * tasklist_lock once we're sure we'll need it.
2476 			 * Now we must do this little unlock and relock
2477 			 * dance to maintain the lock hierarchy.
2478 			 */
2479 			struct task_struct *t = current;
2480 			spin_unlock_irq(&t->sighand->siglock);
2481 			read_lock(&tasklist_lock);
2482 			spin_lock_irq(&t->sighand->siglock);
2483 			*k = *act;
2484 			sigemptyset(&mask);
2485 			sigaddset(&mask, sig);
2486 			rm_from_queue_full(&mask, &t->signal->shared_pending);
2487 			do {
2488 				rm_from_queue_full(&mask, &t->pending);
2489 				recalc_sigpending_tsk(t);
2490 				t = next_thread(t);
2491 			} while (t != current);
2492 			spin_unlock_irq(&current->sighand->siglock);
2493 			read_unlock(&tasklist_lock);
2494 			return 0;
2495 		}
2496 
2497 		*k = *act;
2498 	}
2499 
2500 	spin_unlock_irq(&current->sighand->siglock);
2501 	return 0;
2502 }
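
/*
 * A minimal user-space sketch (not part of this kernel file, helper name is
 * illustrative) of the POSIX 3.3.1.3 rule handled above: setting SIGCHLD's
 * disposition to SIG_IGN while an instance is pending discards the pending
 * signal, even though it is blocked.
 */
#if 0	/* illustrative user-space sketch only; not built with the kernel */
#include <assert.h>
#include <signal.h>

static void discard_pending_sigchld(void)
{
	sigset_t set, pending;

	sigemptyset(&set);
	sigaddset(&set, SIGCHLD);
	sigprocmask(SIG_BLOCK, &set, NULL);

	raise(SIGCHLD);				/* queued, not delivered */
	signal(SIGCHLD, SIG_IGN);		/* discards the pending signal */

	sigpending(&pending);
	assert(!sigismember(&pending, SIGCHLD));
}
#endif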
2503 
2504 int
2505 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2506 {
2507 	stack_t oss;
2508 	int error;
2509 
2510 	if (uoss) {
2511 		oss.ss_sp = (void __user *) current->sas_ss_sp;
2512 		oss.ss_size = current->sas_ss_size;
2513 		oss.ss_flags = sas_ss_flags(sp);
2514 	}
2515 
2516 	if (uss) {
2517 		void __user *ss_sp;
2518 		size_t ss_size;
2519 		int ss_flags;
2520 
2521 		error = -EFAULT;
2522 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2523 		    || __get_user(ss_sp, &uss->ss_sp)
2524 		    || __get_user(ss_flags, &uss->ss_flags)
2525 		    || __get_user(ss_size, &uss->ss_size))
2526 			goto out;
2527 
2528 		error = -EPERM;
2529 		if (on_sig_stack(sp))
2530 			goto out;
2531 
2532 		error = -EINVAL;
2533 		/*
2534 		 * Note - this code used to test ss_flags incorrectly:
2535 		 * old code may have been written using ss_flags == 0
2536 		 * to mean ss_flags == SS_ONSTACK (as this was the only
2537 		 * way that worked), so accepting ss_flags == 0 here
2538 		 * preserves that older mechanism.
2539 		 */
2541 		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2542 			goto out;
2543 
2544 		if (ss_flags == SS_DISABLE) {
2545 			ss_size = 0;
2546 			ss_sp = NULL;
2547 		} else {
2548 			error = -ENOMEM;
2549 			if (ss_size < MINSIGSTKSZ)
2550 				goto out;
2551 		}
2552 
2553 		current->sas_ss_sp = (unsigned long) ss_sp;
2554 		current->sas_ss_size = ss_size;
2555 	}
2556 
2557 	if (uoss) {
2558 		error = -EFAULT;
2559 		if (copy_to_user(uoss, &oss, sizeof(oss)))
2560 			goto out;
2561 	}
2562 
2563 	error = 0;
2564 out:
2565 	return error;
2566 }
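
/*
 * A minimal user-space sketch (not part of this kernel file, helper name is
 * illustrative) of sigaltstack(): install an alternate stack and an
 * SA_ONSTACK SIGSEGV handler so that a stack-overflow fault can still run
 * a handler.
 */
#if 0	/* illustrative user-space sketch only; not built with the kernel */
#include <signal.h>
#include <stdlib.h>
#include <string.h>

static void install_segv_on_altstack(void (*handler)(int))
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = malloc(SIGSTKSZ);
	if (!ss.ss_sp)
		return;
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;			/* neither SS_DISABLE nor SS_ONSTACK */
	sigaltstack(&ss, NULL);

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = handler;
	sa.sa_flags = SA_ONSTACK;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);
}
#endif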
2567 
2568 #ifdef __ARCH_WANT_SYS_SIGPENDING
2569 
2570 asmlinkage long
2571 sys_sigpending(old_sigset_t __user *set)
2572 {
2573 	return do_sigpending(set, sizeof(*set));
2574 }
2575 
2576 #endif
2577 
2578 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2579 /* Some platforms have their own version with special arguments;
2580    others support only sys_rt_sigprocmask.  */
2581 
2582 asmlinkage long
2583 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2584 {
2585 	int error;
2586 	old_sigset_t old_set, new_set;
2587 
2588 	if (set) {
2589 		error = -EFAULT;
2590 		if (copy_from_user(&new_set, set, sizeof(*set)))
2591 			goto out;
2592 		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2593 
2594 		spin_lock_irq(&current->sighand->siglock);
2595 		old_set = current->blocked.sig[0];
2596 
2597 		error = 0;
2598 		switch (how) {
2599 		default:
2600 			error = -EINVAL;
2601 			break;
2602 		case SIG_BLOCK:
2603 			sigaddsetmask(&current->blocked, new_set);
2604 			break;
2605 		case SIG_UNBLOCK:
2606 			sigdelsetmask(&current->blocked, new_set);
2607 			break;
2608 		case SIG_SETMASK:
2609 			current->blocked.sig[0] = new_set;
2610 			break;
2611 		}
2612 
2613 		recalc_sigpending();
2614 		spin_unlock_irq(&current->sighand->siglock);
2615 		if (error)
2616 			goto out;
2617 		if (oset)
2618 			goto set_old;
2619 	} else if (oset) {
2620 		old_set = current->blocked.sig[0];
2621 	set_old:
2622 		error = -EFAULT;
2623 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2624 			goto out;
2625 	}
2626 	error = 0;
2627 out:
2628 	return error;
2629 }
2630 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2631 
2632 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2633 asmlinkage long
2634 sys_rt_sigaction(int sig,
2635 		 const struct sigaction __user *act,
2636 		 struct sigaction __user *oact,
2637 		 size_t sigsetsize)
2638 {
2639 	struct k_sigaction new_sa, old_sa;
2640 	int ret = -EINVAL;
2641 
2642 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2643 	if (sigsetsize != sizeof(sigset_t))
2644 		goto out;
2645 
2646 	if (act) {
2647 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2648 			return -EFAULT;
2649 	}
2650 
2651 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2652 
2653 	if (!ret && oact) {
2654 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2655 			return -EFAULT;
2656 	}
2657 out:
2658 	return ret;
2659 }
2660 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2661 
2662 #ifdef __ARCH_WANT_SYS_SGETMASK
2663 
2664 /*
2665  * For backwards compatibility.  Functionality superseded by sigprocmask.
2666  */
2667 asmlinkage long
2668 sys_sgetmask(void)
2669 {
2670 	/* SMP safe */
2671 	return current->blocked.sig[0];
2672 }
2673 
2674 asmlinkage long
2675 sys_ssetmask(int newmask)
2676 {
2677 	int old;
2678 
2679 	spin_lock_irq(&current->sighand->siglock);
2680 	old = current->blocked.sig[0];
2681 
2682 	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2683 						  sigmask(SIGSTOP)));
2684 	recalc_sigpending();
2685 	spin_unlock_irq(&current->sighand->siglock);
2686 
2687 	return old;
2688 }
2689 #endif /* __ARCH_WANT_SYS_SGETMASK */
2690 
2691 #ifdef __ARCH_WANT_SYS_SIGNAL
2692 /*
2693  * For backwards compatibility.  Functionality superseded by sigaction.
2694  */
2695 asmlinkage unsigned long
2696 sys_signal(int sig, __sighandler_t handler)
2697 {
2698 	struct k_sigaction new_sa, old_sa;
2699 	int ret;
2700 
2701 	new_sa.sa.sa_handler = handler;
2702 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2703 	sigemptyset(&new_sa.sa.sa_mask);
2704 
2705 	ret = do_sigaction(sig, &new_sa, &old_sa);
2706 
2707 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2708 }
2709 #endif /* __ARCH_WANT_SYS_SIGNAL */
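
/*
 * A hedged user-space sketch (not part of this kernel file, handler name is
 * illustrative) of the SysV one-shot semantics selected above
 * (SA_ONESHOT | SA_NOMASK): code that gets these raw signal(2) semantics
 * must re-register its handler on each delivery, since the disposition
 * resets to SIG_DFL.  Note that modern libc signal() wrappers are typically
 * built on sigaction() and do not behave this way.
 */
#if 0	/* illustrative user-space sketch only; not built with the kernel */
#include <signal.h>

static void on_sigusr1(int sig)
{
	/* Re-arm the one-shot disposition before doing anything else. */
	signal(SIGUSR1, on_sigusr1);
}
#endif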
2710 
2711 #ifdef __ARCH_WANT_SYS_PAUSE
2712 
2713 asmlinkage long
2714 sys_pause(void)
2715 {
2716 	current->state = TASK_INTERRUPTIBLE;
2717 	schedule();
2718 	return -ERESTARTNOHAND;
2719 }
2720 
2721 #endif
2722 
2723 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2724 asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2725 {
2726 	sigset_t newset;
2727 
2728 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2729 	if (sigsetsize != sizeof(sigset_t))
2730 		return -EINVAL;
2731 
2732 	if (copy_from_user(&newset, unewset, sizeof(newset)))
2733 		return -EFAULT;
2734 	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2735 
2736 	spin_lock_irq(&current->sighand->siglock);
2737 	current->saved_sigmask = current->blocked;
2738 	current->blocked = newset;
2739 	recalc_sigpending();
2740 	spin_unlock_irq(&current->sighand->siglock);
2741 
2742 	current->state = TASK_INTERRUPTIBLE;
2743 	schedule();
2744 	set_thread_flag(TIF_RESTORE_SIGMASK);
2745 	return -ERESTARTNOHAND;
2746 }
2747 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
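
/*
 * A minimal user-space sketch (not part of this kernel file, helper name is
 * illustrative) of why sigsuspend() exists instead of plain pause(): the
 * unblock and the sleep happen in one atomic step, so a signal arriving
 * just before the sleep cannot be lost.
 */
#if 0	/* illustrative user-space sketch only; not built with the kernel */
#include <signal.h>

static void wait_for_sigusr1(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &old);

	/* ... decide to wait; a SIGUSR1 arriving now simply stays pending */

	sigsuspend(&old);		/* returns -1/EINTR after a handler ran */
	sigprocmask(SIG_SETMASK, &old, NULL);
}
#endif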
2748 
2749 void __init signals_init(void)
2750 {
2751 	sigqueue_cachep =
2752 		kmem_cache_create("sigqueue",
2753 				  sizeof(struct sigqueue),
2754 				  __alignof__(struct sigqueue),
2755 				  SLAB_PANIC, NULL, NULL);
2756 }
2757