xref: /linux/kernel/signal.c (revision 2b8232ce512105e28453f301d1510de8363bccd1)
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *		Changes to use preallocated sigqueue structures
10  *		to allow signals to be sent reliably.
11  */
12 
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/security.h>
21 #include <linux/syscalls.h>
22 #include <linux/ptrace.h>
23 #include <linux/signal.h>
24 #include <linux/signalfd.h>
25 #include <linux/capability.h>
26 #include <linux/freezer.h>
27 #include <linux/pid_namespace.h>
28 #include <linux/nsproxy.h>
29 
30 #include <asm/param.h>
31 #include <asm/uaccess.h>
32 #include <asm/unistd.h>
33 #include <asm/siginfo.h>
34 #include "audit.h"	/* audit_signal_info() */
35 
36 /*
37  * SLAB caches for signal bits.
38  */
39 
40 static struct kmem_cache *sigqueue_cachep;
41 
42 
43 static int sig_ignored(struct task_struct *t, int sig)
44 {
45 	void __user * handler;
46 
47 	/*
48 	 * Tracers always want to know about signals..
49 	 */
50 	if (t->ptrace & PT_PTRACED)
51 		return 0;
52 
53 	/*
54 	 * Blocked signals are never ignored, since the
55 	 * signal handler may change by the time it is
56 	 * unblocked.
57 	 */
58 	if (sigismember(&t->blocked, sig))
59 		return 0;
60 
61 	/* Is it explicitly or implicitly ignored? */
62 	handler = t->sighand->action[sig-1].sa.sa_handler;
63 	return   handler == SIG_IGN ||
64 		(handler == SIG_DFL && sig_kernel_ignore(sig));
65 }
66 
67 /*
68  * Re-calculate pending state from the set of locally pending
69  * signals, globally pending signals, and blocked signals.
70  */
71 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
72 {
73 	unsigned long ready;
74 	long i;
75 
76 	switch (_NSIG_WORDS) {
77 	default:
78 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
79 			ready |= signal->sig[i] &~ blocked->sig[i];
80 		break;
81 
82 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
83 		ready |= signal->sig[2] &~ blocked->sig[2];
84 		ready |= signal->sig[1] &~ blocked->sig[1];
85 		ready |= signal->sig[0] &~ blocked->sig[0];
86 		break;
87 
88 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
89 		ready |= signal->sig[0] &~ blocked->sig[0];
90 		break;
91 
92 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
93 	}
94 	return ready != 0;
95 }
96 
97 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
98 
99 static int recalc_sigpending_tsk(struct task_struct *t)
100 {
101 	if (t->signal->group_stop_count > 0 ||
102 	    (freezing(t)) ||
103 	    PENDING(&t->pending, &t->blocked) ||
104 	    PENDING(&t->signal->shared_pending, &t->blocked)) {
105 		set_tsk_thread_flag(t, TIF_SIGPENDING);
106 		return 1;
107 	}
108 	/*
109 	 * We must never clear the flag in another thread, or in current
110 	 * when it's possible the current syscall is returning -ERESTART*.
111 	 * So we don't clear it here; it is cleared only by callers who know they should.
112 	 */
113 	return 0;
114 }
115 
116 /*
117  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
118  * This is superfluous when called on current; the wakeup is then a harmless no-op.
119  */
120 void recalc_sigpending_and_wake(struct task_struct *t)
121 {
122 	if (recalc_sigpending_tsk(t))
123 		signal_wake_up(t, 0);
124 }
125 
126 void recalc_sigpending(void)
127 {
128 	if (!recalc_sigpending_tsk(current))
129 		clear_thread_flag(TIF_SIGPENDING);
130 
131 }
132 
133 /* Given the mask, find the first available signal that should be serviced. */
134 
135 int next_signal(struct sigpending *pending, sigset_t *mask)
136 {
137 	unsigned long i, *s, *m, x;
138 	int sig = 0;
139 
140 	s = pending->signal.sig;
141 	m = mask->sig;
142 	switch (_NSIG_WORDS) {
143 	default:
144 		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
145 			if ((x = *s &~ *m) != 0) {
146 				sig = ffz(~x) + i*_NSIG_BPW + 1;
147 				break;
148 			}
149 		break;
150 
151 	case 2: if ((x = s[0] &~ m[0]) != 0)
152 			sig = 1;
153 		else if ((x = s[1] &~ m[1]) != 0)
154 			sig = _NSIG_BPW + 1;
155 		else
156 			break;
157 		sig += ffz(~x);
158 		break;
159 
160 	case 1: if ((x = *s &~ *m) != 0)
161 			sig = ffz(~x) + 1;
162 		break;
163 	}
164 
165 	return sig;
166 }
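
/*
 * Editorial note: ffz(~x) in the code above returns the index of the
 * lowest set bit in x (the first zero bit of the complement), so e.g.
 * x = 0x90 gives ffz(~x) = 4 and hence signal number 4 + i*_NSIG_BPW + 1.
 */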
167 
168 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
169 					 int override_rlimit)
170 {
171 	struct sigqueue *q = NULL;
172 	struct user_struct *user;
173 
174 	/*
175 	 * In order to avoid problems with "switch_user()", we want to make
176 	 * sure that the compiler doesn't re-load "t->user"
177 	 */
178 	user = t->user;
179 	barrier();
180 	atomic_inc(&user->sigpending);
181 	if (override_rlimit ||
182 	    atomic_read(&user->sigpending) <=
183 			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
184 		q = kmem_cache_alloc(sigqueue_cachep, flags);
185 	if (unlikely(q == NULL)) {
186 		atomic_dec(&user->sigpending);
187 	} else {
188 		INIT_LIST_HEAD(&q->list);
189 		q->flags = 0;
190 		q->user = get_uid(user);
191 	}
192 	return q;
193 }
194 
195 static void __sigqueue_free(struct sigqueue *q)
196 {
197 	if (q->flags & SIGQUEUE_PREALLOC)
198 		return;
199 	atomic_dec(&q->user->sigpending);
200 	free_uid(q->user);
201 	kmem_cache_free(sigqueue_cachep, q);
202 }
203 
204 void flush_sigqueue(struct sigpending *queue)
205 {
206 	struct sigqueue *q;
207 
208 	sigemptyset(&queue->signal);
209 	while (!list_empty(&queue->list)) {
210 		q = list_entry(queue->list.next, struct sigqueue, list);
211 		list_del_init(&q->list);
212 		__sigqueue_free(q);
213 	}
214 }
215 
216 /*
217  * Flush all pending signals for a task.
218  */
219 void flush_signals(struct task_struct *t)
220 {
221 	unsigned long flags;
222 
223 	spin_lock_irqsave(&t->sighand->siglock, flags);
224 	clear_tsk_thread_flag(t, TIF_SIGPENDING);
225 	flush_sigqueue(&t->pending);
226 	flush_sigqueue(&t->signal->shared_pending);
227 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
228 }
229 
230 void ignore_signals(struct task_struct *t)
231 {
232 	int i;
233 
234 	for (i = 0; i < _NSIG; ++i)
235 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
236 
237 	flush_signals(t);
238 }
239 
240 /*
241  * Flush all handlers for a task.
242  */
243 
244 void
245 flush_signal_handlers(struct task_struct *t, int force_default)
246 {
247 	int i;
248 	struct k_sigaction *ka = &t->sighand->action[0];
249 	for (i = _NSIG ; i != 0 ; i--) {
250 		if (force_default || ka->sa.sa_handler != SIG_IGN)
251 			ka->sa.sa_handler = SIG_DFL;
252 		ka->sa.sa_flags = 0;
253 		sigemptyset(&ka->sa.sa_mask);
254 		ka++;
255 	}
256 }
257 
258 int unhandled_signal(struct task_struct *tsk, int sig)
259 {
260 	if (is_init(tsk))
261 		return 1;
262 	if (tsk->ptrace & PT_PTRACED)
263 		return 0;
264 	return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
265 		(tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
266 }
267 
268 
269 /* Notify the system that a driver wants to block all signals for this
270  * process, and wants to be notified if any signals at all were to be
271  * sent/acted upon.  If the notifier routine returns non-zero, then the
272  * signal will be acted upon after all.  If the notifier routine returns 0,
273  * then the signal will be blocked.  Only one block per process is
274  * allowed.  priv is a pointer to private data that the notifier routine
275  * can use to determine if the signal should be blocked or not.  */
276 
277 void
278 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
279 {
280 	unsigned long flags;
281 
282 	spin_lock_irqsave(&current->sighand->siglock, flags);
283 	current->notifier_mask = mask;
284 	current->notifier_data = priv;
285 	current->notifier = notifier;
286 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
287 }
288 
289 /* Notify the system that blocking has ended. */
290 
291 void
292 unblock_all_signals(void)
293 {
294 	unsigned long flags;
295 
296 	spin_lock_irqsave(&current->sighand->siglock, flags);
297 	current->notifier = NULL;
298 	current->notifier_data = NULL;
299 	recalc_sigpending();
300 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
301 }
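
/*
 * Illustrative sketch (editorial, not from the original source): a
 * driver using the notifier pair above might look roughly like this,
 * where "my_dev" and "my_notifier" are hypothetical names and the
 * notifier returns nonzero to let a signal through, 0 to keep it
 * blocked:
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		return !dev->busy;
 *	}
 *
 *	sigset_t mask;
 *
 *	sigfillset(&mask);
 *	block_all_signals(my_notifier, dev, &mask);
 *	... window in which delivery is vetoed by my_notifier() ...
 *	unblock_all_signals();
 */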
302 
303 static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
304 {
305 	struct sigqueue *q, *first = NULL;
306 	int still_pending = 0;
307 
308 	if (unlikely(!sigismember(&list->signal, sig)))
309 		return 0;
310 
311 	/*
312 	 * Collect the siginfo appropriate to this signal.  Check if
313 	 * there is another siginfo for the same signal.
314 	 */
315 	list_for_each_entry(q, &list->list, list) {
316 		if (q->info.si_signo == sig) {
317 			if (first) {
318 				still_pending = 1;
319 				break;
320 			}
321 			first = q;
322 		}
323 	}
324 	if (first) {
325 		list_del_init(&first->list);
326 		copy_siginfo(info, &first->info);
327 		__sigqueue_free(first);
328 		if (!still_pending)
329 			sigdelset(&list->signal, sig);
330 	} else {
331 
332 		/* Ok, it wasn't in the queue.  This must be
333 		   a fast-pathed signal or we must have been
334 		   out of queue space.  So zero out the info.
335 		 */
336 		sigdelset(&list->signal, sig);
337 		info->si_signo = sig;
338 		info->si_errno = 0;
339 		info->si_code = 0;
340 		info->si_pid = 0;
341 		info->si_uid = 0;
342 	}
343 	return 1;
344 }
345 
346 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
347 			siginfo_t *info)
348 {
349 	int sig = next_signal(pending, mask);
350 
351 	if (sig) {
352 		if (current->notifier) {
353 			if (sigismember(current->notifier_mask, sig)) {
354 				if (!(current->notifier)(current->notifier_data)) {
355 					clear_thread_flag(TIF_SIGPENDING);
356 					return 0;
357 				}
358 			}
359 		}
360 
361 		if (!collect_signal(sig, pending, info))
362 			sig = 0;
363 	}
364 
365 	return sig;
366 }
367 
368 /*
369  * Dequeue a signal and return the element to the caller, which is
370  * expected to free it.
371  *
372  * All callers have to hold the siglock.
373  */
374 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
375 {
376 	int signr = 0;
377 
378 	/* We only dequeue private signals from ourselves; we don't let
379 	 * signalfd steal them.
380 	 */
381 	signr = __dequeue_signal(&tsk->pending, mask, info);
382 	if (!signr) {
383 		signr = __dequeue_signal(&tsk->signal->shared_pending,
384 					 mask, info);
385 		/*
386 		 * itimer signal ?
387 		 *
388 		 * itimers are process shared and we restart periodic
389 		 * itimers in the signal delivery path to prevent DoS
390 		 * attacks in the high resolution timer case. This is
391 		 * compliant with the old way of self restarting
392 		 * itimers, as the SIGALRM is a legacy signal and only
393 		 * queued once. Changing the restart behaviour to
394 		 * restart the timer in the signal dequeue path is
395 		 * reducing the timer noise on heavy loaded !highres
396 		 * systems too.
397 		 */
398 		if (unlikely(signr == SIGALRM)) {
399 			struct hrtimer *tmr = &tsk->signal->real_timer;
400 
401 			if (!hrtimer_is_queued(tmr) &&
402 			    tsk->signal->it_real_incr.tv64 != 0) {
403 				hrtimer_forward(tmr, tmr->base->get_time(),
404 						tsk->signal->it_real_incr);
405 				hrtimer_restart(tmr);
406 			}
407 		}
408 	}
409 	recalc_sigpending();
410 	if (signr && unlikely(sig_kernel_stop(signr))) {
411 		/*
412 		 * Set a marker that we have dequeued a stop signal.  Our
413 		 * caller might release the siglock and then the pending
414 		 * stop signal it is about to process is no longer in the
415 		 * pending bitmasks, but must still be cleared by a SIGCONT
416 		 * (and overruled by a SIGKILL).  So those cases clear this
417 		 * shared flag after we've set it.  Note that this flag may
418 		 * remain set after the signal we return is ignored or
419 		 * handled.  That doesn't matter because its only purpose
420 		 * is to alert stop-signal processing code when another
421 		 * processor has come along and cleared the flag.
422 		 */
423 		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
424 			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
425 	}
426 	if (signr &&
427 	     ((info->si_code & __SI_MASK) == __SI_TIMER) &&
428 	     info->si_sys_private) {
429 		/*
430 		 * Release the siglock to ensure proper locking order
431 		 * of timer locks outside of siglocks.  Note, we leave
432 		 * irqs disabled here, since the posix-timers code is
433 		 * about to disable them again anyway.
434 		 */
435 		spin_unlock(&tsk->sighand->siglock);
436 		do_schedule_next_timer(info);
437 		spin_lock(&tsk->sighand->siglock);
438 	}
439 	return signr;
440 }
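
/*
 * Illustrative sketch (editorial): since all callers must hold the
 * siglock, a minimal use of dequeue_signal() looks like:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 *
 * get_signal_to_deliver() below is the main in-tree caller and follows
 * this pattern.
 */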
441 
442 /*
443  * Tell a process that it has a new active signal..
444  *
445  * NOTE! we rely on the previous spin_lock to
446  * lock interrupts for us! We can only be called with
447  * "siglock" held, and local interrupts must
448  * have been disabled when that got acquired!
449  *
450  * No need to set need_resched since signal event passing
451  * goes through ->blocked
452  */
453 void signal_wake_up(struct task_struct *t, int resume)
454 {
455 	unsigned int mask;
456 
457 	set_tsk_thread_flag(t, TIF_SIGPENDING);
458 
459 	/*
460 	 * For SIGKILL, we want to wake it up in the stopped/traced case.
461 	 * We don't check t->state here because there is a race with it
462 	 * executing on another processor and just now entering stopped state.
463 	 * By using wake_up_state, we ensure the process will wake up and
464 	 * handle its death signal.
465 	 */
466 	mask = TASK_INTERRUPTIBLE;
467 	if (resume)
468 		mask |= TASK_STOPPED | TASK_TRACED;
469 	if (!wake_up_state(t, mask))
470 		kick_process(t);
471 }
472 
473 /*
474  * Remove signals in mask from the pending set and queue.
475  * Returns 1 if any signals were found.
476  *
477  * All callers must be holding the siglock.
478  *
479  * This version takes a sigset mask and looks at all signals,
480  * not just those in the first mask word.
481  */
482 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
483 {
484 	struct sigqueue *q, *n;
485 	sigset_t m;
486 
487 	sigandsets(&m, mask, &s->signal);
488 	if (sigisemptyset(&m))
489 		return 0;
490 
491 	signandsets(&s->signal, &s->signal, mask);
492 	list_for_each_entry_safe(q, n, &s->list, list) {
493 		if (sigismember(mask, q->info.si_signo)) {
494 			list_del_init(&q->list);
495 			__sigqueue_free(q);
496 		}
497 	}
498 	return 1;
499 }
500 /*
501  * Remove signals in mask from the pending set and queue.
502  * Returns 1 if any signals were found.
503  *
504  * All callers must be holding the siglock.
505  */
506 static int rm_from_queue(unsigned long mask, struct sigpending *s)
507 {
508 	struct sigqueue *q, *n;
509 
510 	if (!sigtestsetmask(&s->signal, mask))
511 		return 0;
512 
513 	sigdelsetmask(&s->signal, mask);
514 	list_for_each_entry_safe(q, n, &s->list, list) {
515 		if (q->info.si_signo < SIGRTMIN &&
516 		    (mask & sigmask(q->info.si_signo))) {
517 			list_del_init(&q->list);
518 			__sigqueue_free(q);
519 		}
520 	}
521 	return 1;
522 }
523 
524 /*
525  * Bad permissions for sending the signal
526  */
527 static int check_kill_permission(int sig, struct siginfo *info,
528 				 struct task_struct *t)
529 {
530 	int error = -EINVAL;
531 	if (!valid_signal(sig))
532 		return error;
533 
534 	if (info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) {
535 		error = audit_signal_info(sig, t); /* Let audit system see the signal */
536 		if (error)
537 			return error;
538 		error = -EPERM;
539 		if (((sig != SIGCONT) ||
540 			(process_session(current) != process_session(t)))
541 		    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
542 		    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
543 		    && !capable(CAP_KILL))
544 			return error;
545 	}
546 
547 	return security_task_kill(t, info, sig, 0);
548 }
549 
550 /* forward decl */
551 static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
552 
553 /*
554  * Handle magic process-wide effects of stop/continue signals.
555  * Unlike the signal actions, these happen immediately at signal-generation
556  * time regardless of blocking, ignoring, or handling.  This does the
557  * actual continuing for SIGCONT, but not the actual stopping for stop
558  * signals.  The process stop is done as a signal action for SIG_DFL.
559  */
560 static void handle_stop_signal(int sig, struct task_struct *p)
561 {
562 	struct task_struct *t;
563 
564 	if (p->signal->flags & SIGNAL_GROUP_EXIT)
565 		/*
566 		 * The process is in the middle of dying already.
567 		 */
568 		return;
569 
570 	if (sig_kernel_stop(sig)) {
571 		/*
572 		 * This is a stop signal.  Remove SIGCONT from all queues.
573 		 */
574 		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
575 		t = p;
576 		do {
577 			rm_from_queue(sigmask(SIGCONT), &t->pending);
578 			t = next_thread(t);
579 		} while (t != p);
580 	} else if (sig == SIGCONT) {
581 		/*
582 		 * Remove all stop signals from all queues,
583 		 * and wake all threads.
584 		 */
585 		if (unlikely(p->signal->group_stop_count > 0)) {
586 			/*
587 			 * There was a group stop in progress.  We'll
588 			 * pretend it finished before we got here.  We are
589 			 * obliged to report it to the parent: if the
590 			 * SIGSTOP happened "after" this SIGCONT, then it
591 			 * would have cleared this pending SIGCONT.  If it
592 			 * happened "before" this SIGCONT, then the parent
593 			 * got the SIGCHLD about the stop finishing before
594 			 * the continue happened.  We do the notification
595 			 * now, and it's as if the stop had finished and
596 			 * the SIGCHLD was pending on entry to this kill.
597 			 */
598 			p->signal->group_stop_count = 0;
599 			p->signal->flags = SIGNAL_STOP_CONTINUED;
600 			spin_unlock(&p->sighand->siglock);
601 			do_notify_parent_cldstop(p, CLD_STOPPED);
602 			spin_lock(&p->sighand->siglock);
603 		}
604 		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
605 		t = p;
606 		do {
607 			unsigned int state;
608 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
609 
610 			/*
611 			 * If there is a handler for SIGCONT, we must make
612 			 * sure that no thread returns to user mode before
613 			 * we post the signal, in case it was the only
614 			 * thread eligible to run the signal handler--then
615 			 * it must not do anything between resuming and
616 			 * running the handler.  With the TIF_SIGPENDING
617 			 * flag set, the thread will pause and acquire the
618 			 * siglock that we hold now and until we've queued
619 			 * the pending signal.
620 			 *
621 			 * Wake up the stopped thread _after_ setting
622 			 * TIF_SIGPENDING
623 			 */
624 			state = TASK_STOPPED;
625 			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
626 				set_tsk_thread_flag(t, TIF_SIGPENDING);
627 				state |= TASK_INTERRUPTIBLE;
628 			}
629 			wake_up_state(t, state);
630 
631 			t = next_thread(t);
632 		} while (t != p);
633 
634 		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
635 			/*
636 			 * We were in fact stopped, and are now continued.
637 			 * Notify the parent with CLD_CONTINUED.
638 			 */
639 			p->signal->flags = SIGNAL_STOP_CONTINUED;
640 			p->signal->group_exit_code = 0;
641 			spin_unlock(&p->sighand->siglock);
642 			do_notify_parent_cldstop(p, CLD_CONTINUED);
643 			spin_lock(&p->sighand->siglock);
644 		} else {
645 			/*
646 			 * We are not stopped, but there could be a stop
647 			 * signal in the middle of being processed after
648 			 * being removed from the queue.  Clear that too.
649 			 */
650 			p->signal->flags = 0;
651 		}
652 	} else if (sig == SIGKILL) {
653 		/*
654 		 * Make sure that any pending stop signal already dequeued
655 		 * is undone by the wakeup for SIGKILL.
656 		 */
657 		p->signal->flags = 0;
658 	}
659 }
660 
661 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
662 			struct sigpending *signals)
663 {
664 	struct sigqueue * q = NULL;
665 	int ret = 0;
666 
667 	/*
668 	 * Deliver the signal to listening signalfds. This must be called
669 	 * with the sighand lock held.
670 	 */
671 	signalfd_notify(t, sig);
672 
673 	/*
674 	 * fast-pathed signals for kernel-internal things like SIGSTOP
675 	 * or SIGKILL.
676 	 */
677 	if (info == SEND_SIG_FORCED)
678 		goto out_set;
679 
680 	/* Real-time signals must be queued if sent by sigqueue, or
681 	   some other real-time mechanism.  It is implementation
682 	   defined whether kill() does so.  We attempt to do so, on
683 	   the principle of least surprise, but since kill is not
684 	   allowed to fail with EAGAIN when low on memory we just
685 	   make sure at least one signal gets delivered and don't
686 	   pass on the info struct.  */
687 
688 	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
689 					     (is_si_special(info) ||
690 					      info->si_code >= 0)));
691 	if (q) {
692 		list_add_tail(&q->list, &signals->list);
693 		switch ((unsigned long) info) {
694 		case (unsigned long) SEND_SIG_NOINFO:
695 			q->info.si_signo = sig;
696 			q->info.si_errno = 0;
697 			q->info.si_code = SI_USER;
698 			q->info.si_pid = current->pid;
699 			q->info.si_uid = current->uid;
700 			break;
701 		case (unsigned long) SEND_SIG_PRIV:
702 			q->info.si_signo = sig;
703 			q->info.si_errno = 0;
704 			q->info.si_code = SI_KERNEL;
705 			q->info.si_pid = 0;
706 			q->info.si_uid = 0;
707 			break;
708 		default:
709 			copy_siginfo(&q->info, info);
710 			break;
711 		}
712 	} else if (!is_si_special(info)) {
713 		/*
714 		 * Queue overflow, abort.  We may abort if the signal was rt
715 		 * and sent by user using something other than kill().
716 		 */
717 		if (sig >= SIGRTMIN && info->si_code != SI_USER)
718 			return -EAGAIN;
719 	}
720 
721 out_set:
722 	sigaddset(&signals->signal, sig);
723 	return ret;
724 }
725 
726 #define LEGACY_QUEUE(sigptr, sig) \
727 	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
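
/*
 * Editorial note: LEGACY_QUEUE() is what limits classic (non-realtime)
 * signals to one queued instance: if the bit for sig is already set in
 * the pending mask, a second, say, SIGTERM is simply dropped rather
 * than queued again.
 */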
728 
729 int print_fatal_signals;
730 
731 static void print_fatal_signal(struct pt_regs *regs, int signr)
732 {
733 	printk("%s/%d: potentially unexpected fatal signal %d.\n",
734 		current->comm, current->pid, signr);
735 
736 #ifdef __i386__
737 	printk("code at %08lx: ", regs->eip);
738 	{
739 		int i;
740 		for (i = 0; i < 16; i++) {
741 			unsigned char insn;
742 
743 			__get_user(insn, (unsigned char *)(regs->eip + i));
744 			printk("%02x ", insn);
745 		}
746 	}
747 #endif
748 	printk("\n");
749 	show_regs(regs);
750 }
751 
752 static int __init setup_print_fatal_signals(char *str)
753 {
754 	get_option(&str, &print_fatal_signals);
755 
756 	return 1;
757 }
758 
759 __setup("print-fatal-signals=", setup_print_fatal_signals);
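
/*
 * Usage note (editorial): booting with "print-fatal-signals=1" on the
 * kernel command line enables the dump in print_fatal_signal() above.
 */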
760 
761 static int
762 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
763 {
764 	int ret = 0;
765 
766 	BUG_ON(!irqs_disabled());
767 	assert_spin_locked(&t->sighand->siglock);
768 
769 	/* Short-circuit ignored signals.  */
770 	if (sig_ignored(t, sig))
771 		goto out;
772 
773 	/* Support queueing exactly one non-rt signal, so that we
774 	   can get more detailed information about the cause of
775 	   the signal. */
776 	if (LEGACY_QUEUE(&t->pending, sig))
777 		goto out;
778 
779 	ret = send_signal(sig, info, t, &t->pending);
780 	if (!ret && !sigismember(&t->blocked, sig))
781 		signal_wake_up(t, sig == SIGKILL);
782 out:
783 	return ret;
784 }
785 
786 /*
787  * Force a signal that the process can't ignore: if necessary
788  * we unblock the signal and change any SIG_IGN to SIG_DFL.
789  *
790  * Note: If we unblock the signal, we always reset it to SIG_DFL,
791  * since we do not want to have a signal handler that was blocked
792  * be invoked when user space had explicitly blocked it.
793  *
794  * We don't want to have recursive SIGSEGV's etc, for example.
795  */
796 int
797 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
798 {
799 	unsigned long int flags;
800 	int ret, blocked, ignored;
801 	struct k_sigaction *action;
802 
803 	spin_lock_irqsave(&t->sighand->siglock, flags);
804 	action = &t->sighand->action[sig-1];
805 	ignored = action->sa.sa_handler == SIG_IGN;
806 	blocked = sigismember(&t->blocked, sig);
807 	if (blocked || ignored) {
808 		action->sa.sa_handler = SIG_DFL;
809 		if (blocked) {
810 			sigdelset(&t->blocked, sig);
811 			recalc_sigpending_and_wake(t);
812 		}
813 	}
814 	ret = specific_send_sig_info(sig, info, t);
815 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
816 
817 	return ret;
818 }
819 
820 void
821 force_sig_specific(int sig, struct task_struct *t)
822 {
823 	force_sig_info(sig, SEND_SIG_FORCED, t);
824 }
825 
826 /*
827  * Test if P wants to take SIG.  After we've checked all threads with this,
828  * it's equivalent to finding no threads not blocking SIG.  Any threads not
829  * blocking SIG were ruled out because they are not running and already
830  * have pending signals.  Such threads will dequeue from the shared queue
831  * as soon as they're available, so putting the signal on the shared queue
832  * will be equivalent to sending it to one such thread.
833  */
834 static inline int wants_signal(int sig, struct task_struct *p)
835 {
836 	if (sigismember(&p->blocked, sig))
837 		return 0;
838 	if (p->flags & PF_EXITING)
839 		return 0;
840 	if (sig == SIGKILL)
841 		return 1;
842 	if (p->state & (TASK_STOPPED | TASK_TRACED))
843 		return 0;
844 	return task_curr(p) || !signal_pending(p);
845 }
846 
847 static void
848 __group_complete_signal(int sig, struct task_struct *p)
849 {
850 	struct task_struct *t;
851 
852 	/*
853 	 * Now find a thread we can wake up to take the signal off the queue.
854 	 *
855 	 * If the main thread wants the signal, it gets first crack.
856 	 * Probably the least surprising to the average bear.
857 	 */
858 	if (wants_signal(sig, p))
859 		t = p;
860 	else if (thread_group_empty(p))
861 		/*
862 		 * There is just one thread and it does not need to be woken.
863 		 * It will dequeue unblocked signals before it runs again.
864 		 */
865 		return;
866 	else {
867 		/*
868 		 * Otherwise try to find a suitable thread.
869 		 */
870 		t = p->signal->curr_target;
871 		if (t == NULL)
872 			/* restart balancing at this thread */
873 			t = p->signal->curr_target = p;
874 
875 		while (!wants_signal(sig, t)) {
876 			t = next_thread(t);
877 			if (t == p->signal->curr_target)
878 				/*
879 				 * No thread needs to be woken.
880 				 * Any eligible threads will see
881 				 * the signal in the queue soon.
882 				 */
883 				return;
884 		}
885 		p->signal->curr_target = t;
886 	}
887 
888 	/*
889 	 * Found a killable thread.  If the signal will be fatal,
890 	 * then start taking the whole group down immediately.
891 	 */
892 	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
893 	    !sigismember(&t->real_blocked, sig) &&
894 	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
895 		/*
896 		 * This signal will be fatal to the whole group.
897 		 */
898 		if (!sig_kernel_coredump(sig)) {
899 			/*
900 			 * Start a group exit and wake everybody up.
901 			 * This way we don't have other threads
902 			 * running and doing things after a slower
903 			 * thread has the fatal signal pending.
904 			 */
905 			p->signal->flags = SIGNAL_GROUP_EXIT;
906 			p->signal->group_exit_code = sig;
907 			p->signal->group_stop_count = 0;
908 			t = p;
909 			do {
910 				sigaddset(&t->pending.signal, SIGKILL);
911 				signal_wake_up(t, 1);
912 				t = next_thread(t);
913 			} while (t != p);
914 			return;
915 		}
916 
917 		/*
918 		 * There will be a core dump.  We make all threads other
919 		 * than the chosen one go into a group stop so that nothing
920 		 * happens until it gets scheduled, takes the signal off
921 		 * the shared queue, and does the core dump.  This is a
922 		 * little more complicated than strictly necessary, but it
923 		 * keeps the signal state that winds up in the core dump
924 		 * unchanged from the death state, e.g. which thread had
925 		 * the core-dump signal unblocked.
926 		 */
927 		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
928 		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
929 		p->signal->group_stop_count = 0;
930 		p->signal->group_exit_task = t;
931 		t = p;
932 		do {
933 			p->signal->group_stop_count++;
934 			signal_wake_up(t, 0);
935 			t = next_thread(t);
936 		} while (t != p);
937 		wake_up_process(p->signal->group_exit_task);
938 		return;
939 	}
940 
941 	/*
942 	 * The signal is already in the shared-pending queue.
943 	 * Tell the chosen thread to wake up and dequeue it.
944 	 */
945 	signal_wake_up(t, sig == SIGKILL);
946 	return;
947 }
948 
949 int
950 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
951 {
952 	int ret = 0;
953 
954 	assert_spin_locked(&p->sighand->siglock);
955 	handle_stop_signal(sig, p);
956 
957 	/* Short-circuit ignored signals.  */
958 	if (sig_ignored(p, sig))
959 		return ret;
960 
961 	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
962 		/* This is a non-RT signal and we already have one queued.  */
963 		return ret;
964 
965 	/*
966 	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
967 	 * We always use the shared queue for process-wide signals,
968 	 * to avoid several races.
969 	 */
970 	ret = send_signal(sig, info, p, &p->signal->shared_pending);
971 	if (unlikely(ret))
972 		return ret;
973 
974 	__group_complete_signal(sig, p);
975 	return 0;
976 }
977 
978 /*
979  * Nuke all other threads in the group.
980  */
981 void zap_other_threads(struct task_struct *p)
982 {
983 	struct task_struct *t;
984 
985 	p->signal->flags = SIGNAL_GROUP_EXIT;
986 	p->signal->group_stop_count = 0;
987 
988 	if (thread_group_empty(p))
989 		return;
990 
991 	for (t = next_thread(p); t != p; t = next_thread(t)) {
992 		/*
993 		 * Don't bother with already dead threads
994 		 */
995 		if (t->exit_state)
996 			continue;
997 
998 		/* SIGKILL will be handled before any pending SIGSTOP */
999 		sigaddset(&t->pending.signal, SIGKILL);
1000 		signal_wake_up(t, 1);
1001 	}
1002 }
1003 
1004 /*
1005  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
1006  */
1007 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
1008 {
1009 	struct sighand_struct *sighand;
1010 
1011 	for (;;) {
1012 		sighand = rcu_dereference(tsk->sighand);
1013 		if (unlikely(sighand == NULL))
1014 			break;
1015 
1016 		spin_lock_irqsave(&sighand->siglock, *flags);
1017 		if (likely(sighand == tsk->sighand))
1018 			break;
1019 		spin_unlock_irqrestore(&sighand->siglock, *flags);
1020 	}
1021 
1022 	return sighand;
1023 }
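
/*
 * Illustrative sketch (editorial): the canonical calling pattern pairs
 * lock_task_sighand() with unlock_task_sighand() and copes with a task
 * whose sighand is already gone:
 *
 *	unsigned long flags;
 *
 *	rcu_read_lock();
 *	if (lock_task_sighand(p, &flags)) {
 *		... p->sighand is stable here ...
 *		unlock_task_sighand(p, &flags);
 *	}
 *	rcu_read_unlock();
 *
 * send_sigqueue() below follows exactly this shape.
 */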
1024 
1025 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1026 {
1027 	unsigned long flags;
1028 	int ret;
1029 
1030 	ret = check_kill_permission(sig, info, p);
1031 
1032 	if (!ret && sig) {
1033 		ret = -ESRCH;
1034 		if (lock_task_sighand(p, &flags)) {
1035 			ret = __group_send_sig_info(sig, info, p);
1036 			unlock_task_sighand(p, &flags);
1037 		}
1038 	}
1039 
1040 	return ret;
1041 }
1042 
1043 /*
1044  * kill_pgrp_info() sends a signal to a process group: this is what the tty
1045  * control characters do (^C, ^Z etc)
1046  */
1047 
1048 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1049 {
1050 	struct task_struct *p = NULL;
1051 	int retval, success;
1052 
1053 	success = 0;
1054 	retval = -ESRCH;
1055 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1056 		int err = group_send_sig_info(sig, info, p);
1057 		success |= !err;
1058 		retval = err;
1059 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1060 	return success ? 0 : retval;
1061 }
1062 
1063 int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1064 {
1065 	int retval;
1066 
1067 	read_lock(&tasklist_lock);
1068 	retval = __kill_pgrp_info(sig, info, pgrp);
1069 	read_unlock(&tasklist_lock);
1070 
1071 	return retval;
1072 }
1073 
1074 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1075 {
1076 	int error;
1077 	struct task_struct *p;
1078 
1079 	rcu_read_lock();
1080 	if (unlikely(sig_needs_tasklist(sig)))
1081 		read_lock(&tasklist_lock);
1082 
1083 	p = pid_task(pid, PIDTYPE_PID);
1084 	error = -ESRCH;
1085 	if (p)
1086 		error = group_send_sig_info(sig, info, p);
1087 
1088 	if (unlikely(sig_needs_tasklist(sig)))
1089 		read_unlock(&tasklist_lock);
1090 	rcu_read_unlock();
1091 	return error;
1092 }
1093 
1094 int
1095 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1096 {
1097 	int error;
1098 	rcu_read_lock();
1099 	error = kill_pid_info(sig, info, find_pid(pid));
1100 	rcu_read_unlock();
1101 	return error;
1102 }
1103 
1104 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1105 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1106 		      uid_t uid, uid_t euid, u32 secid)
1107 {
1108 	int ret = -EINVAL;
1109 	struct task_struct *p;
1110 
1111 	if (!valid_signal(sig))
1112 		return ret;
1113 
1114 	read_lock(&tasklist_lock);
1115 	p = pid_task(pid, PIDTYPE_PID);
1116 	if (!p) {
1117 		ret = -ESRCH;
1118 		goto out_unlock;
1119 	}
1120 	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1121 	    && (euid != p->suid) && (euid != p->uid)
1122 	    && (uid != p->suid) && (uid != p->uid)) {
1123 		ret = -EPERM;
1124 		goto out_unlock;
1125 	}
1126 	ret = security_task_kill(p, info, sig, secid);
1127 	if (ret)
1128 		goto out_unlock;
1129 	if (sig && p->sighand) {
1130 		unsigned long flags;
1131 		spin_lock_irqsave(&p->sighand->siglock, flags);
1132 		ret = __group_send_sig_info(sig, info, p);
1133 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1134 	}
1135 out_unlock:
1136 	read_unlock(&tasklist_lock);
1137 	return ret;
1138 }
1139 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1140 
1141 /*
1142  * kill_something_info() interprets pid in interesting ways just like kill(2).
1143  *
1144  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1145  * is probably wrong.  Should make it like BSD or SYSV.
1146  */
1147 
1148 static int kill_something_info(int sig, struct siginfo *info, int pid)
1149 {
1150 	int ret;
1151 	rcu_read_lock();
1152 	if (!pid) {
1153 		ret = kill_pgrp_info(sig, info, task_pgrp(current));
1154 	} else if (pid == -1) {
1155 		int retval = 0, count = 0;
1156 		struct task_struct * p;
1157 
1158 		read_lock(&tasklist_lock);
1159 		for_each_process(p) {
1160 			if (p->pid > 1 && p->tgid != current->tgid) {
1161 				int err = group_send_sig_info(sig, info, p);
1162 				++count;
1163 				if (err != -EPERM)
1164 					retval = err;
1165 			}
1166 		}
1167 		read_unlock(&tasklist_lock);
1168 		ret = count ? retval : -ESRCH;
1169 	} else if (pid < 0) {
1170 		ret = kill_pgrp_info(sig, info, find_pid(-pid));
1171 	} else {
1172 		ret = kill_pid_info(sig, info, find_pid(pid));
1173 	}
1174 	rcu_read_unlock();
1175 	return ret;
1176 }
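
/*
 * Summary of the pid encoding handled above (editorial, mirroring
 * kill(2)):
 *
 *	pid > 0		signal the process with that pid
 *	pid == 0	signal every process in the caller's process group
 *	pid == -1	signal every process except init and our own
 *			thread group
 *	pid < -1	signal every process in the process group -pid
 */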
1177 
1178 /*
1179  * These are for backward compatibility with the rest of the kernel source.
1180  */
1181 
1182 /*
1183  * These two are the most common entry points.  They send a signal
1184  * just to the specific thread.
1185  */
1186 int
1187 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1188 {
1189 	int ret;
1190 	unsigned long flags;
1191 
1192 	/*
1193 	 * Make sure legacy kernel users don't send in bad values
1194 	 * (normal paths check this in check_kill_permission).
1195 	 */
1196 	if (!valid_signal(sig))
1197 		return -EINVAL;
1198 
1199 	/*
1200 	 * We need the tasklist lock even for the specific
1201 	 * thread case (when we don't need to follow the group
1202 	 * lists) in order to avoid races with "p->sighand"
1203 	 * going away or changing from under us.
1204 	 */
1205 	read_lock(&tasklist_lock);
1206 	spin_lock_irqsave(&p->sighand->siglock, flags);
1207 	ret = specific_send_sig_info(sig, info, p);
1208 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
1209 	read_unlock(&tasklist_lock);
1210 	return ret;
1211 }
1212 
1213 #define __si_special(priv) \
1214 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1215 
1216 int
1217 send_sig(int sig, struct task_struct *p, int priv)
1218 {
1219 	return send_sig_info(sig, __si_special(priv), p);
1220 }
1221 
1222 /*
1223  * This is the entry point for "process-wide" signals.
1224  * They will go to an appropriate thread in the thread group.
1225  */
1226 int
1227 send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1228 {
1229 	int ret;
1230 	read_lock(&tasklist_lock);
1231 	ret = group_send_sig_info(sig, info, p);
1232 	read_unlock(&tasklist_lock);
1233 	return ret;
1234 }
1235 
1236 void
1237 force_sig(int sig, struct task_struct *p)
1238 {
1239 	force_sig_info(sig, SEND_SIG_PRIV, p);
1240 }
1241 
1242 /*
1243  * When things go south during signal handling, we
1244  * will force a SIGSEGV. And if the signal that caused
1245  * the problem was already a SIGSEGV, we'll want to
1246  * make sure we don't even try to deliver the signal..
1247  */
1248 int
1249 force_sigsegv(int sig, struct task_struct *p)
1250 {
1251 	if (sig == SIGSEGV) {
1252 		unsigned long flags;
1253 		spin_lock_irqsave(&p->sighand->siglock, flags);
1254 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1255 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1256 	}
1257 	force_sig(SIGSEGV, p);
1258 	return 0;
1259 }
1260 
1261 int kill_pgrp(struct pid *pid, int sig, int priv)
1262 {
1263 	return kill_pgrp_info(sig, __si_special(priv), pid);
1264 }
1265 EXPORT_SYMBOL(kill_pgrp);
1266 
1267 int kill_pid(struct pid *pid, int sig, int priv)
1268 {
1269 	return kill_pid_info(sig, __si_special(priv), pid);
1270 }
1271 EXPORT_SYMBOL(kill_pid);
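
/*
 * Illustrative sketch (editorial): kernel code that holds a struct pid
 * reference can signal it directly, e.g.:
 *
 *	kill_pid(pid, SIGHUP, 1);	(priv=1 selects SEND_SIG_PRIV)
 */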
1272 
1273 int
1274 kill_proc(pid_t pid, int sig, int priv)
1275 {
1276 	return kill_proc_info(sig, __si_special(priv), pid);
1277 }
1278 
1279 /*
1280  * These functions support sending signals using preallocated sigqueue
1281  * structures.  This is needed "because realtime applications cannot
1282  * afford to lose notifications of asynchronous events, like timer
1283  * expirations or I/O completions".  In the case of POSIX timers
1284  * we allocate the sigqueue structure at timer_create() time.  If this
1285  * allocation fails, we are able to report the failure to the application
1286  * with an EAGAIN error.
1287  */
1288 
1289 struct sigqueue *sigqueue_alloc(void)
1290 {
1291 	struct sigqueue *q;
1292 
1293 	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1294 		q->flags |= SIGQUEUE_PREALLOC;
1295 	return q;
1296 }
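
/*
 * Illustrative sketch (editorial): the POSIX timer code preallocates
 * the entry at timer_create() time and reuses it on every expiry,
 * roughly:
 *
 *	q = sigqueue_alloc();			(NULL here -> -EAGAIN)
 *	...
 *	send_sigqueue(sig, q, task);		(or send_group_sigqueue())
 *	...
 *	sigqueue_free(q);			(at timer deletion time)
 */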
1297 
1298 void sigqueue_free(struct sigqueue *q)
1299 {
1300 	unsigned long flags;
1301 	spinlock_t *lock = &current->sighand->siglock;
1302 
1303 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1304 	/*
1305 	 * If the signal is still pending remove it from the
1306 	 * pending queue. We must hold ->siglock while testing
1307 	 * q->list to serialize with collect_signal().
1308 	 */
1309 	spin_lock_irqsave(lock, flags);
1310 	if (!list_empty(&q->list))
1311 		list_del_init(&q->list);
1312 	spin_unlock_irqrestore(lock, flags);
1313 
1314 	q->flags &= ~SIGQUEUE_PREALLOC;
1315 	__sigqueue_free(q);
1316 }
1317 
1318 int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1319 {
1320 	unsigned long flags;
1321 	int ret = 0;
1322 
1323 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1324 
1325 	/*
1326 	 * The RCU-based delayed sighand destruction makes it possible to
1327 	 * run this without the tasklist lock held. The task struct itself
1328 	 * cannot go away, as create_timer did get_task_struct().
1329 	 *
1330 	 * We return -1 when the task is marked exiting, so
1331 	 * posix_timer_event() can redirect it to the group leader.
1332 	 */
1333 	rcu_read_lock();
1334 
1335 	if (unlikely(!lock_task_sighand(p, &flags))) {
1336 		ret = -1;
1337 		goto out_err;
1338 	}
1339 
1340 	if (unlikely(!list_empty(&q->list))) {
1341 		/*
1342 		 * If an SI_TIMER entry is already queued, just increment
1343 		 * the overrun count.
1344 		 */
1345 		BUG_ON(q->info.si_code != SI_TIMER);
1346 		q->info.si_overrun++;
1347 		goto out;
1348 	}
1349 	/* Short-circuit ignored signals.  */
1350 	if (sig_ignored(p, sig)) {
1351 		ret = 1;
1352 		goto out;
1353 	}
1354 	/*
1355 	 * Deliver the signal to listening signalfds. This must be called
1356 	 * with the sighand lock held.
1357 	 */
1358 	signalfd_notify(p, sig);
1359 
1360 	list_add_tail(&q->list, &p->pending.list);
1361 	sigaddset(&p->pending.signal, sig);
1362 	if (!sigismember(&p->blocked, sig))
1363 		signal_wake_up(p, sig == SIGKILL);
1364 
1365 out:
1366 	unlock_task_sighand(p, &flags);
1367 out_err:
1368 	rcu_read_unlock();
1369 
1370 	return ret;
1371 }
1372 
1373 int
1374 send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1375 {
1376 	unsigned long flags;
1377 	int ret = 0;
1378 
1379 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1380 
1381 	read_lock(&tasklist_lock);
1382 	/* Since it_lock is held, p->sighand cannot be NULL. */
1383 	spin_lock_irqsave(&p->sighand->siglock, flags);
1384 	handle_stop_signal(sig, p);
1385 
1386 	/* Short-circuit ignored signals.  */
1387 	if (sig_ignored(p, sig)) {
1388 		ret = 1;
1389 		goto out;
1390 	}
1391 
1392 	if (unlikely(!list_empty(&q->list))) {
1393 		/*
1394 		 * If an SI_TIMER entry is already queued, just increment
1395 		 * the overrun count.  Other uses should not try to
1396 		 * send the signal multiple times.
1397 		 */
1398 		BUG_ON(q->info.si_code != SI_TIMER);
1399 		q->info.si_overrun++;
1400 		goto out;
1401 	}
1402 	/*
1403 	 * Deliver the signal to listening signalfds. This must be called
1404 	 * with the sighand lock held.
1405 	 */
1406 	signalfd_notify(p, sig);
1407 
1408 	/*
1409 	 * Put this signal on the shared-pending queue.
1410 	 * We always use the shared queue for process-wide signals,
1411 	 * to avoid several races.
1412 	 */
1413 	list_add_tail(&q->list, &p->signal->shared_pending.list);
1414 	sigaddset(&p->signal->shared_pending.signal, sig);
1415 
1416 	__group_complete_signal(sig, p);
1417 out:
1418 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
1419 	read_unlock(&tasklist_lock);
1420 	return ret;
1421 }
1422 
1423 /*
1424  * Wake up any threads in the parent blocked in wait* syscalls.
1425  */
1426 static inline void __wake_up_parent(struct task_struct *p,
1427 				    struct task_struct *parent)
1428 {
1429 	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1430 }
1431 
1432 /*
1433  * Let a parent know about the death of a child.
1434  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1435  */
1436 
1437 void do_notify_parent(struct task_struct *tsk, int sig)
1438 {
1439 	struct siginfo info;
1440 	unsigned long flags;
1441 	struct sighand_struct *psig;
1442 
1443 	BUG_ON(sig == -1);
1444 
1445 	/* do_notify_parent_cldstop should have been called instead.  */
1446 	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1447 
1448 	BUG_ON(!tsk->ptrace &&
1449 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1450 
1451 	info.si_signo = sig;
1452 	info.si_errno = 0;
1453 	info.si_pid = tsk->pid;
1454 	info.si_uid = tsk->uid;
1455 
1456 	/* FIXME: find out whether or not this is supposed to be c*time. */
1457 	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1458 						       tsk->signal->utime));
1459 	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1460 						       tsk->signal->stime));
1461 
1462 	info.si_status = tsk->exit_code & 0x7f;
1463 	if (tsk->exit_code & 0x80)
1464 		info.si_code = CLD_DUMPED;
1465 	else if (tsk->exit_code & 0x7f)
1466 		info.si_code = CLD_KILLED;
1467 	else {
1468 		info.si_code = CLD_EXITED;
1469 		info.si_status = tsk->exit_code >> 8;
1470 	}
1471 
1472 	psig = tsk->parent->sighand;
1473 	spin_lock_irqsave(&psig->siglock, flags);
1474 	if (!tsk->ptrace && sig == SIGCHLD &&
1475 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1476 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1477 		/*
1478 		 * We are exiting and our parent doesn't care.  POSIX.1
1479 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1480 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1481 		 * automatically and not left for our parent's wait4 call.
1482 		 * Rather than having the parent do it as a magic kind of
1483 		 * signal handler, we just set this to tell do_exit that we
1484 		 * can be cleaned up without becoming a zombie.  Note that
1485 		 * we still call __wake_up_parent in this case, because a
1486 		 * blocked sys_wait4 might now return -ECHILD.
1487 		 *
1488 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1489 		 * is implementation-defined: we do (if you don't want
1490 		 * it, just use SIG_IGN instead).
1491 		 */
1492 		tsk->exit_signal = -1;
1493 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1494 			sig = 0;
1495 	}
1496 	if (valid_signal(sig) && sig > 0)
1497 		__group_send_sig_info(sig, &info, tsk->parent);
1498 	__wake_up_parent(tsk, tsk->parent);
1499 	spin_unlock_irqrestore(&psig->siglock, flags);
1500 }
1501 
1502 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1503 {
1504 	struct siginfo info;
1505 	unsigned long flags;
1506 	struct task_struct *parent;
1507 	struct sighand_struct *sighand;
1508 
1509 	if (tsk->ptrace & PT_PTRACED)
1510 		parent = tsk->parent;
1511 	else {
1512 		tsk = tsk->group_leader;
1513 		parent = tsk->real_parent;
1514 	}
1515 
1516 	info.si_signo = SIGCHLD;
1517 	info.si_errno = 0;
1518 	info.si_pid = tsk->pid;
1519 	info.si_uid = tsk->uid;
1520 
1521 	/* FIXME: find out whether or not this is supposed to be c*time. */
1522 	info.si_utime = cputime_to_jiffies(tsk->utime);
1523 	info.si_stime = cputime_to_jiffies(tsk->stime);
1524 
1525 	info.si_code = why;
1526 	switch (why) {
1527 	case CLD_CONTINUED:
1528 		info.si_status = SIGCONT;
1529 		break;
1530 	case CLD_STOPPED:
1531 		info.si_status = tsk->signal->group_exit_code & 0x7f;
1532 		break;
1533 	case CLD_TRAPPED:
1534 		info.si_status = tsk->exit_code & 0x7f;
1535 		break;
1536 	default:
1537 		BUG();
1538 	}
1539 
1540 	sighand = parent->sighand;
1541 	spin_lock_irqsave(&sighand->siglock, flags);
1542 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1543 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1544 		__group_send_sig_info(SIGCHLD, &info, parent);
1545 	/*
1546 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1547 	 */
1548 	__wake_up_parent(tsk, parent);
1549 	spin_unlock_irqrestore(&sighand->siglock, flags);
1550 }
1551 
1552 static inline int may_ptrace_stop(void)
1553 {
1554 	if (unlikely(!(current->ptrace & PT_PTRACED)))
1555 		return 0;
1556 
1557 	if (unlikely(current->parent == current->real_parent &&
1558 		    (current->ptrace & PT_ATTACHED)))
1559 		return 0;
1560 
1561 	/*
1562 	 * Are we in the middle of do_coredump?
1563 	 * If so and our tracer is also part of the coredump stopping
1564 	 * is a deadlock situation, and pointless because our tracer
1565 	 * is dead so don't allow us to stop.
1566 	 * If SIGKILL was already sent before the caller unlocked
1567 	 * ->siglock we must see ->core_waiters != 0. Otherwise it
1568 	 * is safe to enter schedule().
1569 	 */
1570 	if (unlikely(current->mm->core_waiters) &&
1571 	    unlikely(current->mm == current->parent->mm))
1572 		return 0;
1573 
1574 	return 1;
1575 }
1576 
1577 /*
1578  * This must be called with current->sighand->siglock held.
1579  *
1580  * This should be the path for all ptrace stops.
1581  * We always set current->last_siginfo while stopped here.
1582  * That makes it a way to test a stopped process for
1583  * being ptrace-stopped vs being job-control-stopped.
1584  *
1585  * If we actually decide not to stop at all because the tracer is gone,
1586  * we leave nostop_code in current->exit_code.
1587  */
1588 static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1589 {
1590 	/*
1591 	 * If there is a group stop in progress,
1592 	 * we must participate in the bookkeeping.
1593 	 */
1594 	if (current->signal->group_stop_count > 0)
1595 		--current->signal->group_stop_count;
1596 
1597 	current->last_siginfo = info;
1598 	current->exit_code = exit_code;
1599 
1600 	/* Let the debugger run.  */
1601 	set_current_state(TASK_TRACED);
1602 	spin_unlock_irq(&current->sighand->siglock);
1603 	try_to_freeze();
1604 	read_lock(&tasklist_lock);
1605 	if (may_ptrace_stop()) {
1606 		do_notify_parent_cldstop(current, CLD_TRAPPED);
1607 		read_unlock(&tasklist_lock);
1608 		schedule();
1609 	} else {
1610 		/*
1611 		 * By the time we got the lock, our tracer went away.
1612 		 * Don't stop here.
1613 		 */
1614 		read_unlock(&tasklist_lock);
1615 		set_current_state(TASK_RUNNING);
1616 		current->exit_code = nostop_code;
1617 	}
1618 
1619 	/*
1620 	 * We are back.  Now reacquire the siglock before touching
1621 	 * last_siginfo, so that we are sure to have synchronized with
1622 	 * any signal-sending on another CPU that wants to examine it.
1623 	 */
1624 	spin_lock_irq(&current->sighand->siglock);
1625 	current->last_siginfo = NULL;
1626 
1627 	/*
1628 	 * Queued signals ignored us while we were stopped for tracing.
1629 	 * So check for any that we should take before resuming user mode.
1630 	 * This sets TIF_SIGPENDING, but never clears it.
1631 	 */
1632 	recalc_sigpending_tsk(current);
1633 }
1634 
1635 void ptrace_notify(int exit_code)
1636 {
1637 	siginfo_t info;
1638 
1639 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1640 
1641 	memset(&info, 0, sizeof info);
1642 	info.si_signo = SIGTRAP;
1643 	info.si_code = exit_code;
1644 	info.si_pid = current->pid;
1645 	info.si_uid = current->uid;
1646 
1647 	/* Let the debugger run.  */
1648 	spin_lock_irq(&current->sighand->siglock);
1649 	ptrace_stop(exit_code, 0, &info);
1650 	spin_unlock_irq(&current->sighand->siglock);
1651 }
1652 
1653 static void
1654 finish_stop(int stop_count)
1655 {
1656 	/*
1657 	 * If there are no other threads in the group, or if there is
1658 	 * a group stop in progress and we are the last to stop,
1659 	 * report to the parent.  When ptraced, every thread reports itself.
1660 	 */
1661 	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
1662 		read_lock(&tasklist_lock);
1663 		do_notify_parent_cldstop(current, CLD_STOPPED);
1664 		read_unlock(&tasklist_lock);
1665 	}
1666 
1667 	do {
1668 		schedule();
1669 	} while (try_to_freeze());
1670 	/*
1671 	 * Now we don't run again until continued.
1672 	 */
1673 	current->exit_code = 0;
1674 }
1675 
1676 /*
1677  * This performs the stopping for SIGSTOP and other stop signals.
1678  * We have to stop all threads in the thread group.
1679  * Returns nonzero if we've actually stopped and released the siglock.
1680  * Returns zero if we didn't stop and still hold the siglock.
1681  */
1682 static int do_signal_stop(int signr)
1683 {
1684 	struct signal_struct *sig = current->signal;
1685 	int stop_count;
1686 
1687 	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1688 		return 0;
1689 
1690 	if (sig->group_stop_count > 0) {
1691 		/*
1692 		 * There is a group stop in progress.  We don't need to
1693 		 * start another one.
1694 		 */
1695 		stop_count = --sig->group_stop_count;
1696 	} else {
1697 		/*
1698 		 * There is no group stop already in progress.
1699 		 * We must initiate one now.
1700 		 */
1701 		struct task_struct *t;
1702 
1703 		sig->group_exit_code = signr;
1704 
1705 		stop_count = 0;
1706 		for (t = next_thread(current); t != current; t = next_thread(t))
1707 			/*
1708 			 * Setting state to TASK_STOPPED for a group
1709 			 * stop is always done with the siglock held,
1710 			 * so this check has no races.
1711 			 */
1712 			if (!t->exit_state &&
1713 			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
1714 				stop_count++;
1715 				signal_wake_up(t, 0);
1716 			}
1717 		sig->group_stop_count = stop_count;
1718 	}
1719 
1720 	if (stop_count == 0)
1721 		sig->flags = SIGNAL_STOP_STOPPED;
1722 	current->exit_code = sig->group_exit_code;
1723 	__set_current_state(TASK_STOPPED);
1724 
1725 	spin_unlock_irq(&current->sighand->siglock);
1726 	finish_stop(stop_count);
1727 	return 1;
1728 }
1729 
1730 /*
1731  * Do appropriate magic when group_stop_count > 0.
1732  * We return nonzero if we stopped, after releasing the siglock.
1733  * We return zero if we still hold the siglock and should look
1734  * for another signal without checking group_stop_count again.
1735  */
1736 static int handle_group_stop(void)
1737 {
1738 	int stop_count;
1739 
1740 	if (current->signal->group_exit_task == current) {
1741 		/*
1742 		 * Group stop is so we can do a core dump;
1743 		 * we are the initiating thread, so get on with it.
1744 		 */
1745 		current->signal->group_exit_task = NULL;
1746 		return 0;
1747 	}
1748 
1749 	if (current->signal->flags & SIGNAL_GROUP_EXIT)
1750 		/*
1751 		 * Group stop is so another thread can do a core dump,
1752 		 * or else we are racing against a death signal.
1753 		 * Just punt the stop so we can get the next signal.
1754 		 */
1755 		return 0;
1756 
1757 	/*
1758 	 * There is a group stop in progress.  We stop
1759 	 * without any associated signal being in our queue.
1760 	 */
1761 	stop_count = --current->signal->group_stop_count;
1762 	if (stop_count == 0)
1763 		current->signal->flags = SIGNAL_STOP_STOPPED;
1764 	current->exit_code = current->signal->group_exit_code;
1765 	set_current_state(TASK_STOPPED);
1766 	spin_unlock_irq(&current->sighand->siglock);
1767 	finish_stop(stop_count);
1768 	return 1;
1769 }
1770 
1771 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1772 			  struct pt_regs *regs, void *cookie)
1773 {
1774 	sigset_t *mask = &current->blocked;
1775 	int signr = 0;
1776 
1777 	try_to_freeze();
1778 
1779 relock:
1780 	spin_lock_irq(&current->sighand->siglock);
1781 	for (;;) {
1782 		struct k_sigaction *ka;
1783 
1784 		if (unlikely(current->signal->group_stop_count > 0) &&
1785 		    handle_group_stop())
1786 			goto relock;
1787 
1788 		signr = dequeue_signal(current, mask, info);
1789 
1790 		if (!signr)
1791 			break; /* will return 0 */
1792 
1793 		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1794 			ptrace_signal_deliver(regs, cookie);
1795 
1796 			/* Let the debugger run.  */
1797 			ptrace_stop(signr, signr, info);
1798 
1799 			/* We're back.  Did the debugger cancel the sig?  */
1800 			signr = current->exit_code;
1801 			if (signr == 0)
1802 				continue;
1803 
1804 			current->exit_code = 0;
1805 
1806 			/* Update the siginfo structure if the signal has
1807 			   changed.  If the debugger wanted something
1808 			   specific in the siginfo structure then it should
1809 			   have updated *info via PTRACE_SETSIGINFO.  */
1810 			if (signr != info->si_signo) {
1811 				info->si_signo = signr;
1812 				info->si_errno = 0;
1813 				info->si_code = SI_USER;
1814 				info->si_pid = current->parent->pid;
1815 				info->si_uid = current->parent->uid;
1816 			}
1817 
1818 			/* If the (new) signal is now blocked, requeue it.  */
1819 			if (sigismember(&current->blocked, signr)) {
1820 				specific_send_sig_info(signr, info, current);
1821 				continue;
1822 			}
1823 		}
1824 
1825 		ka = &current->sighand->action[signr-1];
1826 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1827 			continue;
1828 		if (ka->sa.sa_handler != SIG_DFL) {
1829 			/* Run the handler.  */
1830 			*return_ka = *ka;
1831 
1832 			if (ka->sa.sa_flags & SA_ONESHOT)
1833 				ka->sa.sa_handler = SIG_DFL;
1834 
1835 			break; /* will return non-zero "signr" value */
1836 		}
1837 
1838 		/*
1839 		 * Now we are doing the default action for this signal.
1840 		 */
1841 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
1842 			continue;
1843 
1844 		/*
1845 		 * Init of a pid space gets no signals it doesn't want from
1846 		 * within that pid space. It can of course get signals from
1847 		 * its parent pid space.
1848 		 */
1849 		if (current == child_reaper(current))
1850 			continue;
1851 
1852 		if (sig_kernel_stop(signr)) {
1853 			/*
1854 			 * The default action is to stop all threads in
1855 			 * the thread group.  The job control signals
1856 			 * do nothing in an orphaned pgrp, but SIGSTOP
1857 			 * always works.  Note that siglock needs to be
1858 			 * dropped during the call to is_orphaned_pgrp()
1859 			 * because of lock ordering with tasklist_lock.
1860 			 * This allows an intervening SIGCONT to be posted.
1861 			 * We need to check for that and bail out if necessary.
1862 			 */
1863 			if (signr != SIGSTOP) {
1864 				spin_unlock_irq(&current->sighand->siglock);
1865 
1866 				/* signals can be posted during this window */
1867 
1868 				if (is_current_pgrp_orphaned())
1869 					goto relock;
1870 
1871 				spin_lock_irq(&current->sighand->siglock);
1872 			}
1873 
1874 			if (likely(do_signal_stop(signr))) {
1875 				/* It released the siglock.  */
1876 				goto relock;
1877 			}
1878 
1879 			/*
1880 			 * We didn't actually stop, due to a race
1881 			 * with SIGCONT or something like that.
1882 			 */
1883 			continue;
1884 		}
1885 
1886 		spin_unlock_irq(&current->sighand->siglock);
1887 
1888 		/*
1889 		 * Anything else is fatal, maybe with a core dump.
1890 		 */
1891 		current->flags |= PF_SIGNALED;
1892 		if ((signr != SIGKILL) && print_fatal_signals)
1893 			print_fatal_signal(regs, signr);
1894 		if (sig_kernel_coredump(signr)) {
1895 			/*
1896 			 * If it was able to dump core, this kills all
1897 			 * other threads in the group and synchronizes with
1898 			 * their demise.  If we lost the race with another
1899 			 * thread getting here, it set group_exit_code
1900 			 * first and our do_group_exit call below will use
1901 			 * that value and ignore the one we pass it.
1902 			 */
1903 			do_coredump((long)signr, signr, regs);
1904 		}
1905 
1906 		/*
1907 		 * Death signals, no core dump.
1908 		 */
1909 		do_group_exit(signr);
1910 		/* NOTREACHED */
1911 	}
1912 	spin_unlock_irq(&current->sighand->siglock);
1913 	return signr;
1914 }
1915 
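/*
 * For orientation: a minimal sketch (not part of this file) of how an
 * architecture's signal path typically consumes get_signal_to_deliver().
 * The handle_signal() named here is arch-specific and purely
 * illustrative; real callers also handle syscall restart and
 * TIF_RESTORE_SIGMASK.
 */
#if 0	/* illustrative sketch only -- never compiled */
static void do_signal(struct pt_regs *regs)
{
	struct k_sigaction ka;
	siginfo_t info;
	int signr;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/* A handler was found: build the user-mode signal frame. */
		handle_signal(signr, &info, &ka, regs);
	}
	/* signr == 0: nothing left to deliver (or we stopped/exited). */
}
#endif
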
1916 EXPORT_SYMBOL(recalc_sigpending);
1917 EXPORT_SYMBOL_GPL(dequeue_signal);
1918 EXPORT_SYMBOL(flush_signals);
1919 EXPORT_SYMBOL(force_sig);
1920 EXPORT_SYMBOL(kill_proc);
1921 EXPORT_SYMBOL(ptrace_notify);
1922 EXPORT_SYMBOL(send_sig);
1923 EXPORT_SYMBOL(send_sig_info);
1924 EXPORT_SYMBOL(sigprocmask);
1925 EXPORT_SYMBOL(block_all_signals);
1926 EXPORT_SYMBOL(unblock_all_signals);
1927 
1928 
1929 /*
1930  * System call entry points.
1931  */
1932 
1933 asmlinkage long sys_restart_syscall(void)
1934 {
1935 	struct restart_block *restart = &current_thread_info()->restart_block;
1936 	return restart->fn(restart);
1937 }
1938 
1939 long do_no_restart_syscall(struct restart_block *param)
1940 {
1941 	return -EINTR;
1942 }
1943 
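/*
 * A sketch of the restart protocol, assuming a hypothetical
 * my_operation(): a syscall interrupted by a signal can stash state in
 * its restart_block and return -ERESTART_RESTARTBLOCK, which makes
 * sys_restart_syscall() re-enter it through restart->fn.  (The
 * arg0..arg3 scratch fields are the ones this era's struct
 * restart_block provides.)
 */
#if 0	/* illustrative sketch only -- never compiled */
static long my_restart_fn(struct restart_block *restart)
{
	/* resume the operation from the stashed argument */
	return my_operation(restart->arg0);
}

static long my_syscall_body(unsigned long remaining)
{
	long ret = my_operation(remaining);

	if (ret == -EINTR) {
		struct restart_block *restart =
			&current_thread_info()->restart_block;

		restart->fn = my_restart_fn;
		restart->arg0 = remaining;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}
#endif
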
1944 /*
1945  * We don't need to get the kernel lock - this is all local to this
1946  * particular thread.  (And that's good, because this is _heavily_
1947  * used by various programs.)
1948  */
1949 
1950 /*
1951  * This is also useful for kernel threads that want to temporarily
1952  * (or permanently) block certain signals.
1953  *
1954  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
1955  * interface happily blocks "unblockable" signals like SIGKILL
1956  * and friends.
1957  */
1958 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
1959 {
1960 	int error;
1961 
1962 	spin_lock_irq(&current->sighand->siglock);
1963 	if (oldset)
1964 		*oldset = current->blocked;
1965 
1966 	error = 0;
1967 	switch (how) {
1968 	case SIG_BLOCK:
1969 		sigorsets(&current->blocked, &current->blocked, set);
1970 		break;
1971 	case SIG_UNBLOCK:
1972 		signandsets(&current->blocked, &current->blocked, set);
1973 		break;
1974 	case SIG_SETMASK:
1975 		current->blocked = *set;
1976 		break;
1977 	default:
1978 		error = -EINVAL;
1979 	}
1980 	recalc_sigpending();
1981 	spin_unlock_irq(&current->sighand->siglock);
1982 
1983 	return error;
1984 }
1985 
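/*
 * A minimal sketch of the in-kernel usage described above, e.g. for a
 * kernel thread that wants to keep only SIGKILL deliverable.  (This is
 * illustrative only; kernel threads often use allow_signal() instead.)
 */
#if 0	/* illustrative sketch only -- never compiled */
static void block_all_but_sigkill(void)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, NULL);
}
#endif
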
1986 asmlinkage long
1987 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
1988 {
1989 	int error = -EINVAL;
1990 	sigset_t old_set, new_set;
1991 
1992 	/* XXX: Don't preclude handling different sized sigset_t's.  */
1993 	if (sigsetsize != sizeof(sigset_t))
1994 		goto out;
1995 
1996 	if (set) {
1997 		error = -EFAULT;
1998 		if (copy_from_user(&new_set, set, sizeof(*set)))
1999 			goto out;
2000 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2001 
2002 		error = sigprocmask(how, &new_set, &old_set);
2003 		if (error)
2004 			goto out;
2005 		if (oset)
2006 			goto set_old;
2007 	} else if (oset) {
2008 		spin_lock_irq(&current->sighand->siglock);
2009 		old_set = current->blocked;
2010 		spin_unlock_irq(&current->sighand->siglock);
2011 
2012 	set_old:
2013 		error = -EFAULT;
2014 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2015 			goto out;
2016 	}
2017 	error = 0;
2018 out:
2019 	return error;
2020 }
2021 
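/*
 * Userspace reaches sys_rt_sigprocmask() through the glibc
 * sigprocmask(3) wrapper.  A minimal sketch of blocking one signal:
 */
#if 0	/* illustrative userspace example -- never compiled */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, old;

	sigemptyset(&set);
	sigaddset(&set, SIGINT);
	if (sigprocmask(SIG_BLOCK, &set, &old) == -1) {
		perror("sigprocmask");
		return 1;
	}
	/* SIGINT is now held pending rather than delivered. */
	return 0;
}
#endif
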
2022 long do_sigpending(void __user *set, unsigned long sigsetsize)
2023 {
2024 	long error = -EINVAL;
2025 	sigset_t pending;
2026 
2027 	if (sigsetsize > sizeof(sigset_t))
2028 		goto out;
2029 
2030 	spin_lock_irq(&current->sighand->siglock);
2031 	sigorsets(&pending, &current->pending.signal,
2032 		  &current->signal->shared_pending.signal);
2033 	spin_unlock_irq(&current->sighand->siglock);
2034 
2035 	/* Outside the lock because only this thread touches it.  */
2036 	sigandsets(&pending, &current->blocked, &pending);
2037 
2038 	error = -EFAULT;
2039 	if (!copy_to_user(set, &pending, sigsetsize))
2040 		error = 0;
2041 
2042 out:
2043 	return error;
2044 }
2045 
2046 asmlinkage long
2047 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2048 {
2049 	return do_sigpending(set, sigsetsize);
2050 }
2051 
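/*
 * From userspace, sys_rt_sigpending() backs sigpending(3).  A minimal
 * sketch showing a blocked, raised signal being reported as pending:
 */
#if 0	/* illustrative userspace example -- never compiled */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, pend;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	raise(SIGUSR1);		/* blocked, so it stays queued */

	sigpending(&pend);
	printf("SIGUSR1 pending: %d\n", sigismember(&pend, SIGUSR1));
	return 0;
}
#endif
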
2052 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2053 
2054 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2055 {
2056 	int err;
2057 
2058 	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2059 		return -EFAULT;
2060 	if (from->si_code < 0)
2061 		return __copy_to_user(to, from, sizeof(siginfo_t))
2062 			? -EFAULT : 0;
2063 	/*
2064 	 * If you change siginfo_t structure, please be sure
2065 	 * this code is fixed accordingly.
2066 	 * Please remember to update the signalfd_copyinfo() function
2067 	 * inside fs/signalfd.c too, in case siginfo_t changes.
2068 	 * It should never copy any pad contained in the structure
2069 	 * to avoid security leaks, but must copy the generic
2070 	 * 3 ints plus the relevant union member.
2071 	 */
2072 	err = __put_user(from->si_signo, &to->si_signo);
2073 	err |= __put_user(from->si_errno, &to->si_errno);
2074 	err |= __put_user((short)from->si_code, &to->si_code);
2075 	switch (from->si_code & __SI_MASK) {
2076 	case __SI_KILL:
2077 		err |= __put_user(from->si_pid, &to->si_pid);
2078 		err |= __put_user(from->si_uid, &to->si_uid);
2079 		break;
2080 	case __SI_TIMER:
2081 		err |= __put_user(from->si_tid, &to->si_tid);
2082 		err |= __put_user(from->si_overrun, &to->si_overrun);
2083 		err |= __put_user(from->si_ptr, &to->si_ptr);
2084 		break;
2085 	case __SI_POLL:
2086 		err |= __put_user(from->si_band, &to->si_band);
2087 		err |= __put_user(from->si_fd, &to->si_fd);
2088 		break;
2089 	case __SI_FAULT:
2090 		err |= __put_user(from->si_addr, &to->si_addr);
2091 #ifdef __ARCH_SI_TRAPNO
2092 		err |= __put_user(from->si_trapno, &to->si_trapno);
2093 #endif
2094 		break;
2095 	case __SI_CHLD:
2096 		err |= __put_user(from->si_pid, &to->si_pid);
2097 		err |= __put_user(from->si_uid, &to->si_uid);
2098 		err |= __put_user(from->si_status, &to->si_status);
2099 		err |= __put_user(from->si_utime, &to->si_utime);
2100 		err |= __put_user(from->si_stime, &to->si_stime);
2101 		break;
2102 	case __SI_RT: /* This is not generated by the kernel as of now. */
2103 	case __SI_MESGQ: /* But this is */
2104 		err |= __put_user(from->si_pid, &to->si_pid);
2105 		err |= __put_user(from->si_uid, &to->si_uid);
2106 		err |= __put_user(from->si_ptr, &to->si_ptr);
2107 		break;
2108 	default: /* this is just in case for now ... */
2109 		err |= __put_user(from->si_pid, &to->si_pid);
2110 		err |= __put_user(from->si_uid, &to->si_uid);
2111 		break;
2112 	}
2113 	return err;
2114 }
2115 
2116 #endif
2117 
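/*
 * The union member copied above is what a userspace SA_SIGINFO handler
 * eventually sees.  A minimal sketch of consuming the __SI_KILL layout
 * (si_pid/si_uid); note that printf() is not async-signal-safe and is
 * used here only to keep the example short:
 */
#if 0	/* illustrative userspace example -- never compiled */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *si, void *uctx)
{
	if (si->si_code == SI_USER)	/* kill(): pid/uid are valid */
		printf("sig %d from pid %d\n", sig, (int)si->si_pid);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &sa, NULL);
	kill(getpid(), SIGUSR1);
	return 0;
}
#endif
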
2118 asmlinkage long
2119 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2120 		    siginfo_t __user *uinfo,
2121 		    const struct timespec __user *uts,
2122 		    size_t sigsetsize)
2123 {
2124 	int ret, sig;
2125 	sigset_t these;
2126 	struct timespec ts;
2127 	siginfo_t info;
2128 	long timeout = 0;
2129 
2130 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2131 	if (sigsetsize != sizeof(sigset_t))
2132 		return -EINVAL;
2133 
2134 	if (copy_from_user(&these, uthese, sizeof(these)))
2135 		return -EFAULT;
2136 
2137 	/*
2138 	 * Invert the set of allowed signals to get those we
2139 	 * want to block.
2140 	 */
2141 	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2142 	signotset(&these);
2143 
2144 	if (uts) {
2145 		if (copy_from_user(&ts, uts, sizeof(ts)))
2146 			return -EFAULT;
2147 		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2148 		    || ts.tv_sec < 0)
2149 			return -EINVAL;
2150 	}
2151 
2152 	spin_lock_irq(&current->sighand->siglock);
2153 	sig = dequeue_signal(current, &these, &info);
2154 	if (!sig) {
2155 		timeout = MAX_SCHEDULE_TIMEOUT;
2156 		if (uts)
2157 			timeout = (timespec_to_jiffies(&ts)
2158 				   + (ts.tv_sec || ts.tv_nsec)); /* +1 jiffy for a nonzero timeout */
2159 
2160 		if (timeout) {
2161 			/* None ready -- temporarily unblock those we're
2162 			 * interested in while we sleep, so that we'll be
2163 			 * awakened when they arrive.  */
2164 			current->real_blocked = current->blocked;
2165 			sigandsets(&current->blocked, &current->blocked, &these);
2166 			recalc_sigpending();
2167 			spin_unlock_irq(&current->sighand->siglock);
2168 
2169 			timeout = schedule_timeout_interruptible(timeout);
2170 
2171 			spin_lock_irq(&current->sighand->siglock);
2172 			sig = dequeue_signal(current, &these, &info);
2173 			current->blocked = current->real_blocked;
2174 			siginitset(&current->real_blocked, 0);
2175 			recalc_sigpending();
2176 		}
2177 	}
2178 	spin_unlock_irq(&current->sighand->siglock);
2179 
2180 	if (sig) {
2181 		ret = sig;
2182 		if (uinfo) {
2183 			if (copy_siginfo_to_user(uinfo, &info))
2184 				ret = -EFAULT;
2185 		}
2186 	} else {
2187 		ret = -EAGAIN;
2188 		if (timeout)
2189 			ret = -EINTR;
2190 	}
2191 
2192 	return ret;
2193 }
2194 
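/*
 * Userspace reaches sys_rt_sigtimedwait() through sigtimedwait(3).  A
 * minimal sketch of waiting synchronously for a blocked signal (the
 * signal must be blocked first, or it may be delivered to a handler
 * instead of being dequeued here):
 */
#if 0	/* illustrative userspace example -- never compiled */
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { 5, 0 };		/* 5 second timeout */
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGTERM);
	sigprocmask(SIG_BLOCK, &set, NULL);

	sig = sigtimedwait(&set, &info, &ts);
	if (sig == SIGTERM)
		printf("SIGTERM from pid %d\n", (int)info.si_pid);
	else
		perror("sigtimedwait"); /* EAGAIN: timeout, EINTR: interrupted */
	return 0;
}
#endif
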
2195 asmlinkage long
2196 sys_kill(int pid, int sig)
2197 {
2198 	struct siginfo info;
2199 
2200 	info.si_signo = sig;
2201 	info.si_errno = 0;
2202 	info.si_code = SI_USER;
2203 	info.si_pid = current->tgid;
2204 	info.si_uid = current->uid;
2205 
2206 	return kill_something_info(sig, &info, pid);
2207 }
2208 
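/*
 * The pid argument is interpreted by kill_something_info().  A sketch
 * of the resulting kill(2) conventions as seen from userspace:
 */
#if 0	/* illustrative userspace example -- never compiled */
#include <signal.h>

static void kill_examples(void)
{
	kill(1234, SIGTERM);	/* pid > 0:  exactly one process        */
	kill(0, SIGHUP);	/* pid == 0: our own process group      */
	kill(-5678, SIGTERM);	/* pid < -1: process group 5678         */
	kill(-1, SIGTERM);	/* pid == -1: everything we may signal  */
	kill(1234, 0);		/* sig == 0: existence/permission probe */
}
#endif
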
2209 static int do_tkill(int tgid, int pid, int sig)
2210 {
2211 	int error;
2212 	struct siginfo info;
2213 	struct task_struct *p;
2214 
2215 	error = -ESRCH;
2216 	info.si_signo = sig;
2217 	info.si_errno = 0;
2218 	info.si_code = SI_TKILL;
2219 	info.si_pid = current->tgid;
2220 	info.si_uid = current->uid;
2221 
2222 	read_lock(&tasklist_lock);
2223 	p = find_task_by_pid(pid);
2224 	if (p && (tgid <= 0 || p->tgid == tgid)) {
2225 		error = check_kill_permission(sig, &info, p);
2226 		/*
2227 		 * The null signal is a permissions and process existence
2228 		 * probe.  No signal is actually delivered.
2229 		 */
2230 		if (!error && sig && p->sighand) {
2231 			spin_lock_irq(&p->sighand->siglock);
2232 			handle_stop_signal(sig, p);
2233 			error = specific_send_sig_info(sig, &info, p);
2234 			spin_unlock_irq(&p->sighand->siglock);
2235 		}
2236 	}
2237 	read_unlock(&tasklist_lock);
2238 
2239 	return error;
2240 }
2241 
2242 /**
2243  *  sys_tgkill - send signal to one specific thread
2244  *  @tgid: the thread group ID of the thread
2245  *  @pid: the PID of the thread
2246  *  @sig: signal to be sent
2247  *
2248  *  This syscall also checks @tgid and returns -ESRCH even if the PID
2249  *  exists but no longer belongs to the target thread group.  This
2250  *  solves the problem of threads exiting and their PIDs being reused.
2251  */
2252 asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2253 {
2254 	/* This is only valid for single tasks */
2255 	if (pid <= 0 || tgid <= 0)
2256 		return -EINVAL;
2257 
2258 	return do_tkill(tgid, pid, sig);
2259 }
2260 
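/*
 * glibc of this era ships no tgkill() wrapper, so callers typically go
 * through syscall(2); NPTL's pthread_kill() does the equivalent
 * internally.  A minimal sketch:
 */
#if 0	/* illustrative userspace example -- never compiled */
#include <sys/syscall.h>
#include <unistd.h>
#include <signal.h>

static int my_tgkill(pid_t tgid, pid_t tid, int sig)
{
	return syscall(SYS_tgkill, tgid, tid, sig);
}
#endif
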
2261 /*
2262  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2263  */
2264 asmlinkage long
2265 sys_tkill(int pid, int sig)
2266 {
2267 	/* This is only valid for single tasks */
2268 	if (pid <= 0)
2269 		return -EINVAL;
2270 
2271 	return do_tkill(0, pid, sig);
2272 }
2273 
2274 asmlinkage long
2275 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2276 {
2277 	siginfo_t info;
2278 
2279 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2280 		return -EFAULT;
2281 
2282 	/* Not even root can pretend to send signals from the kernel.
2283 	   Nor can they impersonate a kill(), which adds source info.  */
2284 	if (info.si_code >= 0)
2285 		return -EPERM;
2286 	info.si_signo = sig;
2287 
2288 	/* POSIX.1b doesn't mention process groups.  */
2289 	return kill_proc_info(sig, &info, pid);
2290 }
2291 
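/*
 * The intended userspace entry point is sigqueue(3), whose glibc
 * wrapper fills in si_code = SI_QUEUE (a negative value), which is
 * exactly what the si_code >= 0 check above insists on.  A sketch:
 */
#if 0	/* illustrative userspace example -- never compiled */
#include <signal.h>

static int queue_value(pid_t pid, int val)
{
	union sigval sv;

	sv.sival_int = val;	/* arrives in the handler's si_value */
	return sigqueue(pid, SIGUSR1, sv);
}
#endif
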
2292 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2293 {
2294 	struct k_sigaction *k;
2295 	sigset_t mask;
2296 
2297 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2298 		return -EINVAL;
2299 
2300 	k = &current->sighand->action[sig-1];
2301 
2302 	spin_lock_irq(&current->sighand->siglock);
2303 	if (signal_pending(current)) {
2304 		/*
2305 		 * If there might be a fatal signal pending on multiple
2306 		 * threads, make sure we take it before changing the action.
2307 		 */
2308 		spin_unlock_irq(&current->sighand->siglock);
2309 		return -ERESTARTNOINTR;
2310 	}
2311 
2312 	if (oact)
2313 		*oact = *k;
2314 
2315 	if (act) {
2316 		sigdelsetmask(&act->sa.sa_mask,
2317 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
2318 		*k = *act;
2319 		/*
2320 		 * POSIX 3.3.1.3:
2321 		 *  "Setting a signal action to SIG_IGN for a signal that is
2322 		 *   pending shall cause the pending signal to be discarded,
2323 		 *   whether or not it is blocked."
2324 		 *
2325 		 *  "Setting a signal action to SIG_DFL for a signal that is
2326 		 *   pending and whose default action is to ignore the signal
2327 		 *   (for example, SIGCHLD), shall cause the pending signal to
2328 		 *   be discarded, whether or not it is blocked"
2329 		 */
2330 		if (act->sa.sa_handler == SIG_IGN ||
2331 		   (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
2332 			struct task_struct *t = current;
2333 			sigemptyset(&mask);
2334 			sigaddset(&mask, sig);
2335 			rm_from_queue_full(&mask, &t->signal->shared_pending);
2336 			do {
2337 				rm_from_queue_full(&mask, &t->pending);
2338 				recalc_sigpending_and_wake(t);
2339 				t = next_thread(t);
2340 			} while (t != current);
2341 		}
2342 	}
2343 
2344 	spin_unlock_irq(&current->sighand->siglock);
2345 	return 0;
2346 }
2347 
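/*
 * The POSIX 3.3.1.3 behaviour above is observable from userspace: a
 * pending (even blocked) signal disappears once its action becomes
 * SIG_IGN.  A minimal sketch:
 */
#if 0	/* illustrative userspace example -- never compiled */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, pend;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);
	raise(SIGUSR1);			/* blocked, so now pending */

	signal(SIGUSR1, SIG_IGN);	/* discards the pending signal */

	sigpending(&pend);
	printf("still pending: %d\n", sigismember(&pend, SIGUSR1)); /* 0 */
	return 0;
}
#endif
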
2348 int
2349 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2350 {
2351 	stack_t oss;
2352 	int error;
2353 
2354 	if (uoss) {
2355 		oss.ss_sp = (void __user *) current->sas_ss_sp;
2356 		oss.ss_size = current->sas_ss_size;
2357 		oss.ss_flags = sas_ss_flags(sp);
2358 	}
2359 
2360 	if (uss) {
2361 		void __user *ss_sp;
2362 		size_t ss_size;
2363 		int ss_flags;
2364 
2365 		error = -EFAULT;
2366 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2367 		    || __get_user(ss_sp, &uss->ss_sp)
2368 		    || __get_user(ss_flags, &uss->ss_flags)
2369 		    || __get_user(ss_size, &uss->ss_size))
2370 			goto out;
2371 
2372 		error = -EPERM;
2373 		if (on_sig_stack(sp))
2374 			goto out;
2375 
2376 		error = -EINVAL;
2377 		/*
2378 		 * Note - this code used to test ss_flags incorrectly:
2379 		 * old code may have been written using ss_flags == 0
2380 		 * to mean ss_flags == SS_ONSTACK, as this was the
2381 		 * only way that worked, so this check still accepts
2382 		 * ss_flags == 0 in order to preserve that older
2383 		 * mechanism.
2384 		 */
2385 		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2386 			goto out;
2387 
2388 		if (ss_flags == SS_DISABLE) {
2389 			ss_size = 0;
2390 			ss_sp = NULL;
2391 		} else {
2392 			error = -ENOMEM;
2393 			if (ss_size < MINSIGSTKSZ)
2394 				goto out;
2395 		}
2396 
2397 		current->sas_ss_sp = (unsigned long) ss_sp;
2398 		current->sas_ss_size = ss_size;
2399 	}
2400 
2401 	if (uoss) {
2402 		error = -EFAULT;
2403 		if (copy_to_user(uoss, &oss, sizeof(oss)))
2404 			goto out;
2405 	}
2406 
2407 	error = 0;
2408 out:
2409 	return error;
2410 }
2411 
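/*
 * A minimal userspace sketch of do_sigaltstack()'s caller-visible
 * contract: install an alternate stack, then ask for a handler to run
 * on it with SA_ONSTACK (useful for catching stack-overflow SIGSEGV):
 */
#if 0	/* illustrative userspace example -- never compiled */
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void on_segv(int sig)
{
	/* runs on the alternate stack */
	_exit(1);
}

int main(void)
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	sigaltstack(&ss, NULL);

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_segv;
	sa.sa_flags = SA_ONSTACK;
	sigaction(SIGSEGV, &sa, NULL);
	return 0;
}
#endif
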
2412 #ifdef __ARCH_WANT_SYS_SIGPENDING
2413 
2414 asmlinkage long
2415 sys_sigpending(old_sigset_t __user *set)
2416 {
2417 	return do_sigpending(set, sizeof(*set));
2418 }
2419 
2420 #endif
2421 
2422 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2423 /* Some platforms have their own version with special arguments;
2424    others support only sys_rt_sigprocmask.  */
2425 
2426 asmlinkage long
2427 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2428 {
2429 	int error;
2430 	old_sigset_t old_set, new_set;
2431 
2432 	if (set) {
2433 		error = -EFAULT;
2434 		if (copy_from_user(&new_set, set, sizeof(*set)))
2435 			goto out;
2436 		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2437 
2438 		spin_lock_irq(&current->sighand->siglock);
2439 		old_set = current->blocked.sig[0];
2440 
2441 		error = 0;
2442 		switch (how) {
2443 		default:
2444 			error = -EINVAL;
2445 			break;
2446 		case SIG_BLOCK:
2447 			sigaddsetmask(&current->blocked, new_set);
2448 			break;
2449 		case SIG_UNBLOCK:
2450 			sigdelsetmask(&current->blocked, new_set);
2451 			break;
2452 		case SIG_SETMASK:
2453 			current->blocked.sig[0] = new_set;
2454 			break;
2455 		}
2456 
2457 		recalc_sigpending();
2458 		spin_unlock_irq(&current->sighand->siglock);
2459 		if (error)
2460 			goto out;
2461 		if (oset)
2462 			goto set_old;
2463 	} else if (oset) {
2464 		old_set = current->blocked.sig[0];
2465 	set_old:
2466 		error = -EFAULT;
2467 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2468 			goto out;
2469 	}
2470 	error = 0;
2471 out:
2472 	return error;
2473 }
2474 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2475 
2476 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2477 asmlinkage long
2478 sys_rt_sigaction(int sig,
2479 		 const struct sigaction __user *act,
2480 		 struct sigaction __user *oact,
2481 		 size_t sigsetsize)
2482 {
2483 	struct k_sigaction new_sa, old_sa;
2484 	int ret = -EINVAL;
2485 
2486 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2487 	if (sigsetsize != sizeof(sigset_t))
2488 		goto out;
2489 
2490 	if (act) {
2491 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2492 			return -EFAULT;
2493 	}
2494 
2495 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2496 
2497 	if (!ret && oact) {
2498 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2499 			return -EFAULT;
2500 	}
2501 out:
2502 	return ret;
2503 }
2504 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2505 
2506 #ifdef __ARCH_WANT_SYS_SGETMASK
2507 
2508 /*
2509  * For backwards compatibility.  Functionality superseded by sigprocmask.
2510  */
2511 asmlinkage long
2512 sys_sgetmask(void)
2513 {
2514 	/* SMP safe */
2515 	return current->blocked.sig[0];
2516 }
2517 
2518 asmlinkage long
2519 sys_ssetmask(int newmask)
2520 {
2521 	int old;
2522 
2523 	spin_lock_irq(&current->sighand->siglock);
2524 	old = current->blocked.sig[0];
2525 
2526 	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2527 						  sigmask(SIGSTOP)));
2528 	recalc_sigpending();
2529 	spin_unlock_irq(&current->sighand->siglock);
2530 
2531 	return old;
2532 }
2533 #endif /* __ARCH_WANT_SYS_SGETMASK */
2534 
2535 #ifdef __ARCH_WANT_SYS_SIGNAL
2536 /*
2537  * For backwards compatibility.  Functionality superseded by sigaction.
2538  */
2539 asmlinkage unsigned long
2540 sys_signal(int sig, __sighandler_t handler)
2541 {
2542 	struct k_sigaction new_sa, old_sa;
2543 	int ret;
2544 
2545 	new_sa.sa.sa_handler = handler;
2546 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2547 	sigemptyset(&new_sa.sa.sa_mask);
2548 
2549 	ret = do_sigaction(sig, &new_sa, &old_sa);
2550 
2551 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2552 }
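
/*
 * A sketch of the SysV behaviour that SA_ONESHOT produces: the
 * disposition resets to SIG_DFL on delivery, so classic handlers
 * re-install themselves (racily -- the reason sigaction(2) is
 * preferred):
 */
#if 0	/* illustrative userspace example -- never compiled */
#include <signal.h>

static void handler(int sig)
{
	signal(sig, handler);	/* re-arm before the next delivery */
	/* ... handle the signal ... */
}
#endif
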
2553 #endif /* __ARCH_WANT_SYS_SIGNAL */
2554 
2555 #ifdef __ARCH_WANT_SYS_PAUSE
2556 
2557 asmlinkage long
2558 sys_pause(void)
2559 {
2560 	current->state = TASK_INTERRUPTIBLE;
2561 	schedule();
2562 	return -ERESTARTNOHAND;
2563 }
2564 
2565 #endif
2566 
2567 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2568 asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2569 {
2570 	sigset_t newset;
2571 
2572 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2573 	if (sigsetsize != sizeof(sigset_t))
2574 		return -EINVAL;
2575 
2576 	if (copy_from_user(&newset, unewset, sizeof(newset)))
2577 		return -EFAULT;
2578 	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2579 
2580 	spin_lock_irq(&current->sighand->siglock);
2581 	current->saved_sigmask = current->blocked;
2582 	current->blocked = newset;
2583 	recalc_sigpending();
2584 	spin_unlock_irq(&current->sighand->siglock);
2585 
2586 	current->state = TASK_INTERRUPTIBLE;
2587 	schedule();
2588 	set_thread_flag(TIF_RESTORE_SIGMASK);
2589 	return -ERESTARTNOHAND;
2590 }
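
/*
 * The saved_sigmask/TIF_RESTORE_SIGMASK dance above is what makes the
 * classic race-free wait idiom work from userspace.  A minimal sketch:
 */
#if 0	/* illustrative userspace example -- never compiled */
#include <signal.h>

static volatile sig_atomic_t got_usr1;

static void on_usr1(int sig)
{
	got_usr1 = 1;
}

int main(void)
{
	sigset_t block, orig;

	signal(SIGUSR1, on_usr1);
	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &orig);

	/* No window here in which SIGUSR1 can be lost. */
	while (!got_usr1)
		sigsuspend(&orig);	/* atomically unblock and sleep */

	sigprocmask(SIG_SETMASK, &orig, NULL);
	return 0;
}
#endif
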
2591 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
2592 
2593 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2594 {
2595 	return NULL;
2596 }
2597 
2598 void __init signals_init(void)
2599 {
2600 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2601 }
2602