xref: /linux/kernel/signal.c (revision 3eeebf17f31c583f83e081b17b3076477cb96886)
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *		Changes to use preallocated sigqueue structures
10  *		to allow signals to be sent reliably.
11  */
12 
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/security.h>
21 #include <linux/syscalls.h>
22 #include <linux/ptrace.h>
23 #include <linux/signal.h>
24 #include <linux/signalfd.h>
25 #include <linux/tracehook.h>
26 #include <linux/capability.h>
27 #include <linux/freezer.h>
28 #include <linux/pid_namespace.h>
29 #include <linux/nsproxy.h>
30 #include <trace/sched.h>
31 
32 #include <asm/param.h>
33 #include <asm/uaccess.h>
34 #include <asm/unistd.h>
35 #include <asm/siginfo.h>
36 #include "audit.h"	/* audit_signal_info() */
37 
38 /*
39  * SLAB caches for signal bits.
40  */
41 
42 static struct kmem_cache *sigqueue_cachep;
43 
44 static void __user *sig_handler(struct task_struct *t, int sig)
45 {
46 	return t->sighand->action[sig - 1].sa.sa_handler;
47 }
48 
49 static int sig_handler_ignored(void __user *handler, int sig)
50 {
51 	/* Is it explicitly or implicitly ignored? */
52 	return handler == SIG_IGN ||
53 		(handler == SIG_DFL && sig_kernel_ignore(sig));
54 }
55 
56 static int sig_ignored(struct task_struct *t, int sig)
57 {
58 	void __user *handler;
59 
60 	/*
61 	 * Blocked signals are never ignored, since the
62 	 * signal handler may change by the time it is
63 	 * unblocked.
64 	 */
65 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
66 		return 0;
67 
68 	handler = sig_handler(t, sig);
69 	if (!sig_handler_ignored(handler, sig))
70 		return 0;
71 
72 	/*
73 	 * Tracers may want to know about even ignored signals.
74 	 */
75 	return !tracehook_consider_ignored_signal(t, sig, handler);
76 }
77 
78 /*
79  * Re-calculate pending state from the set of locally pending
80  * signals, globally pending signals, and blocked signals.
81  */
82 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
83 {
84 	unsigned long ready;
85 	long i;
86 
87 	switch (_NSIG_WORDS) {
88 	default:
89 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
90 			ready |= signal->sig[i] &~ blocked->sig[i];
91 		break;
92 
93 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
94 		ready |= signal->sig[2] &~ blocked->sig[2];
95 		ready |= signal->sig[1] &~ blocked->sig[1];
96 		ready |= signal->sig[0] &~ blocked->sig[0];
97 		break;
98 
99 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
100 		ready |= signal->sig[0] &~ blocked->sig[0];
101 		break;
102 
103 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
104 	}
105 	return ready !=	0;
106 }
107 
108 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
109 
110 static int recalc_sigpending_tsk(struct task_struct *t)
111 {
112 	if (t->signal->group_stop_count > 0 ||
113 	    PENDING(&t->pending, &t->blocked) ||
114 	    PENDING(&t->signal->shared_pending, &t->blocked)) {
115 		set_tsk_thread_flag(t, TIF_SIGPENDING);
116 		return 1;
117 	}
118 	/*
119 	 * We must never clear the flag in another thread, or in current
120 	 * when it's possible the current syscall is returning -ERESTART*.
121 	 * So we don't clear it here, and only callers who know they should do.
122 	 */
123 	return 0;
124 }
125 
126 /*
127  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
128  * This is superfluous when called on current, the wakeup is a harmless no-op.
129  */
130 void recalc_sigpending_and_wake(struct task_struct *t)
131 {
132 	if (recalc_sigpending_tsk(t))
133 		signal_wake_up(t, 0);
134 }
135 
136 void recalc_sigpending(void)
137 {
138 	if (unlikely(tracehook_force_sigpending()))
139 		set_thread_flag(TIF_SIGPENDING);
140 	else if (!recalc_sigpending_tsk(current) && !freezing(current))
141 		clear_thread_flag(TIF_SIGPENDING);
142 
143 }
144 
145 /* Given the mask, find the first available signal that should be serviced. */
146 
147 int next_signal(struct sigpending *pending, sigset_t *mask)
148 {
149 	unsigned long i, *s, *m, x;
150 	int sig = 0;
151 
152 	s = pending->signal.sig;
153 	m = mask->sig;
154 	switch (_NSIG_WORDS) {
155 	default:
156 		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
157 			if ((x = *s &~ *m) != 0) {
158 				sig = ffz(~x) + i*_NSIG_BPW + 1;
159 				break;
160 			}
161 		break;
162 
163 	case 2: if ((x = s[0] &~ m[0]) != 0)
164 			sig = 1;
165 		else if ((x = s[1] &~ m[1]) != 0)
166 			sig = _NSIG_BPW + 1;
167 		else
168 			break;
169 		sig += ffz(~x);
170 		break;
171 
172 	case 1: if ((x = *s &~ *m) != 0)
173 			sig = ffz(~x) + 1;
174 		break;
175 	}
176 
177 	return sig;
178 }
179 
180 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
181 					 int override_rlimit)
182 {
183 	struct sigqueue *q = NULL;
184 	struct user_struct *user;
185 
186 	/*
187 	 * In order to avoid problems with "switch_user()", we want to make
188 	 * sure that the compiler doesn't re-load "t->user"
189 	 */
190 	user = t->user;
191 	barrier();
192 	atomic_inc(&user->sigpending);
193 	if (override_rlimit ||
194 	    atomic_read(&user->sigpending) <=
195 			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
196 		q = kmem_cache_alloc(sigqueue_cachep, flags);
197 	if (unlikely(q == NULL)) {
198 		atomic_dec(&user->sigpending);
199 	} else {
200 		INIT_LIST_HEAD(&q->list);
201 		q->flags = 0;
202 		q->user = get_uid(user);
203 	}
204 	return(q);
205 }
206 
207 static void __sigqueue_free(struct sigqueue *q)
208 {
209 	if (q->flags & SIGQUEUE_PREALLOC)
210 		return;
211 	atomic_dec(&q->user->sigpending);
212 	free_uid(q->user);
213 	kmem_cache_free(sigqueue_cachep, q);
214 }
215 
216 void flush_sigqueue(struct sigpending *queue)
217 {
218 	struct sigqueue *q;
219 
220 	sigemptyset(&queue->signal);
221 	while (!list_empty(&queue->list)) {
222 		q = list_entry(queue->list.next, struct sigqueue , list);
223 		list_del_init(&q->list);
224 		__sigqueue_free(q);
225 	}
226 }
227 
228 /*
229  * Flush all pending signals for a task.
230  */
231 void flush_signals(struct task_struct *t)
232 {
233 	unsigned long flags;
234 
235 	spin_lock_irqsave(&t->sighand->siglock, flags);
236 	clear_tsk_thread_flag(t, TIF_SIGPENDING);
237 	flush_sigqueue(&t->pending);
238 	flush_sigqueue(&t->signal->shared_pending);
239 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
240 }
241 
242 static void __flush_itimer_signals(struct sigpending *pending)
243 {
244 	sigset_t signal, retain;
245 	struct sigqueue *q, *n;
246 
247 	signal = pending->signal;
248 	sigemptyset(&retain);
249 
250 	list_for_each_entry_safe(q, n, &pending->list, list) {
251 		int sig = q->info.si_signo;
252 
253 		if (likely(q->info.si_code != SI_TIMER)) {
254 			sigaddset(&retain, sig);
255 		} else {
256 			sigdelset(&signal, sig);
257 			list_del_init(&q->list);
258 			__sigqueue_free(q);
259 		}
260 	}
261 
262 	sigorsets(&pending->signal, &signal, &retain);
263 }
264 
265 void flush_itimer_signals(void)
266 {
267 	struct task_struct *tsk = current;
268 	unsigned long flags;
269 
270 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
271 	__flush_itimer_signals(&tsk->pending);
272 	__flush_itimer_signals(&tsk->signal->shared_pending);
273 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
274 }
275 
276 void ignore_signals(struct task_struct *t)
277 {
278 	int i;
279 
280 	for (i = 0; i < _NSIG; ++i)
281 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
282 
283 	flush_signals(t);
284 }
285 
286 /*
287  * Flush all handlers for a task.
288  */
289 
290 void
291 flush_signal_handlers(struct task_struct *t, int force_default)
292 {
293 	int i;
294 	struct k_sigaction *ka = &t->sighand->action[0];
295 	for (i = _NSIG ; i != 0 ; i--) {
296 		if (force_default || ka->sa.sa_handler != SIG_IGN)
297 			ka->sa.sa_handler = SIG_DFL;
298 		ka->sa.sa_flags = 0;
299 		sigemptyset(&ka->sa.sa_mask);
300 		ka++;
301 	}
302 }
303 
304 int unhandled_signal(struct task_struct *tsk, int sig)
305 {
306 	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
307 	if (is_global_init(tsk))
308 		return 1;
309 	if (handler != SIG_IGN && handler != SIG_DFL)
310 		return 0;
311 	return !tracehook_consider_fatal_signal(tsk, sig, handler);
312 }
313 
314 
315 /* Notify the system that a driver wants to block all signals for this
316  * process, and wants to be notified if any signals at all were to be
317  * sent/acted upon.  If the notifier routine returns non-zero, then the
318  * signal will be acted upon after all.  If the notifier routine returns 0,
319  * then then signal will be blocked.  Only one block per process is
320  * allowed.  priv is a pointer to private data that the notifier routine
321  * can use to determine if the signal should be blocked or not.  */
322 
323 void
324 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
325 {
326 	unsigned long flags;
327 
328 	spin_lock_irqsave(&current->sighand->siglock, flags);
329 	current->notifier_mask = mask;
330 	current->notifier_data = priv;
331 	current->notifier = notifier;
332 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
333 }
334 
335 /* Notify the system that blocking has ended. */
336 
337 void
338 unblock_all_signals(void)
339 {
340 	unsigned long flags;
341 
342 	spin_lock_irqsave(&current->sighand->siglock, flags);
343 	current->notifier = NULL;
344 	current->notifier_data = NULL;
345 	recalc_sigpending();
346 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
347 }
348 
349 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
350 {
351 	struct sigqueue *q, *first = NULL;
352 
353 	/*
354 	 * Collect the siginfo appropriate to this signal.  Check if
355 	 * there is another siginfo for the same signal.
356 	*/
357 	list_for_each_entry(q, &list->list, list) {
358 		if (q->info.si_signo == sig) {
359 			if (first)
360 				goto still_pending;
361 			first = q;
362 		}
363 	}
364 
365 	sigdelset(&list->signal, sig);
366 
367 	if (first) {
368 still_pending:
369 		list_del_init(&first->list);
370 		copy_siginfo(info, &first->info);
371 		__sigqueue_free(first);
372 	} else {
373 		/* Ok, it wasn't in the queue.  This must be
374 		   a fast-pathed signal or we must have been
375 		   out of queue space.  So zero out the info.
376 		 */
377 		info->si_signo = sig;
378 		info->si_errno = 0;
379 		info->si_code = 0;
380 		info->si_pid = 0;
381 		info->si_uid = 0;
382 	}
383 }
384 
385 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
386 			siginfo_t *info)
387 {
388 	int sig = next_signal(pending, mask);
389 
390 	if (sig) {
391 		if (current->notifier) {
392 			if (sigismember(current->notifier_mask, sig)) {
393 				if (!(current->notifier)(current->notifier_data)) {
394 					clear_thread_flag(TIF_SIGPENDING);
395 					return 0;
396 				}
397 			}
398 		}
399 
400 		collect_signal(sig, pending, info);
401 	}
402 
403 	return sig;
404 }
405 
406 /*
407  * Dequeue a signal and return the element to the caller, which is
408  * expected to free it.
409  *
410  * All callers have to hold the siglock.
411  */
412 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
413 {
414 	int signr;
415 
416 	/* We only dequeue private signals from ourselves, we don't let
417 	 * signalfd steal them
418 	 */
419 	signr = __dequeue_signal(&tsk->pending, mask, info);
420 	if (!signr) {
421 		signr = __dequeue_signal(&tsk->signal->shared_pending,
422 					 mask, info);
423 		/*
424 		 * itimer signal ?
425 		 *
426 		 * itimers are process shared and we restart periodic
427 		 * itimers in the signal delivery path to prevent DoS
428 		 * attacks in the high resolution timer case. This is
429 		 * compliant with the old way of self restarting
430 		 * itimers, as the SIGALRM is a legacy signal and only
431 		 * queued once. Changing the restart behaviour to
432 		 * restart the timer in the signal dequeue path is
433 		 * reducing the timer noise on heavy loaded !highres
434 		 * systems too.
435 		 */
436 		if (unlikely(signr == SIGALRM)) {
437 			struct hrtimer *tmr = &tsk->signal->real_timer;
438 
439 			if (!hrtimer_is_queued(tmr) &&
440 			    tsk->signal->it_real_incr.tv64 != 0) {
441 				hrtimer_forward(tmr, tmr->base->get_time(),
442 						tsk->signal->it_real_incr);
443 				hrtimer_restart(tmr);
444 			}
445 		}
446 	}
447 
448 	recalc_sigpending();
449 	if (!signr)
450 		return 0;
451 
452 	if (unlikely(sig_kernel_stop(signr))) {
453 		/*
454 		 * Set a marker that we have dequeued a stop signal.  Our
455 		 * caller might release the siglock and then the pending
456 		 * stop signal it is about to process is no longer in the
457 		 * pending bitmasks, but must still be cleared by a SIGCONT
458 		 * (and overruled by a SIGKILL).  So those cases clear this
459 		 * shared flag after we've set it.  Note that this flag may
460 		 * remain set after the signal we return is ignored or
461 		 * handled.  That doesn't matter because its only purpose
462 		 * is to alert stop-signal processing code when another
463 		 * processor has come along and cleared the flag.
464 		 */
465 		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
466 	}
467 	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
468 		/*
469 		 * Release the siglock to ensure proper locking order
470 		 * of timer locks outside of siglocks.  Note, we leave
471 		 * irqs disabled here, since the posix-timers code is
472 		 * about to disable them again anyway.
473 		 */
474 		spin_unlock(&tsk->sighand->siglock);
475 		do_schedule_next_timer(info);
476 		spin_lock(&tsk->sighand->siglock);
477 	}
478 	return signr;
479 }
480 
481 /*
482  * Tell a process that it has a new active signal..
483  *
484  * NOTE! we rely on the previous spin_lock to
485  * lock interrupts for us! We can only be called with
486  * "siglock" held, and the local interrupt must
487  * have been disabled when that got acquired!
488  *
489  * No need to set need_resched since signal event passing
490  * goes through ->blocked
491  */
492 void signal_wake_up(struct task_struct *t, int resume)
493 {
494 	unsigned int mask;
495 
496 	set_tsk_thread_flag(t, TIF_SIGPENDING);
497 
498 	/*
499 	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
500 	 * case. We don't check t->state here because there is a race with it
501 	 * executing another processor and just now entering stopped state.
502 	 * By using wake_up_state, we ensure the process will wake up and
503 	 * handle its death signal.
504 	 */
505 	mask = TASK_INTERRUPTIBLE;
506 	if (resume)
507 		mask |= TASK_WAKEKILL;
508 	if (!wake_up_state(t, mask))
509 		kick_process(t);
510 }
511 
512 /*
513  * Remove signals in mask from the pending set and queue.
514  * Returns 1 if any signals were found.
515  *
516  * All callers must be holding the siglock.
517  *
518  * This version takes a sigset mask and looks at all signals,
519  * not just those in the first mask word.
520  */
521 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
522 {
523 	struct sigqueue *q, *n;
524 	sigset_t m;
525 
526 	sigandsets(&m, mask, &s->signal);
527 	if (sigisemptyset(&m))
528 		return 0;
529 
530 	signandsets(&s->signal, &s->signal, mask);
531 	list_for_each_entry_safe(q, n, &s->list, list) {
532 		if (sigismember(mask, q->info.si_signo)) {
533 			list_del_init(&q->list);
534 			__sigqueue_free(q);
535 		}
536 	}
537 	return 1;
538 }
539 /*
540  * Remove signals in mask from the pending set and queue.
541  * Returns 1 if any signals were found.
542  *
543  * All callers must be holding the siglock.
544  */
545 static int rm_from_queue(unsigned long mask, struct sigpending *s)
546 {
547 	struct sigqueue *q, *n;
548 
549 	if (!sigtestsetmask(&s->signal, mask))
550 		return 0;
551 
552 	sigdelsetmask(&s->signal, mask);
553 	list_for_each_entry_safe(q, n, &s->list, list) {
554 		if (q->info.si_signo < SIGRTMIN &&
555 		    (mask & sigmask(q->info.si_signo))) {
556 			list_del_init(&q->list);
557 			__sigqueue_free(q);
558 		}
559 	}
560 	return 1;
561 }
562 
563 /*
564  * Bad permissions for sending the signal
565  */
566 static int check_kill_permission(int sig, struct siginfo *info,
567 				 struct task_struct *t)
568 {
569 	struct pid *sid;
570 	int error;
571 
572 	if (!valid_signal(sig))
573 		return -EINVAL;
574 
575 	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
576 		return 0;
577 
578 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
579 	if (error)
580 		return error;
581 
582 	if ((current->euid ^ t->suid) && (current->euid ^ t->uid) &&
583 	    (current->uid  ^ t->suid) && (current->uid  ^ t->uid) &&
584 	    !capable(CAP_KILL)) {
585 		switch (sig) {
586 		case SIGCONT:
587 			sid = task_session(t);
588 			/*
589 			 * We don't return the error if sid == NULL. The
590 			 * task was unhashed, the caller must notice this.
591 			 */
592 			if (!sid || sid == task_session(current))
593 				break;
594 		default:
595 			return -EPERM;
596 		}
597 	}
598 
599 	return security_task_kill(t, info, sig, 0);
600 }
601 
602 /*
603  * Handle magic process-wide effects of stop/continue signals. Unlike
604  * the signal actions, these happen immediately at signal-generation
605  * time regardless of blocking, ignoring, or handling.  This does the
606  * actual continuing for SIGCONT, but not the actual stopping for stop
607  * signals. The process stop is done as a signal action for SIG_DFL.
608  *
609  * Returns true if the signal should be actually delivered, otherwise
610  * it should be dropped.
611  */
612 static int prepare_signal(int sig, struct task_struct *p)
613 {
614 	struct signal_struct *signal = p->signal;
615 	struct task_struct *t;
616 
617 	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
618 		/*
619 		 * The process is in the middle of dying, nothing to do.
620 		 */
621 	} else if (sig_kernel_stop(sig)) {
622 		/*
623 		 * This is a stop signal.  Remove SIGCONT from all queues.
624 		 */
625 		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
626 		t = p;
627 		do {
628 			rm_from_queue(sigmask(SIGCONT), &t->pending);
629 		} while_each_thread(p, t);
630 	} else if (sig == SIGCONT) {
631 		unsigned int why;
632 		/*
633 		 * Remove all stop signals from all queues,
634 		 * and wake all threads.
635 		 */
636 		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
637 		t = p;
638 		do {
639 			unsigned int state;
640 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
641 			/*
642 			 * If there is a handler for SIGCONT, we must make
643 			 * sure that no thread returns to user mode before
644 			 * we post the signal, in case it was the only
645 			 * thread eligible to run the signal handler--then
646 			 * it must not do anything between resuming and
647 			 * running the handler.  With the TIF_SIGPENDING
648 			 * flag set, the thread will pause and acquire the
649 			 * siglock that we hold now and until we've queued
650 			 * the pending signal.
651 			 *
652 			 * Wake up the stopped thread _after_ setting
653 			 * TIF_SIGPENDING
654 			 */
655 			state = __TASK_STOPPED;
656 			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
657 				set_tsk_thread_flag(t, TIF_SIGPENDING);
658 				state |= TASK_INTERRUPTIBLE;
659 			}
660 			wake_up_state(t, state);
661 		} while_each_thread(p, t);
662 
663 		/*
664 		 * Notify the parent with CLD_CONTINUED if we were stopped.
665 		 *
666 		 * If we were in the middle of a group stop, we pretend it
667 		 * was already finished, and then continued. Since SIGCHLD
668 		 * doesn't queue we report only CLD_STOPPED, as if the next
669 		 * CLD_CONTINUED was dropped.
670 		 */
671 		why = 0;
672 		if (signal->flags & SIGNAL_STOP_STOPPED)
673 			why |= SIGNAL_CLD_CONTINUED;
674 		else if (signal->group_stop_count)
675 			why |= SIGNAL_CLD_STOPPED;
676 
677 		if (why) {
678 			/*
679 			 * The first thread which returns from finish_stop()
680 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
681 			 * notify its parent. See get_signal_to_deliver().
682 			 */
683 			signal->flags = why | SIGNAL_STOP_CONTINUED;
684 			signal->group_stop_count = 0;
685 			signal->group_exit_code = 0;
686 		} else {
687 			/*
688 			 * We are not stopped, but there could be a stop
689 			 * signal in the middle of being processed after
690 			 * being removed from the queue.  Clear that too.
691 			 */
692 			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
693 		}
694 	}
695 
696 	return !sig_ignored(p, sig);
697 }
698 
699 /*
700  * Test if P wants to take SIG.  After we've checked all threads with this,
701  * it's equivalent to finding no threads not blocking SIG.  Any threads not
702  * blocking SIG were ruled out because they are not running and already
703  * have pending signals.  Such threads will dequeue from the shared queue
704  * as soon as they're available, so putting the signal on the shared queue
705  * will be equivalent to sending it to one such thread.
706  */
707 static inline int wants_signal(int sig, struct task_struct *p)
708 {
709 	if (sigismember(&p->blocked, sig))
710 		return 0;
711 	if (p->flags & PF_EXITING)
712 		return 0;
713 	if (sig == SIGKILL)
714 		return 1;
715 	if (task_is_stopped_or_traced(p))
716 		return 0;
717 	return task_curr(p) || !signal_pending(p);
718 }
719 
720 static void complete_signal(int sig, struct task_struct *p, int group)
721 {
722 	struct signal_struct *signal = p->signal;
723 	struct task_struct *t;
724 
725 	/*
726 	 * Now find a thread we can wake up to take the signal off the queue.
727 	 *
728 	 * If the main thread wants the signal, it gets first crack.
729 	 * Probably the least surprising to the average bear.
730 	 */
731 	if (wants_signal(sig, p))
732 		t = p;
733 	else if (!group || thread_group_empty(p))
734 		/*
735 		 * There is just one thread and it does not need to be woken.
736 		 * It will dequeue unblocked signals before it runs again.
737 		 */
738 		return;
739 	else {
740 		/*
741 		 * Otherwise try to find a suitable thread.
742 		 */
743 		t = signal->curr_target;
744 		while (!wants_signal(sig, t)) {
745 			t = next_thread(t);
746 			if (t == signal->curr_target)
747 				/*
748 				 * No thread needs to be woken.
749 				 * Any eligible threads will see
750 				 * the signal in the queue soon.
751 				 */
752 				return;
753 		}
754 		signal->curr_target = t;
755 	}
756 
757 	/*
758 	 * Found a killable thread.  If the signal will be fatal,
759 	 * then start taking the whole group down immediately.
760 	 */
761 	if (sig_fatal(p, sig) &&
762 	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
763 	    !sigismember(&t->real_blocked, sig) &&
764 	    (sig == SIGKILL ||
765 	     !tracehook_consider_fatal_signal(t, sig, SIG_DFL))) {
766 		/*
767 		 * This signal will be fatal to the whole group.
768 		 */
769 		if (!sig_kernel_coredump(sig)) {
770 			/*
771 			 * Start a group exit and wake everybody up.
772 			 * This way we don't have other threads
773 			 * running and doing things after a slower
774 			 * thread has the fatal signal pending.
775 			 */
776 			signal->flags = SIGNAL_GROUP_EXIT;
777 			signal->group_exit_code = sig;
778 			signal->group_stop_count = 0;
779 			t = p;
780 			do {
781 				sigaddset(&t->pending.signal, SIGKILL);
782 				signal_wake_up(t, 1);
783 			} while_each_thread(p, t);
784 			return;
785 		}
786 	}
787 
788 	/*
789 	 * The signal is already in the shared-pending queue.
790 	 * Tell the chosen thread to wake up and dequeue it.
791 	 */
792 	signal_wake_up(t, sig == SIGKILL);
793 	return;
794 }
795 
796 static inline int legacy_queue(struct sigpending *signals, int sig)
797 {
798 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
799 }
800 
801 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
802 			int group)
803 {
804 	struct sigpending *pending;
805 	struct sigqueue *q;
806 
807 	trace_sched_signal_send(sig, t);
808 
809 	assert_spin_locked(&t->sighand->siglock);
810 	if (!prepare_signal(sig, t))
811 		return 0;
812 
813 	pending = group ? &t->signal->shared_pending : &t->pending;
814 	/*
815 	 * Short-circuit ignored signals and support queuing
816 	 * exactly one non-rt signal, so that we can get more
817 	 * detailed information about the cause of the signal.
818 	 */
819 	if (legacy_queue(pending, sig))
820 		return 0;
821 	/*
822 	 * fast-pathed signals for kernel-internal things like SIGSTOP
823 	 * or SIGKILL.
824 	 */
825 	if (info == SEND_SIG_FORCED)
826 		goto out_set;
827 
828 	/* Real-time signals must be queued if sent by sigqueue, or
829 	   some other real-time mechanism.  It is implementation
830 	   defined whether kill() does so.  We attempt to do so, on
831 	   the principle of least surprise, but since kill is not
832 	   allowed to fail with EAGAIN when low on memory we just
833 	   make sure at least one signal gets delivered and don't
834 	   pass on the info struct.  */
835 
836 	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
837 					     (is_si_special(info) ||
838 					      info->si_code >= 0)));
839 	if (q) {
840 		list_add_tail(&q->list, &pending->list);
841 		switch ((unsigned long) info) {
842 		case (unsigned long) SEND_SIG_NOINFO:
843 			q->info.si_signo = sig;
844 			q->info.si_errno = 0;
845 			q->info.si_code = SI_USER;
846 			q->info.si_pid = task_pid_vnr(current);
847 			q->info.si_uid = current->uid;
848 			break;
849 		case (unsigned long) SEND_SIG_PRIV:
850 			q->info.si_signo = sig;
851 			q->info.si_errno = 0;
852 			q->info.si_code = SI_KERNEL;
853 			q->info.si_pid = 0;
854 			q->info.si_uid = 0;
855 			break;
856 		default:
857 			copy_siginfo(&q->info, info);
858 			break;
859 		}
860 	} else if (!is_si_special(info)) {
861 		if (sig >= SIGRTMIN && info->si_code != SI_USER)
862 		/*
863 		 * Queue overflow, abort.  We may abort if the signal was rt
864 		 * and sent by user using something other than kill().
865 		 */
866 			return -EAGAIN;
867 	}
868 
869 out_set:
870 	signalfd_notify(t, sig);
871 	sigaddset(&pending->signal, sig);
872 	complete_signal(sig, t, group);
873 	return 0;
874 }
875 
876 int print_fatal_signals;
877 
878 static void print_fatal_signal(struct pt_regs *regs, int signr)
879 {
880 	printk("%s/%d: potentially unexpected fatal signal %d.\n",
881 		current->comm, task_pid_nr(current), signr);
882 
883 #if defined(__i386__) && !defined(__arch_um__)
884 	printk("code at %08lx: ", regs->ip);
885 	{
886 		int i;
887 		for (i = 0; i < 16; i++) {
888 			unsigned char insn;
889 
890 			__get_user(insn, (unsigned char *)(regs->ip + i));
891 			printk("%02x ", insn);
892 		}
893 	}
894 #endif
895 	printk("\n");
896 	show_regs(regs);
897 }
898 
899 static int __init setup_print_fatal_signals(char *str)
900 {
901 	get_option (&str, &print_fatal_signals);
902 
903 	return 1;
904 }
905 
906 __setup("print-fatal-signals=", setup_print_fatal_signals);
907 
908 int
909 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
910 {
911 	return send_signal(sig, info, p, 1);
912 }
913 
914 static int
915 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
916 {
917 	return send_signal(sig, info, t, 0);
918 }
919 
920 /*
921  * Force a signal that the process can't ignore: if necessary
922  * we unblock the signal and change any SIG_IGN to SIG_DFL.
923  *
924  * Note: If we unblock the signal, we always reset it to SIG_DFL,
925  * since we do not want to have a signal handler that was blocked
926  * be invoked when user space had explicitly blocked it.
927  *
928  * We don't want to have recursive SIGSEGV's etc, for example,
929  * that is why we also clear SIGNAL_UNKILLABLE.
930  */
931 int
932 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
933 {
934 	unsigned long int flags;
935 	int ret, blocked, ignored;
936 	struct k_sigaction *action;
937 
938 	spin_lock_irqsave(&t->sighand->siglock, flags);
939 	action = &t->sighand->action[sig-1];
940 	ignored = action->sa.sa_handler == SIG_IGN;
941 	blocked = sigismember(&t->blocked, sig);
942 	if (blocked || ignored) {
943 		action->sa.sa_handler = SIG_DFL;
944 		if (blocked) {
945 			sigdelset(&t->blocked, sig);
946 			recalc_sigpending_and_wake(t);
947 		}
948 	}
949 	if (action->sa.sa_handler == SIG_DFL)
950 		t->signal->flags &= ~SIGNAL_UNKILLABLE;
951 	ret = specific_send_sig_info(sig, info, t);
952 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
953 
954 	return ret;
955 }
956 
957 void
958 force_sig_specific(int sig, struct task_struct *t)
959 {
960 	force_sig_info(sig, SEND_SIG_FORCED, t);
961 }
962 
963 /*
964  * Nuke all other threads in the group.
965  */
966 void zap_other_threads(struct task_struct *p)
967 {
968 	struct task_struct *t;
969 
970 	p->signal->group_stop_count = 0;
971 
972 	for (t = next_thread(p); t != p; t = next_thread(t)) {
973 		/*
974 		 * Don't bother with already dead threads
975 		 */
976 		if (t->exit_state)
977 			continue;
978 
979 		/* SIGKILL will be handled before any pending SIGSTOP */
980 		sigaddset(&t->pending.signal, SIGKILL);
981 		signal_wake_up(t, 1);
982 	}
983 }
984 
985 int __fatal_signal_pending(struct task_struct *tsk)
986 {
987 	return sigismember(&tsk->pending.signal, SIGKILL);
988 }
989 EXPORT_SYMBOL(__fatal_signal_pending);
990 
991 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
992 {
993 	struct sighand_struct *sighand;
994 
995 	rcu_read_lock();
996 	for (;;) {
997 		sighand = rcu_dereference(tsk->sighand);
998 		if (unlikely(sighand == NULL))
999 			break;
1000 
1001 		spin_lock_irqsave(&sighand->siglock, *flags);
1002 		if (likely(sighand == tsk->sighand))
1003 			break;
1004 		spin_unlock_irqrestore(&sighand->siglock, *flags);
1005 	}
1006 	rcu_read_unlock();
1007 
1008 	return sighand;
1009 }
1010 
1011 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1012 {
1013 	unsigned long flags;
1014 	int ret;
1015 
1016 	ret = check_kill_permission(sig, info, p);
1017 
1018 	if (!ret && sig) {
1019 		ret = -ESRCH;
1020 		if (lock_task_sighand(p, &flags)) {
1021 			ret = __group_send_sig_info(sig, info, p);
1022 			unlock_task_sighand(p, &flags);
1023 		}
1024 	}
1025 
1026 	return ret;
1027 }
1028 
1029 /*
1030  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1031  * control characters do (^C, ^Z etc)
1032  */
1033 
1034 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1035 {
1036 	struct task_struct *p = NULL;
1037 	int retval, success;
1038 
1039 	success = 0;
1040 	retval = -ESRCH;
1041 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1042 		int err = group_send_sig_info(sig, info, p);
1043 		success |= !err;
1044 		retval = err;
1045 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1046 	return success ? 0 : retval;
1047 }
1048 
1049 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1050 {
1051 	int error = -ESRCH;
1052 	struct task_struct *p;
1053 
1054 	rcu_read_lock();
1055 retry:
1056 	p = pid_task(pid, PIDTYPE_PID);
1057 	if (p) {
1058 		error = group_send_sig_info(sig, info, p);
1059 		if (unlikely(error == -ESRCH))
1060 			/*
1061 			 * The task was unhashed in between, try again.
1062 			 * If it is dead, pid_task() will return NULL,
1063 			 * if we race with de_thread() it will find the
1064 			 * new leader.
1065 			 */
1066 			goto retry;
1067 	}
1068 	rcu_read_unlock();
1069 
1070 	return error;
1071 }
1072 
1073 int
1074 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1075 {
1076 	int error;
1077 	rcu_read_lock();
1078 	error = kill_pid_info(sig, info, find_vpid(pid));
1079 	rcu_read_unlock();
1080 	return error;
1081 }
1082 
1083 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1084 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1085 		      uid_t uid, uid_t euid, u32 secid)
1086 {
1087 	int ret = -EINVAL;
1088 	struct task_struct *p;
1089 
1090 	if (!valid_signal(sig))
1091 		return ret;
1092 
1093 	read_lock(&tasklist_lock);
1094 	p = pid_task(pid, PIDTYPE_PID);
1095 	if (!p) {
1096 		ret = -ESRCH;
1097 		goto out_unlock;
1098 	}
1099 	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1100 	    && (euid != p->suid) && (euid != p->uid)
1101 	    && (uid != p->suid) && (uid != p->uid)) {
1102 		ret = -EPERM;
1103 		goto out_unlock;
1104 	}
1105 	ret = security_task_kill(p, info, sig, secid);
1106 	if (ret)
1107 		goto out_unlock;
1108 	if (sig && p->sighand) {
1109 		unsigned long flags;
1110 		spin_lock_irqsave(&p->sighand->siglock, flags);
1111 		ret = __group_send_sig_info(sig, info, p);
1112 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1113 	}
1114 out_unlock:
1115 	read_unlock(&tasklist_lock);
1116 	return ret;
1117 }
1118 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1119 
1120 /*
1121  * kill_something_info() interprets pid in interesting ways just like kill(2).
1122  *
1123  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1124  * is probably wrong.  Should make it like BSD or SYSV.
1125  */
1126 
1127 static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1128 {
1129 	int ret;
1130 
1131 	if (pid > 0) {
1132 		rcu_read_lock();
1133 		ret = kill_pid_info(sig, info, find_vpid(pid));
1134 		rcu_read_unlock();
1135 		return ret;
1136 	}
1137 
1138 	read_lock(&tasklist_lock);
1139 	if (pid != -1) {
1140 		ret = __kill_pgrp_info(sig, info,
1141 				pid ? find_vpid(-pid) : task_pgrp(current));
1142 	} else {
1143 		int retval = 0, count = 0;
1144 		struct task_struct * p;
1145 
1146 		for_each_process(p) {
1147 			if (p->pid > 1 && !same_thread_group(p, current)) {
1148 				int err = group_send_sig_info(sig, info, p);
1149 				++count;
1150 				if (err != -EPERM)
1151 					retval = err;
1152 			}
1153 		}
1154 		ret = count ? retval : -ESRCH;
1155 	}
1156 	read_unlock(&tasklist_lock);
1157 
1158 	return ret;
1159 }
1160 
1161 /*
1162  * These are for backward compatibility with the rest of the kernel source.
1163  */
1164 
1165 /*
1166  * The caller must ensure the task can't exit.
1167  */
1168 int
1169 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1170 {
1171 	int ret;
1172 	unsigned long flags;
1173 
1174 	/*
1175 	 * Make sure legacy kernel users don't send in bad values
1176 	 * (normal paths check this in check_kill_permission).
1177 	 */
1178 	if (!valid_signal(sig))
1179 		return -EINVAL;
1180 
1181 	spin_lock_irqsave(&p->sighand->siglock, flags);
1182 	ret = specific_send_sig_info(sig, info, p);
1183 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
1184 	return ret;
1185 }
1186 
1187 #define __si_special(priv) \
1188 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1189 
1190 int
1191 send_sig(int sig, struct task_struct *p, int priv)
1192 {
1193 	return send_sig_info(sig, __si_special(priv), p);
1194 }
1195 
1196 void
1197 force_sig(int sig, struct task_struct *p)
1198 {
1199 	force_sig_info(sig, SEND_SIG_PRIV, p);
1200 }
1201 
1202 /*
1203  * When things go south during signal handling, we
1204  * will force a SIGSEGV. And if the signal that caused
1205  * the problem was already a SIGSEGV, we'll want to
1206  * make sure we don't even try to deliver the signal..
1207  */
1208 int
1209 force_sigsegv(int sig, struct task_struct *p)
1210 {
1211 	if (sig == SIGSEGV) {
1212 		unsigned long flags;
1213 		spin_lock_irqsave(&p->sighand->siglock, flags);
1214 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1215 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1216 	}
1217 	force_sig(SIGSEGV, p);
1218 	return 0;
1219 }
1220 
1221 int kill_pgrp(struct pid *pid, int sig, int priv)
1222 {
1223 	int ret;
1224 
1225 	read_lock(&tasklist_lock);
1226 	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1227 	read_unlock(&tasklist_lock);
1228 
1229 	return ret;
1230 }
1231 EXPORT_SYMBOL(kill_pgrp);
1232 
1233 int kill_pid(struct pid *pid, int sig, int priv)
1234 {
1235 	return kill_pid_info(sig, __si_special(priv), pid);
1236 }
1237 EXPORT_SYMBOL(kill_pid);
1238 
1239 /*
1240  * These functions support sending signals using preallocated sigqueue
1241  * structures.  This is needed "because realtime applications cannot
1242  * afford to lose notifications of asynchronous events, like timer
1243  * expirations or I/O completions".  In the case of Posix Timers
1244  * we allocate the sigqueue structure from the timer_create.  If this
1245  * allocation fails we are able to report the failure to the application
1246  * with an EAGAIN error.
1247  */
1248 
1249 struct sigqueue *sigqueue_alloc(void)
1250 {
1251 	struct sigqueue *q;
1252 
1253 	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1254 		q->flags |= SIGQUEUE_PREALLOC;
1255 	return(q);
1256 }
1257 
1258 void sigqueue_free(struct sigqueue *q)
1259 {
1260 	unsigned long flags;
1261 	spinlock_t *lock = &current->sighand->siglock;
1262 
1263 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1264 	/*
1265 	 * We must hold ->siglock while testing q->list
1266 	 * to serialize with collect_signal() or with
1267 	 * __exit_signal()->flush_sigqueue().
1268 	 */
1269 	spin_lock_irqsave(lock, flags);
1270 	q->flags &= ~SIGQUEUE_PREALLOC;
1271 	/*
1272 	 * If it is queued it will be freed when dequeued,
1273 	 * like the "regular" sigqueue.
1274 	 */
1275 	if (!list_empty(&q->list))
1276 		q = NULL;
1277 	spin_unlock_irqrestore(lock, flags);
1278 
1279 	if (q)
1280 		__sigqueue_free(q);
1281 }
1282 
1283 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1284 {
1285 	int sig = q->info.si_signo;
1286 	struct sigpending *pending;
1287 	unsigned long flags;
1288 	int ret;
1289 
1290 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1291 
1292 	ret = -1;
1293 	if (!likely(lock_task_sighand(t, &flags)))
1294 		goto ret;
1295 
1296 	ret = 1; /* the signal is ignored */
1297 	if (!prepare_signal(sig, t))
1298 		goto out;
1299 
1300 	ret = 0;
1301 	if (unlikely(!list_empty(&q->list))) {
1302 		/*
1303 		 * If an SI_TIMER entry is already queue just increment
1304 		 * the overrun count.
1305 		 */
1306 		BUG_ON(q->info.si_code != SI_TIMER);
1307 		q->info.si_overrun++;
1308 		goto out;
1309 	}
1310 	q->info.si_overrun = 0;
1311 
1312 	signalfd_notify(t, sig);
1313 	pending = group ? &t->signal->shared_pending : &t->pending;
1314 	list_add_tail(&q->list, &pending->list);
1315 	sigaddset(&pending->signal, sig);
1316 	complete_signal(sig, t, group);
1317 out:
1318 	unlock_task_sighand(t, &flags);
1319 ret:
1320 	return ret;
1321 }
1322 
1323 /*
1324  * Wake up any threads in the parent blocked in wait* syscalls.
1325  */
1326 static inline void __wake_up_parent(struct task_struct *p,
1327 				    struct task_struct *parent)
1328 {
1329 	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1330 }
1331 
1332 /*
1333  * Let a parent know about the death of a child.
1334  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1335  *
1336  * Returns -1 if our parent ignored us and so we've switched to
1337  * self-reaping, or else @sig.
1338  */
1339 int do_notify_parent(struct task_struct *tsk, int sig)
1340 {
1341 	struct siginfo info;
1342 	unsigned long flags;
1343 	struct sighand_struct *psig;
1344 	struct task_cputime cputime;
1345 	int ret = sig;
1346 
1347 	BUG_ON(sig == -1);
1348 
1349  	/* do_notify_parent_cldstop should have been called instead.  */
1350  	BUG_ON(task_is_stopped_or_traced(tsk));
1351 
1352 	BUG_ON(!tsk->ptrace &&
1353 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1354 
1355 	info.si_signo = sig;
1356 	info.si_errno = 0;
1357 	/*
1358 	 * we are under tasklist_lock here so our parent is tied to
1359 	 * us and cannot exit and release its namespace.
1360 	 *
1361 	 * the only it can is to switch its nsproxy with sys_unshare,
1362 	 * bu uncharing pid namespaces is not allowed, so we'll always
1363 	 * see relevant namespace
1364 	 *
1365 	 * write_lock() currently calls preempt_disable() which is the
1366 	 * same as rcu_read_lock(), but according to Oleg, this is not
1367 	 * correct to rely on this
1368 	 */
1369 	rcu_read_lock();
1370 	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1371 	rcu_read_unlock();
1372 
1373 	info.si_uid = tsk->uid;
1374 
1375 	thread_group_cputime(tsk, &cputime);
1376 	info.si_utime = cputime_to_jiffies(cputime.utime);
1377 	info.si_stime = cputime_to_jiffies(cputime.stime);
1378 
1379 	info.si_status = tsk->exit_code & 0x7f;
1380 	if (tsk->exit_code & 0x80)
1381 		info.si_code = CLD_DUMPED;
1382 	else if (tsk->exit_code & 0x7f)
1383 		info.si_code = CLD_KILLED;
1384 	else {
1385 		info.si_code = CLD_EXITED;
1386 		info.si_status = tsk->exit_code >> 8;
1387 	}
1388 
1389 	psig = tsk->parent->sighand;
1390 	spin_lock_irqsave(&psig->siglock, flags);
1391 	if (!tsk->ptrace && sig == SIGCHLD &&
1392 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1393 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1394 		/*
1395 		 * We are exiting and our parent doesn't care.  POSIX.1
1396 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1397 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1398 		 * automatically and not left for our parent's wait4 call.
1399 		 * Rather than having the parent do it as a magic kind of
1400 		 * signal handler, we just set this to tell do_exit that we
1401 		 * can be cleaned up without becoming a zombie.  Note that
1402 		 * we still call __wake_up_parent in this case, because a
1403 		 * blocked sys_wait4 might now return -ECHILD.
1404 		 *
1405 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1406 		 * is implementation-defined: we do (if you don't want
1407 		 * it, just use SIG_IGN instead).
1408 		 */
1409 		ret = tsk->exit_signal = -1;
1410 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1411 			sig = -1;
1412 	}
1413 	if (valid_signal(sig) && sig > 0)
1414 		__group_send_sig_info(sig, &info, tsk->parent);
1415 	__wake_up_parent(tsk, tsk->parent);
1416 	spin_unlock_irqrestore(&psig->siglock, flags);
1417 
1418 	return ret;
1419 }
1420 
1421 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1422 {
1423 	struct siginfo info;
1424 	unsigned long flags;
1425 	struct task_struct *parent;
1426 	struct sighand_struct *sighand;
1427 
1428 	if (tsk->ptrace & PT_PTRACED)
1429 		parent = tsk->parent;
1430 	else {
1431 		tsk = tsk->group_leader;
1432 		parent = tsk->real_parent;
1433 	}
1434 
1435 	info.si_signo = SIGCHLD;
1436 	info.si_errno = 0;
1437 	/*
1438 	 * see comment in do_notify_parent() abot the following 3 lines
1439 	 */
1440 	rcu_read_lock();
1441 	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1442 	rcu_read_unlock();
1443 
1444 	info.si_uid = tsk->uid;
1445 
1446 	info.si_utime = cputime_to_clock_t(tsk->utime);
1447 	info.si_stime = cputime_to_clock_t(tsk->stime);
1448 
1449  	info.si_code = why;
1450  	switch (why) {
1451  	case CLD_CONTINUED:
1452  		info.si_status = SIGCONT;
1453  		break;
1454  	case CLD_STOPPED:
1455  		info.si_status = tsk->signal->group_exit_code & 0x7f;
1456  		break;
1457  	case CLD_TRAPPED:
1458  		info.si_status = tsk->exit_code & 0x7f;
1459  		break;
1460  	default:
1461  		BUG();
1462  	}
1463 
1464 	sighand = parent->sighand;
1465 	spin_lock_irqsave(&sighand->siglock, flags);
1466 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1467 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1468 		__group_send_sig_info(SIGCHLD, &info, parent);
1469 	/*
1470 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1471 	 */
1472 	__wake_up_parent(tsk, parent);
1473 	spin_unlock_irqrestore(&sighand->siglock, flags);
1474 }
1475 
1476 static inline int may_ptrace_stop(void)
1477 {
1478 	if (!likely(current->ptrace & PT_PTRACED))
1479 		return 0;
1480 	/*
1481 	 * Are we in the middle of do_coredump?
1482 	 * If so and our tracer is also part of the coredump stopping
1483 	 * is a deadlock situation, and pointless because our tracer
1484 	 * is dead so don't allow us to stop.
1485 	 * If SIGKILL was already sent before the caller unlocked
1486 	 * ->siglock we must see ->core_state != NULL. Otherwise it
1487 	 * is safe to enter schedule().
1488 	 */
1489 	if (unlikely(current->mm->core_state) &&
1490 	    unlikely(current->mm == current->parent->mm))
1491 		return 0;
1492 
1493 	return 1;
1494 }
1495 
1496 /*
1497  * Return nonzero if there is a SIGKILL that should be waking us up.
1498  * Called with the siglock held.
1499  */
1500 static int sigkill_pending(struct task_struct *tsk)
1501 {
1502 	return	sigismember(&tsk->pending.signal, SIGKILL) ||
1503 		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1504 }
1505 
1506 /*
1507  * This must be called with current->sighand->siglock held.
1508  *
1509  * This should be the path for all ptrace stops.
1510  * We always set current->last_siginfo while stopped here.
1511  * That makes it a way to test a stopped process for
1512  * being ptrace-stopped vs being job-control-stopped.
1513  *
1514  * If we actually decide not to stop at all because the tracer
1515  * is gone, we keep current->exit_code unless clear_code.
1516  */
1517 static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1518 {
1519 	if (arch_ptrace_stop_needed(exit_code, info)) {
1520 		/*
1521 		 * The arch code has something special to do before a
1522 		 * ptrace stop.  This is allowed to block, e.g. for faults
1523 		 * on user stack pages.  We can't keep the siglock while
1524 		 * calling arch_ptrace_stop, so we must release it now.
1525 		 * To preserve proper semantics, we must do this before
1526 		 * any signal bookkeeping like checking group_stop_count.
1527 		 * Meanwhile, a SIGKILL could come in before we retake the
1528 		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
1529 		 * So after regaining the lock, we must check for SIGKILL.
1530 		 */
1531 		spin_unlock_irq(&current->sighand->siglock);
1532 		arch_ptrace_stop(exit_code, info);
1533 		spin_lock_irq(&current->sighand->siglock);
1534 		if (sigkill_pending(current))
1535 			return;
1536 	}
1537 
1538 	/*
1539 	 * If there is a group stop in progress,
1540 	 * we must participate in the bookkeeping.
1541 	 */
1542 	if (current->signal->group_stop_count > 0)
1543 		--current->signal->group_stop_count;
1544 
1545 	current->last_siginfo = info;
1546 	current->exit_code = exit_code;
1547 
1548 	/* Let the debugger run.  */
1549 	__set_current_state(TASK_TRACED);
1550 	spin_unlock_irq(&current->sighand->siglock);
1551 	read_lock(&tasklist_lock);
1552 	if (may_ptrace_stop()) {
1553 		do_notify_parent_cldstop(current, CLD_TRAPPED);
1554 		read_unlock(&tasklist_lock);
1555 		schedule();
1556 	} else {
1557 		/*
1558 		 * By the time we got the lock, our tracer went away.
1559 		 * Don't drop the lock yet, another tracer may come.
1560 		 */
1561 		__set_current_state(TASK_RUNNING);
1562 		if (clear_code)
1563 			current->exit_code = 0;
1564 		read_unlock(&tasklist_lock);
1565 	}
1566 
1567 	/*
1568 	 * While in TASK_TRACED, we were considered "frozen enough".
1569 	 * Now that we woke up, it's crucial if we're supposed to be
1570 	 * frozen that we freeze now before running anything substantial.
1571 	 */
1572 	try_to_freeze();
1573 
1574 	/*
1575 	 * We are back.  Now reacquire the siglock before touching
1576 	 * last_siginfo, so that we are sure to have synchronized with
1577 	 * any signal-sending on another CPU that wants to examine it.
1578 	 */
1579 	spin_lock_irq(&current->sighand->siglock);
1580 	current->last_siginfo = NULL;
1581 
1582 	/*
1583 	 * Queued signals ignored us while we were stopped for tracing.
1584 	 * So check for any that we should take before resuming user mode.
1585 	 * This sets TIF_SIGPENDING, but never clears it.
1586 	 */
1587 	recalc_sigpending_tsk(current);
1588 }
1589 
1590 void ptrace_notify(int exit_code)
1591 {
1592 	siginfo_t info;
1593 
1594 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1595 
1596 	memset(&info, 0, sizeof info);
1597 	info.si_signo = SIGTRAP;
1598 	info.si_code = exit_code;
1599 	info.si_pid = task_pid_vnr(current);
1600 	info.si_uid = current->uid;
1601 
1602 	/* Let the debugger run.  */
1603 	spin_lock_irq(&current->sighand->siglock);
1604 	ptrace_stop(exit_code, 1, &info);
1605 	spin_unlock_irq(&current->sighand->siglock);
1606 }
1607 
1608 static void
1609 finish_stop(int stop_count)
1610 {
1611 	/*
1612 	 * If there are no other threads in the group, or if there is
1613 	 * a group stop in progress and we are the last to stop,
1614 	 * report to the parent.  When ptraced, every thread reports itself.
1615 	 */
1616 	if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
1617 		read_lock(&tasklist_lock);
1618 		do_notify_parent_cldstop(current, CLD_STOPPED);
1619 		read_unlock(&tasklist_lock);
1620 	}
1621 
1622 	do {
1623 		schedule();
1624 	} while (try_to_freeze());
1625 	/*
1626 	 * Now we don't run again until continued.
1627 	 */
1628 	current->exit_code = 0;
1629 }
1630 
1631 /*
1632  * This performs the stopping for SIGSTOP and other stop signals.
1633  * We have to stop all threads in the thread group.
1634  * Returns nonzero if we've actually stopped and released the siglock.
1635  * Returns zero if we didn't stop and still hold the siglock.
1636  */
1637 static int do_signal_stop(int signr)
1638 {
1639 	struct signal_struct *sig = current->signal;
1640 	int stop_count;
1641 
1642 	if (sig->group_stop_count > 0) {
1643 		/*
1644 		 * There is a group stop in progress.  We don't need to
1645 		 * start another one.
1646 		 */
1647 		stop_count = --sig->group_stop_count;
1648 	} else {
1649 		struct task_struct *t;
1650 
1651 		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
1652 		    unlikely(signal_group_exit(sig)))
1653 			return 0;
1654 		/*
1655 		 * There is no group stop already in progress.
1656 		 * We must initiate one now.
1657 		 */
1658 		sig->group_exit_code = signr;
1659 
1660 		stop_count = 0;
1661 		for (t = next_thread(current); t != current; t = next_thread(t))
1662 			/*
1663 			 * Setting state to TASK_STOPPED for a group
1664 			 * stop is always done with the siglock held,
1665 			 * so this check has no races.
1666 			 */
1667 			if (!(t->flags & PF_EXITING) &&
1668 			    !task_is_stopped_or_traced(t)) {
1669 				stop_count++;
1670 				signal_wake_up(t, 0);
1671 			}
1672 		sig->group_stop_count = stop_count;
1673 	}
1674 
1675 	if (stop_count == 0)
1676 		sig->flags = SIGNAL_STOP_STOPPED;
1677 	current->exit_code = sig->group_exit_code;
1678 	__set_current_state(TASK_STOPPED);
1679 
1680 	spin_unlock_irq(&current->sighand->siglock);
1681 	finish_stop(stop_count);
1682 	return 1;
1683 }
1684 
1685 static int ptrace_signal(int signr, siginfo_t *info,
1686 			 struct pt_regs *regs, void *cookie)
1687 {
1688 	if (!(current->ptrace & PT_PTRACED))
1689 		return signr;
1690 
1691 	ptrace_signal_deliver(regs, cookie);
1692 
1693 	/* Let the debugger run.  */
1694 	ptrace_stop(signr, 0, info);
1695 
1696 	/* We're back.  Did the debugger cancel the sig?  */
1697 	signr = current->exit_code;
1698 	if (signr == 0)
1699 		return signr;
1700 
1701 	current->exit_code = 0;
1702 
1703 	/* Update the siginfo structure if the signal has
1704 	   changed.  If the debugger wanted something
1705 	   specific in the siginfo structure then it should
1706 	   have updated *info via PTRACE_SETSIGINFO.  */
1707 	if (signr != info->si_signo) {
1708 		info->si_signo = signr;
1709 		info->si_errno = 0;
1710 		info->si_code = SI_USER;
1711 		info->si_pid = task_pid_vnr(current->parent);
1712 		info->si_uid = current->parent->uid;
1713 	}
1714 
1715 	/* If the (new) signal is now blocked, requeue it.  */
1716 	if (sigismember(&current->blocked, signr)) {
1717 		specific_send_sig_info(signr, info, current);
1718 		signr = 0;
1719 	}
1720 
1721 	return signr;
1722 }
1723 
1724 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1725 			  struct pt_regs *regs, void *cookie)
1726 {
1727 	struct sighand_struct *sighand = current->sighand;
1728 	struct signal_struct *signal = current->signal;
1729 	int signr;
1730 
1731 relock:
1732 	/*
1733 	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
1734 	 * While in TASK_STOPPED, we were considered "frozen enough".
1735 	 * Now that we woke up, it's crucial if we're supposed to be
1736 	 * frozen that we freeze now before running anything substantial.
1737 	 */
1738 	try_to_freeze();
1739 
1740 	spin_lock_irq(&sighand->siglock);
1741 	/*
1742 	 * Every stopped thread goes here after wakeup. Check to see if
1743 	 * we should notify the parent, prepare_signal(SIGCONT) encodes
1744 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
1745 	 */
1746 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
1747 		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
1748 				? CLD_CONTINUED : CLD_STOPPED;
1749 		signal->flags &= ~SIGNAL_CLD_MASK;
1750 		spin_unlock_irq(&sighand->siglock);
1751 
1752 		if (unlikely(!tracehook_notify_jctl(1, why)))
1753 			goto relock;
1754 
1755 		read_lock(&tasklist_lock);
1756 		do_notify_parent_cldstop(current->group_leader, why);
1757 		read_unlock(&tasklist_lock);
1758 		goto relock;
1759 	}
1760 
1761 	for (;;) {
1762 		struct k_sigaction *ka;
1763 
1764 		if (unlikely(signal->group_stop_count > 0) &&
1765 		    do_signal_stop(0))
1766 			goto relock;
1767 
1768 		/*
1769 		 * Tracing can induce an artifical signal and choose sigaction.
1770 		 * The return value in @signr determines the default action,
1771 		 * but @info->si_signo is the signal number we will report.
1772 		 */
1773 		signr = tracehook_get_signal(current, regs, info, return_ka);
1774 		if (unlikely(signr < 0))
1775 			goto relock;
1776 		if (unlikely(signr != 0))
1777 			ka = return_ka;
1778 		else {
1779 			signr = dequeue_signal(current, &current->blocked,
1780 					       info);
1781 
1782 			if (!signr)
1783 				break; /* will return 0 */
1784 
1785 			if (signr != SIGKILL) {
1786 				signr = ptrace_signal(signr, info,
1787 						      regs, cookie);
1788 				if (!signr)
1789 					continue;
1790 			}
1791 
1792 			ka = &sighand->action[signr-1];
1793 		}
1794 
1795 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1796 			continue;
1797 		if (ka->sa.sa_handler != SIG_DFL) {
1798 			/* Run the handler.  */
1799 			*return_ka = *ka;
1800 
1801 			if (ka->sa.sa_flags & SA_ONESHOT)
1802 				ka->sa.sa_handler = SIG_DFL;
1803 
1804 			break; /* will return non-zero "signr" value */
1805 		}
1806 
1807 		/*
1808 		 * Now we are doing the default action for this signal.
1809 		 */
1810 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
1811 			continue;
1812 
1813 		/*
1814 		 * Global init gets no signals it doesn't want.
1815 		 */
1816 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
1817 		    !signal_group_exit(signal))
1818 			continue;
1819 
1820 		if (sig_kernel_stop(signr)) {
1821 			/*
1822 			 * The default action is to stop all threads in
1823 			 * the thread group.  The job control signals
1824 			 * do nothing in an orphaned pgrp, but SIGSTOP
1825 			 * always works.  Note that siglock needs to be
1826 			 * dropped during the call to is_orphaned_pgrp()
1827 			 * because of lock ordering with tasklist_lock.
1828 			 * This allows an intervening SIGCONT to be posted.
1829 			 * We need to check for that and bail out if necessary.
1830 			 */
1831 			if (signr != SIGSTOP) {
1832 				spin_unlock_irq(&sighand->siglock);
1833 
1834 				/* signals can be posted during this window */
1835 
1836 				if (is_current_pgrp_orphaned())
1837 					goto relock;
1838 
1839 				spin_lock_irq(&sighand->siglock);
1840 			}
1841 
1842 			if (likely(do_signal_stop(info->si_signo))) {
1843 				/* It released the siglock.  */
1844 				goto relock;
1845 			}
1846 
1847 			/*
1848 			 * We didn't actually stop, due to a race
1849 			 * with SIGCONT or something like that.
1850 			 */
1851 			continue;
1852 		}
1853 
1854 		spin_unlock_irq(&sighand->siglock);
1855 
1856 		/*
1857 		 * Anything else is fatal, maybe with a core dump.
1858 		 */
1859 		current->flags |= PF_SIGNALED;
1860 
1861 		if (sig_kernel_coredump(signr)) {
1862 			if (print_fatal_signals)
1863 				print_fatal_signal(regs, info->si_signo);
1864 			/*
1865 			 * If it was able to dump core, this kills all
1866 			 * other threads in the group and synchronizes with
1867 			 * their demise.  If we lost the race with another
1868 			 * thread getting here, it set group_exit_code
1869 			 * first and our do_group_exit call below will use
1870 			 * that value and ignore the one we pass it.
1871 			 */
1872 			do_coredump(info->si_signo, info->si_signo, regs);
1873 		}
1874 
1875 		/*
1876 		 * Death signals, no core dump.
1877 		 */
1878 		do_group_exit(info->si_signo);
1879 		/* NOTREACHED */
1880 	}
1881 	spin_unlock_irq(&sighand->siglock);
1882 	return signr;
1883 }
1884 
1885 void exit_signals(struct task_struct *tsk)
1886 {
1887 	int group_stop = 0;
1888 	struct task_struct *t;
1889 
1890 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
1891 		tsk->flags |= PF_EXITING;
1892 		return;
1893 	}
1894 
1895 	spin_lock_irq(&tsk->sighand->siglock);
1896 	/*
1897 	 * From now this task is not visible for group-wide signals,
1898 	 * see wants_signal(), do_signal_stop().
1899 	 */
1900 	tsk->flags |= PF_EXITING;
1901 	if (!signal_pending(tsk))
1902 		goto out;
1903 
1904 	/* It could be that __group_complete_signal() choose us to
1905 	 * notify about group-wide signal. Another thread should be
1906 	 * woken now to take the signal since we will not.
1907 	 */
1908 	for (t = tsk; (t = next_thread(t)) != tsk; )
1909 		if (!signal_pending(t) && !(t->flags & PF_EXITING))
1910 			recalc_sigpending_and_wake(t);
1911 
1912 	if (unlikely(tsk->signal->group_stop_count) &&
1913 			!--tsk->signal->group_stop_count) {
1914 		tsk->signal->flags = SIGNAL_STOP_STOPPED;
1915 		group_stop = 1;
1916 	}
1917 out:
1918 	spin_unlock_irq(&tsk->sighand->siglock);
1919 
1920 	if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
1921 		read_lock(&tasklist_lock);
1922 		do_notify_parent_cldstop(tsk, CLD_STOPPED);
1923 		read_unlock(&tasklist_lock);
1924 	}
1925 }
1926 
1927 EXPORT_SYMBOL(recalc_sigpending);
1928 EXPORT_SYMBOL_GPL(dequeue_signal);
1929 EXPORT_SYMBOL(flush_signals);
1930 EXPORT_SYMBOL(force_sig);
1931 EXPORT_SYMBOL(send_sig);
1932 EXPORT_SYMBOL(send_sig_info);
1933 EXPORT_SYMBOL(sigprocmask);
1934 EXPORT_SYMBOL(block_all_signals);
1935 EXPORT_SYMBOL(unblock_all_signals);
1936 
1937 
1938 /*
1939  * System call entry points.
1940  */
1941 
1942 asmlinkage long sys_restart_syscall(void)
1943 {
1944 	struct restart_block *restart = &current_thread_info()->restart_block;
1945 	return restart->fn(restart);
1946 }
1947 
1948 long do_no_restart_syscall(struct restart_block *param)
1949 {
1950 	return -EINTR;
1951 }
1952 
1953 /*
1954  * We don't need to get the kernel lock - this is all local to this
1955  * particular thread.. (and that's good, because this is _heavily_
1956  * used by various programs)
1957  */
1958 
1959 /*
1960  * This is also useful for kernel threads that want to temporarily
1961  * (or permanently) block certain signals.
1962  *
1963  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
1964  * interface happily blocks "unblockable" signals like SIGKILL
1965  * and friends.
1966  */
1967 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
1968 {
1969 	int error;
1970 
1971 	spin_lock_irq(&current->sighand->siglock);
1972 	if (oldset)
1973 		*oldset = current->blocked;
1974 
1975 	error = 0;
1976 	switch (how) {
1977 	case SIG_BLOCK:
1978 		sigorsets(&current->blocked, &current->blocked, set);
1979 		break;
1980 	case SIG_UNBLOCK:
1981 		signandsets(&current->blocked, &current->blocked, set);
1982 		break;
1983 	case SIG_SETMASK:
1984 		current->blocked = *set;
1985 		break;
1986 	default:
1987 		error = -EINVAL;
1988 	}
1989 	recalc_sigpending();
1990 	spin_unlock_irq(&current->sighand->siglock);
1991 
1992 	return error;
1993 }
1994 
1995 asmlinkage long
1996 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
1997 {
1998 	int error = -EINVAL;
1999 	sigset_t old_set, new_set;
2000 
2001 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2002 	if (sigsetsize != sizeof(sigset_t))
2003 		goto out;
2004 
2005 	if (set) {
2006 		error = -EFAULT;
2007 		if (copy_from_user(&new_set, set, sizeof(*set)))
2008 			goto out;
2009 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2010 
2011 		error = sigprocmask(how, &new_set, &old_set);
2012 		if (error)
2013 			goto out;
2014 		if (oset)
2015 			goto set_old;
2016 	} else if (oset) {
2017 		spin_lock_irq(&current->sighand->siglock);
2018 		old_set = current->blocked;
2019 		spin_unlock_irq(&current->sighand->siglock);
2020 
2021 	set_old:
2022 		error = -EFAULT;
2023 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2024 			goto out;
2025 	}
2026 	error = 0;
2027 out:
2028 	return error;
2029 }
2030 
2031 long do_sigpending(void __user *set, unsigned long sigsetsize)
2032 {
2033 	long error = -EINVAL;
2034 	sigset_t pending;
2035 
2036 	if (sigsetsize > sizeof(sigset_t))
2037 		goto out;
2038 
2039 	spin_lock_irq(&current->sighand->siglock);
2040 	sigorsets(&pending, &current->pending.signal,
2041 		  &current->signal->shared_pending.signal);
2042 	spin_unlock_irq(&current->sighand->siglock);
2043 
2044 	/* Outside the lock because only this thread touches it.  */
2045 	sigandsets(&pending, &current->blocked, &pending);
2046 
2047 	error = -EFAULT;
2048 	if (!copy_to_user(set, &pending, sigsetsize))
2049 		error = 0;
2050 
2051 out:
2052 	return error;
2053 }
2054 
2055 asmlinkage long
2056 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2057 {
2058 	return do_sigpending(set, sigsetsize);
2059 }
2060 
2061 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2062 
2063 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2064 {
2065 	int err;
2066 
2067 	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2068 		return -EFAULT;
2069 	if (from->si_code < 0)
2070 		return __copy_to_user(to, from, sizeof(siginfo_t))
2071 			? -EFAULT : 0;
2072 	/*
2073 	 * If you change siginfo_t structure, please be sure
2074 	 * this code is fixed accordingly.
2075 	 * Please remember to update the signalfd_copyinfo() function
2076 	 * inside fs/signalfd.c too, in case siginfo_t changes.
2077 	 * It should never copy any pad contained in the structure
2078 	 * to avoid security leaks, but must copy the generic
2079 	 * 3 ints plus the relevant union member.
2080 	 */
2081 	err = __put_user(from->si_signo, &to->si_signo);
2082 	err |= __put_user(from->si_errno, &to->si_errno);
2083 	err |= __put_user((short)from->si_code, &to->si_code);
2084 	switch (from->si_code & __SI_MASK) {
2085 	case __SI_KILL:
2086 		err |= __put_user(from->si_pid, &to->si_pid);
2087 		err |= __put_user(from->si_uid, &to->si_uid);
2088 		break;
2089 	case __SI_TIMER:
2090 		 err |= __put_user(from->si_tid, &to->si_tid);
2091 		 err |= __put_user(from->si_overrun, &to->si_overrun);
2092 		 err |= __put_user(from->si_ptr, &to->si_ptr);
2093 		break;
2094 	case __SI_POLL:
2095 		err |= __put_user(from->si_band, &to->si_band);
2096 		err |= __put_user(from->si_fd, &to->si_fd);
2097 		break;
2098 	case __SI_FAULT:
2099 		err |= __put_user(from->si_addr, &to->si_addr);
2100 #ifdef __ARCH_SI_TRAPNO
2101 		err |= __put_user(from->si_trapno, &to->si_trapno);
2102 #endif
2103 		break;
2104 	case __SI_CHLD:
2105 		err |= __put_user(from->si_pid, &to->si_pid);
2106 		err |= __put_user(from->si_uid, &to->si_uid);
2107 		err |= __put_user(from->si_status, &to->si_status);
2108 		err |= __put_user(from->si_utime, &to->si_utime);
2109 		err |= __put_user(from->si_stime, &to->si_stime);
2110 		break;
2111 	case __SI_RT: /* This is not generated by the kernel as of now. */
2112 	case __SI_MESGQ: /* But this is */
2113 		err |= __put_user(from->si_pid, &to->si_pid);
2114 		err |= __put_user(from->si_uid, &to->si_uid);
2115 		err |= __put_user(from->si_ptr, &to->si_ptr);
2116 		break;
2117 	default: /* this is just in case for now ... */
2118 		err |= __put_user(from->si_pid, &to->si_pid);
2119 		err |= __put_user(from->si_uid, &to->si_uid);
2120 		break;
2121 	}
2122 	return err;
2123 }
2124 
2125 #endif
2126 
2127 asmlinkage long
2128 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2129 		    siginfo_t __user *uinfo,
2130 		    const struct timespec __user *uts,
2131 		    size_t sigsetsize)
2132 {
2133 	int ret, sig;
2134 	sigset_t these;
2135 	struct timespec ts;
2136 	siginfo_t info;
2137 	long timeout = 0;
2138 
2139 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2140 	if (sigsetsize != sizeof(sigset_t))
2141 		return -EINVAL;
2142 
2143 	if (copy_from_user(&these, uthese, sizeof(these)))
2144 		return -EFAULT;
2145 
2146 	/*
2147 	 * Invert the set of allowed signals to get those we
2148 	 * want to block.
2149 	 */
2150 	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2151 	signotset(&these);
2152 
2153 	if (uts) {
2154 		if (copy_from_user(&ts, uts, sizeof(ts)))
2155 			return -EFAULT;
2156 		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2157 		    || ts.tv_sec < 0)
2158 			return -EINVAL;
2159 	}
2160 
2161 	spin_lock_irq(&current->sighand->siglock);
2162 	sig = dequeue_signal(current, &these, &info);
2163 	if (!sig) {
2164 		timeout = MAX_SCHEDULE_TIMEOUT;
2165 		if (uts)
2166 			timeout = (timespec_to_jiffies(&ts)
2167 				   + (ts.tv_sec || ts.tv_nsec));
2168 
2169 		if (timeout) {
2170 			/* None ready -- temporarily unblock those we're
2171 			 * interested while we are sleeping in so that we'll
2172 			 * be awakened when they arrive.  */
2173 			current->real_blocked = current->blocked;
2174 			sigandsets(&current->blocked, &current->blocked, &these);
2175 			recalc_sigpending();
2176 			spin_unlock_irq(&current->sighand->siglock);
2177 
2178 			timeout = schedule_timeout_interruptible(timeout);
2179 
2180 			spin_lock_irq(&current->sighand->siglock);
2181 			sig = dequeue_signal(current, &these, &info);
2182 			current->blocked = current->real_blocked;
2183 			siginitset(&current->real_blocked, 0);
2184 			recalc_sigpending();
2185 		}
2186 	}
2187 	spin_unlock_irq(&current->sighand->siglock);
2188 
2189 	if (sig) {
2190 		ret = sig;
2191 		if (uinfo) {
2192 			if (copy_siginfo_to_user(uinfo, &info))
2193 				ret = -EFAULT;
2194 		}
2195 	} else {
2196 		ret = -EAGAIN;
2197 		if (timeout)
2198 			ret = -EINTR;
2199 	}
2200 
2201 	return ret;
2202 }
2203 
2204 asmlinkage long
2205 sys_kill(pid_t pid, int sig)
2206 {
2207 	struct siginfo info;
2208 
2209 	info.si_signo = sig;
2210 	info.si_errno = 0;
2211 	info.si_code = SI_USER;
2212 	info.si_pid = task_tgid_vnr(current);
2213 	info.si_uid = current->uid;
2214 
2215 	return kill_something_info(sig, &info, pid);
2216 }
2217 
2218 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2219 {
2220 	int error;
2221 	struct siginfo info;
2222 	struct task_struct *p;
2223 	unsigned long flags;
2224 
2225 	error = -ESRCH;
2226 	info.si_signo = sig;
2227 	info.si_errno = 0;
2228 	info.si_code = SI_TKILL;
2229 	info.si_pid = task_tgid_vnr(current);
2230 	info.si_uid = current->uid;
2231 
2232 	rcu_read_lock();
2233 	p = find_task_by_vpid(pid);
2234 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2235 		error = check_kill_permission(sig, &info, p);
2236 		/*
2237 		 * The null signal is a permissions and process existence
2238 		 * probe.  No signal is actually delivered.
2239 		 *
2240 		 * If lock_task_sighand() fails we pretend the task dies
2241 		 * after receiving the signal. The window is tiny, and the
2242 		 * signal is private anyway.
2243 		 */
2244 		if (!error && sig && lock_task_sighand(p, &flags)) {
2245 			error = specific_send_sig_info(sig, &info, p);
2246 			unlock_task_sighand(p, &flags);
2247 		}
2248 	}
2249 	rcu_read_unlock();
2250 
2251 	return error;
2252 }
2253 
2254 /**
2255  *  sys_tgkill - send signal to one specific thread
2256  *  @tgid: the thread group ID of the thread
2257  *  @pid: the PID of the thread
2258  *  @sig: signal to be sent
2259  *
2260  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2261  *  exists but it's not belonging to the target process anymore. This
2262  *  method solves the problem of threads exiting and PIDs getting reused.
2263  */
2264 asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig)
2265 {
2266 	/* This is only valid for single tasks */
2267 	if (pid <= 0 || tgid <= 0)
2268 		return -EINVAL;
2269 
2270 	return do_tkill(tgid, pid, sig);
2271 }
2272 
2273 /*
2274  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2275  */
2276 asmlinkage long
2277 sys_tkill(pid_t pid, int sig)
2278 {
2279 	/* This is only valid for single tasks */
2280 	if (pid <= 0)
2281 		return -EINVAL;
2282 
2283 	return do_tkill(0, pid, sig);
2284 }
2285 
2286 asmlinkage long
2287 sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo)
2288 {
2289 	siginfo_t info;
2290 
2291 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2292 		return -EFAULT;
2293 
2294 	/* Not even root can pretend to send signals from the kernel.
2295 	   Nor can they impersonate a kill(), which adds source info.  */
2296 	if (info.si_code >= 0)
2297 		return -EPERM;
2298 	info.si_signo = sig;
2299 
2300 	/* POSIX.1b doesn't mention process groups.  */
2301 	return kill_proc_info(sig, &info, pid);
2302 }
2303 
2304 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2305 {
2306 	struct task_struct *t = current;
2307 	struct k_sigaction *k;
2308 	sigset_t mask;
2309 
2310 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2311 		return -EINVAL;
2312 
2313 	k = &t->sighand->action[sig-1];
2314 
2315 	spin_lock_irq(&current->sighand->siglock);
2316 	if (oact)
2317 		*oact = *k;
2318 
2319 	if (act) {
2320 		sigdelsetmask(&act->sa.sa_mask,
2321 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
2322 		*k = *act;
2323 		/*
2324 		 * POSIX 3.3.1.3:
2325 		 *  "Setting a signal action to SIG_IGN for a signal that is
2326 		 *   pending shall cause the pending signal to be discarded,
2327 		 *   whether or not it is blocked."
2328 		 *
2329 		 *  "Setting a signal action to SIG_DFL for a signal that is
2330 		 *   pending and whose default action is to ignore the signal
2331 		 *   (for example, SIGCHLD), shall cause the pending signal to
2332 		 *   be discarded, whether or not it is blocked"
2333 		 */
2334 		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2335 			sigemptyset(&mask);
2336 			sigaddset(&mask, sig);
2337 			rm_from_queue_full(&mask, &t->signal->shared_pending);
2338 			do {
2339 				rm_from_queue_full(&mask, &t->pending);
2340 				t = next_thread(t);
2341 			} while (t != current);
2342 		}
2343 	}
2344 
2345 	spin_unlock_irq(&current->sighand->siglock);
2346 	return 0;
2347 }
2348 
2349 int
2350 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2351 {
2352 	stack_t oss;
2353 	int error;
2354 
2355 	if (uoss) {
2356 		oss.ss_sp = (void __user *) current->sas_ss_sp;
2357 		oss.ss_size = current->sas_ss_size;
2358 		oss.ss_flags = sas_ss_flags(sp);
2359 	}
2360 
2361 	if (uss) {
2362 		void __user *ss_sp;
2363 		size_t ss_size;
2364 		int ss_flags;
2365 
2366 		error = -EFAULT;
2367 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2368 		    || __get_user(ss_sp, &uss->ss_sp)
2369 		    || __get_user(ss_flags, &uss->ss_flags)
2370 		    || __get_user(ss_size, &uss->ss_size))
2371 			goto out;
2372 
2373 		error = -EPERM;
2374 		if (on_sig_stack(sp))
2375 			goto out;
2376 
2377 		error = -EINVAL;
2378 		/*
2379 		 *
2380 		 * Note - this code used to test ss_flags incorrectly
2381 		 *  	  old code may have been written using ss_flags==0
2382 		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
2383 		 *	  way that worked) - this fix preserves that older
2384 		 *	  mechanism
2385 		 */
2386 		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2387 			goto out;
2388 
2389 		if (ss_flags == SS_DISABLE) {
2390 			ss_size = 0;
2391 			ss_sp = NULL;
2392 		} else {
2393 			error = -ENOMEM;
2394 			if (ss_size < MINSIGSTKSZ)
2395 				goto out;
2396 		}
2397 
2398 		current->sas_ss_sp = (unsigned long) ss_sp;
2399 		current->sas_ss_size = ss_size;
2400 	}
2401 
2402 	if (uoss) {
2403 		error = -EFAULT;
2404 		if (copy_to_user(uoss, &oss, sizeof(oss)))
2405 			goto out;
2406 	}
2407 
2408 	error = 0;
2409 out:
2410 	return error;
2411 }
2412 
2413 #ifdef __ARCH_WANT_SYS_SIGPENDING
2414 
2415 asmlinkage long
2416 sys_sigpending(old_sigset_t __user *set)
2417 {
2418 	return do_sigpending(set, sizeof(*set));
2419 }
2420 
2421 #endif
2422 
2423 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2424 /* Some platforms have their own version with special arguments others
2425    support only sys_rt_sigprocmask.  */
2426 
2427 asmlinkage long
2428 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2429 {
2430 	int error;
2431 	old_sigset_t old_set, new_set;
2432 
2433 	if (set) {
2434 		error = -EFAULT;
2435 		if (copy_from_user(&new_set, set, sizeof(*set)))
2436 			goto out;
2437 		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2438 
2439 		spin_lock_irq(&current->sighand->siglock);
2440 		old_set = current->blocked.sig[0];
2441 
2442 		error = 0;
2443 		switch (how) {
2444 		default:
2445 			error = -EINVAL;
2446 			break;
2447 		case SIG_BLOCK:
2448 			sigaddsetmask(&current->blocked, new_set);
2449 			break;
2450 		case SIG_UNBLOCK:
2451 			sigdelsetmask(&current->blocked, new_set);
2452 			break;
2453 		case SIG_SETMASK:
2454 			current->blocked.sig[0] = new_set;
2455 			break;
2456 		}
2457 
2458 		recalc_sigpending();
2459 		spin_unlock_irq(&current->sighand->siglock);
2460 		if (error)
2461 			goto out;
2462 		if (oset)
2463 			goto set_old;
2464 	} else if (oset) {
2465 		old_set = current->blocked.sig[0];
2466 	set_old:
2467 		error = -EFAULT;
2468 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2469 			goto out;
2470 	}
2471 	error = 0;
2472 out:
2473 	return error;
2474 }
2475 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2476 
2477 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2478 asmlinkage long
2479 sys_rt_sigaction(int sig,
2480 		 const struct sigaction __user *act,
2481 		 struct sigaction __user *oact,
2482 		 size_t sigsetsize)
2483 {
2484 	struct k_sigaction new_sa, old_sa;
2485 	int ret = -EINVAL;
2486 
2487 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2488 	if (sigsetsize != sizeof(sigset_t))
2489 		goto out;
2490 
2491 	if (act) {
2492 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2493 			return -EFAULT;
2494 	}
2495 
2496 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2497 
2498 	if (!ret && oact) {
2499 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2500 			return -EFAULT;
2501 	}
2502 out:
2503 	return ret;
2504 }
2505 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2506 
2507 #ifdef __ARCH_WANT_SYS_SGETMASK
2508 
2509 /*
2510  * For backwards compatibility.  Functionality superseded by sigprocmask.
2511  */
2512 asmlinkage long
2513 sys_sgetmask(void)
2514 {
2515 	/* SMP safe */
2516 	return current->blocked.sig[0];
2517 }
2518 
2519 asmlinkage long
2520 sys_ssetmask(int newmask)
2521 {
2522 	int old;
2523 
2524 	spin_lock_irq(&current->sighand->siglock);
2525 	old = current->blocked.sig[0];
2526 
2527 	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2528 						  sigmask(SIGSTOP)));
2529 	recalc_sigpending();
2530 	spin_unlock_irq(&current->sighand->siglock);
2531 
2532 	return old;
2533 }
2534 #endif /* __ARCH_WANT_SGETMASK */
2535 
2536 #ifdef __ARCH_WANT_SYS_SIGNAL
2537 /*
2538  * For backwards compatibility.  Functionality superseded by sigaction.
2539  */
2540 asmlinkage unsigned long
2541 sys_signal(int sig, __sighandler_t handler)
2542 {
2543 	struct k_sigaction new_sa, old_sa;
2544 	int ret;
2545 
2546 	new_sa.sa.sa_handler = handler;
2547 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2548 	sigemptyset(&new_sa.sa.sa_mask);
2549 
2550 	ret = do_sigaction(sig, &new_sa, &old_sa);
2551 
2552 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2553 }
2554 #endif /* __ARCH_WANT_SYS_SIGNAL */
2555 
2556 #ifdef __ARCH_WANT_SYS_PAUSE
2557 
2558 asmlinkage long
2559 sys_pause(void)
2560 {
2561 	current->state = TASK_INTERRUPTIBLE;
2562 	schedule();
2563 	return -ERESTARTNOHAND;
2564 }
2565 
2566 #endif
2567 
2568 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2569 asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2570 {
2571 	sigset_t newset;
2572 
2573 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2574 	if (sigsetsize != sizeof(sigset_t))
2575 		return -EINVAL;
2576 
2577 	if (copy_from_user(&newset, unewset, sizeof(newset)))
2578 		return -EFAULT;
2579 	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2580 
2581 	spin_lock_irq(&current->sighand->siglock);
2582 	current->saved_sigmask = current->blocked;
2583 	current->blocked = newset;
2584 	recalc_sigpending();
2585 	spin_unlock_irq(&current->sighand->siglock);
2586 
2587 	current->state = TASK_INTERRUPTIBLE;
2588 	schedule();
2589 	set_restore_sigmask();
2590 	return -ERESTARTNOHAND;
2591 }
2592 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
2593 
2594 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2595 {
2596 	return NULL;
2597 }
2598 
2599 void __init signals_init(void)
2600 {
2601 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2602 }
2603