xref: /linux/kernel/signal.c (revision 1795cf48b322b4d19230a40dbe7181acedd34a94)
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *		Changes to use preallocated sigqueue structures
10  *		to allow signals to be sent reliably.
11  */
12 
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/security.h>
21 #include <linux/syscalls.h>
22 #include <linux/ptrace.h>
23 #include <linux/signal.h>
24 #include <linux/signalfd.h>
25 #include <linux/tracehook.h>
26 #include <linux/capability.h>
27 #include <linux/freezer.h>
28 #include <linux/pid_namespace.h>
29 #include <linux/nsproxy.h>
30 
31 #include <asm/param.h>
32 #include <asm/uaccess.h>
33 #include <asm/unistd.h>
34 #include <asm/siginfo.h>
35 #include "audit.h"	/* audit_signal_info() */
36 
37 /*
38  * SLAB caches for signal bits.
39  */
40 
41 static struct kmem_cache *sigqueue_cachep;
42 
43 static void __user *sig_handler(struct task_struct *t, int sig)
44 {
45 	return t->sighand->action[sig - 1].sa.sa_handler;
46 }
47 
48 static int sig_handler_ignored(void __user *handler, int sig)
49 {
50 	/* Is it explicitly or implicitly ignored? */
51 	return handler == SIG_IGN ||
52 		(handler == SIG_DFL && sig_kernel_ignore(sig));
53 }
54 
55 static int sig_ignored(struct task_struct *t, int sig)
56 {
57 	void __user *handler;
58 
59 	/*
60 	 * Blocked signals are never ignored, since the
61 	 * signal handler may change by the time it is
62 	 * unblocked.
63 	 */
64 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
65 		return 0;
66 
67 	handler = sig_handler(t, sig);
68 	if (!sig_handler_ignored(handler, sig))
69 		return 0;
70 
71 	/*
72 	 * Tracers may want to know about even ignored signals.
73 	 */
74 	return !tracehook_consider_ignored_signal(t, sig, handler);
75 }
76 
77 /*
78  * Re-calculate pending state from the set of locally pending
79  * signals, globally pending signals, and blocked signals.
80  */
81 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
82 {
83 	unsigned long ready;
84 	long i;
85 
86 	switch (_NSIG_WORDS) {
87 	default:
88 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
89 			ready |= signal->sig[i] &~ blocked->sig[i];
90 		break;
91 
92 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
93 		ready |= signal->sig[2] &~ blocked->sig[2];
94 		ready |= signal->sig[1] &~ blocked->sig[1];
95 		ready |= signal->sig[0] &~ blocked->sig[0];
96 		break;
97 
98 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
99 		ready |= signal->sig[0] &~ blocked->sig[0];
100 		break;
101 
102 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
103 	}
104 	return ready != 0;
105 }
106 
107 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
108 
109 static int recalc_sigpending_tsk(struct task_struct *t)
110 {
111 	if (t->signal->group_stop_count > 0 ||
112 	    PENDING(&t->pending, &t->blocked) ||
113 	    PENDING(&t->signal->shared_pending, &t->blocked)) {
114 		set_tsk_thread_flag(t, TIF_SIGPENDING);
115 		return 1;
116 	}
117 	/*
118 	 * We must never clear the flag in another thread, or in current
119 	 * when it's possible the current syscall is returning -ERESTART*.
120 	 * So we don't clear it here; only callers that know they should clear it do so.
121 	 */
122 	return 0;
123 }
124 
125 /*
126  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
127  * This is superfluous when called on current, the wakeup is a harmless no-op.
128  */
129 void recalc_sigpending_and_wake(struct task_struct *t)
130 {
131 	if (recalc_sigpending_tsk(t))
132 		signal_wake_up(t, 0);
133 }
134 
135 void recalc_sigpending(void)
136 {
137 	if (unlikely(tracehook_force_sigpending()))
138 		set_thread_flag(TIF_SIGPENDING);
139 	else if (!recalc_sigpending_tsk(current) && !freezing(current))
140 		clear_thread_flag(TIF_SIGPENDING);
141 
142 }
143 
144 /* Given the mask, find the first available signal that should be serviced. */
145 
146 int next_signal(struct sigpending *pending, sigset_t *mask)
147 {
148 	unsigned long i, *s, *m, x;
149 	int sig = 0;
150 
151 	s = pending->signal.sig;
152 	m = mask->sig;
153 	switch (_NSIG_WORDS) {
154 	default:
155 		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
156 			if ((x = *s &~ *m) != 0) {
157 				sig = ffz(~x) + i*_NSIG_BPW + 1;
158 				break;
159 			}
160 		break;
161 
162 	case 2: if ((x = s[0] &~ m[0]) != 0)
163 			sig = 1;
164 		else if ((x = s[1] &~ m[1]) != 0)
165 			sig = _NSIG_BPW + 1;
166 		else
167 			break;
168 		sig += ffz(~x);
169 		break;
170 
171 	case 1: if ((x = *s &~ *m) != 0)
172 			sig = ffz(~x) + 1;
173 		break;
174 	}
175 
176 	return sig;
177 }
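/*
 * Editorial note, not part of the original file: a concrete example of the
 * function above.  If SIGINT (2) and SIGTERM (15) are both pending and the
 * caller's mask blocks SIGINT, next_signal() skips the blocked bit and
 * returns 15 (SIGTERM); if every pending signal is blocked it returns 0.
 */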
178 
179 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
180 					 int override_rlimit)
181 {
182 	struct sigqueue *q = NULL;
183 	struct user_struct *user;
184 
185 	/*
186 	 * In order to avoid problems with "switch_user()", we want to make
187 	 * sure that the compiler doesn't re-load "t->user"
188 	 */
189 	user = t->user;
190 	barrier();
191 	atomic_inc(&user->sigpending);
192 	if (override_rlimit ||
193 	    atomic_read(&user->sigpending) <=
194 			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
195 		q = kmem_cache_alloc(sigqueue_cachep, flags);
196 	if (unlikely(q == NULL)) {
197 		atomic_dec(&user->sigpending);
198 	} else {
199 		INIT_LIST_HEAD(&q->list);
200 		q->flags = 0;
201 		q->user = get_uid(user);
202 	}
203 	return(q);
204 }
205 
206 static void __sigqueue_free(struct sigqueue *q)
207 {
208 	if (q->flags & SIGQUEUE_PREALLOC)
209 		return;
210 	atomic_dec(&q->user->sigpending);
211 	free_uid(q->user);
212 	kmem_cache_free(sigqueue_cachep, q);
213 }
214 
215 void flush_sigqueue(struct sigpending *queue)
216 {
217 	struct sigqueue *q;
218 
219 	sigemptyset(&queue->signal);
220 	while (!list_empty(&queue->list)) {
221 		q = list_entry(queue->list.next, struct sigqueue , list);
222 		list_del_init(&q->list);
223 		__sigqueue_free(q);
224 	}
225 }
226 
227 /*
228  * Flush all pending signals for a task.
229  */
230 void flush_signals(struct task_struct *t)
231 {
232 	unsigned long flags;
233 
234 	spin_lock_irqsave(&t->sighand->siglock, flags);
235 	clear_tsk_thread_flag(t, TIF_SIGPENDING);
236 	flush_sigqueue(&t->pending);
237 	flush_sigqueue(&t->signal->shared_pending);
238 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
239 }
240 
241 static void __flush_itimer_signals(struct sigpending *pending)
242 {
243 	sigset_t signal, retain;
244 	struct sigqueue *q, *n;
245 
246 	signal = pending->signal;
247 	sigemptyset(&retain);
248 
249 	list_for_each_entry_safe(q, n, &pending->list, list) {
250 		int sig = q->info.si_signo;
251 
252 		if (likely(q->info.si_code != SI_TIMER)) {
253 			sigaddset(&retain, sig);
254 		} else {
255 			sigdelset(&signal, sig);
256 			list_del_init(&q->list);
257 			__sigqueue_free(q);
258 		}
259 	}
260 
261 	sigorsets(&pending->signal, &signal, &retain);
262 }
263 
264 void flush_itimer_signals(void)
265 {
266 	struct task_struct *tsk = current;
267 	unsigned long flags;
268 
269 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
270 	__flush_itimer_signals(&tsk->pending);
271 	__flush_itimer_signals(&tsk->signal->shared_pending);
272 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
273 }
274 
275 void ignore_signals(struct task_struct *t)
276 {
277 	int i;
278 
279 	for (i = 0; i < _NSIG; ++i)
280 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
281 
282 	flush_signals(t);
283 }
284 
285 /*
286  * Flush all handlers for a task.
287  */
288 
289 void
290 flush_signal_handlers(struct task_struct *t, int force_default)
291 {
292 	int i;
293 	struct k_sigaction *ka = &t->sighand->action[0];
294 	for (i = _NSIG ; i != 0 ; i--) {
295 		if (force_default || ka->sa.sa_handler != SIG_IGN)
296 			ka->sa.sa_handler = SIG_DFL;
297 		ka->sa.sa_flags = 0;
298 		sigemptyset(&ka->sa.sa_mask);
299 		ka++;
300 	}
301 }
302 
303 int unhandled_signal(struct task_struct *tsk, int sig)
304 {
305 	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
306 	if (is_global_init(tsk))
307 		return 1;
308 	if (handler != SIG_IGN && handler != SIG_DFL)
309 		return 0;
310 	return !tracehook_consider_fatal_signal(tsk, sig, handler);
311 }
312 
313 
314 /* Notify the system that a driver wants to block all signals for this
315  * process, and wants to be notified if any signals at all were to be
316  * sent/acted upon.  If the notifier routine returns non-zero, then the
317  * signal will be acted upon after all.  If the notifier routine returns 0,
318  * then the signal will be blocked.  Only one block per process is
319  * allowed.  priv is a pointer to private data that the notifier routine
320  * can use to determine if the signal should be blocked or not.  */
321 
322 void
323 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
324 {
325 	unsigned long flags;
326 
327 	spin_lock_irqsave(&current->sighand->siglock, flags);
328 	current->notifier_mask = mask;
329 	current->notifier_data = priv;
330 	current->notifier = notifier;
331 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
332 }
333 
334 /* Notify the system that blocking has ended. */
335 
336 void
337 unblock_all_signals(void)
338 {
339 	unsigned long flags;
340 
341 	spin_lock_irqsave(&current->sighand->siglock, flags);
342 	current->notifier = NULL;
343 	current->notifier_data = NULL;
344 	recalc_sigpending();
345 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
346 }
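/*
 * Editorial sketch, not part of the original file: roughly how a driver
 * might use the notifier hooks above.  The my_dev structure and the helper
 * names are hypothetical; only block_all_signals()/unblock_all_signals()
 * and sigfillset() are real interfaces.
 */
#if 0
static int my_notifier(void *priv)
{
	struct my_dev *dev = priv;

	/* Non-zero: let the signal be acted upon; 0: keep it blocked. */
	return !dev->in_critical_io;
}

static void my_do_critical_io(struct my_dev *dev)
{
	sigset_t mask;

	sigfillset(&mask);
	block_all_signals(my_notifier, dev, &mask);
	dev->in_critical_io = 1;
	/* ... I/O that must not be disturbed by signal delivery ... */
	dev->in_critical_io = 0;
	unblock_all_signals();
}
#endif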
347 
348 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
349 {
350 	struct sigqueue *q, *first = NULL;
351 
352 	/*
353 	 * Collect the siginfo appropriate to this signal.  Check if
354 	 * there is another siginfo for the same signal.
355 	*/
356 	list_for_each_entry(q, &list->list, list) {
357 		if (q->info.si_signo == sig) {
358 			if (first)
359 				goto still_pending;
360 			first = q;
361 		}
362 	}
363 
364 	sigdelset(&list->signal, sig);
365 
366 	if (first) {
367 still_pending:
368 		list_del_init(&first->list);
369 		copy_siginfo(info, &first->info);
370 		__sigqueue_free(first);
371 	} else {
372 		/* Ok, it wasn't in the queue.  This must be
373 		   a fast-pathed signal or we must have been
374 		   out of queue space.  So zero out the info.
375 		 */
376 		info->si_signo = sig;
377 		info->si_errno = 0;
378 		info->si_code = 0;
379 		info->si_pid = 0;
380 		info->si_uid = 0;
381 	}
382 }
383 
384 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
385 			siginfo_t *info)
386 {
387 	int sig = next_signal(pending, mask);
388 
389 	if (sig) {
390 		if (current->notifier) {
391 			if (sigismember(current->notifier_mask, sig)) {
392 				if (!(current->notifier)(current->notifier_data)) {
393 					clear_thread_flag(TIF_SIGPENDING);
394 					return 0;
395 				}
396 			}
397 		}
398 
399 		collect_signal(sig, pending, info);
400 	}
401 
402 	return sig;
403 }
404 
405 /*
406  * Dequeue a signal and return the element to the caller, which is
407  * expected to free it.
408  *
409  * All callers have to hold the siglock.
410  */
411 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
412 {
413 	int signr;
414 
415 	/* We only dequeue private signals from ourselves, we don't let
416 	 * signalfd steal them
417 	 */
418 	signr = __dequeue_signal(&tsk->pending, mask, info);
419 	if (!signr) {
420 		signr = __dequeue_signal(&tsk->signal->shared_pending,
421 					 mask, info);
422 		/*
423 		 * itimer signal ?
424 		 *
425 		 * itimers are process shared and we restart periodic
426 		 * itimers in the signal delivery path to prevent DoS
427 		 * attacks in the high resolution timer case. This is
428 		 * compliant with the old way of self restarting
429 		 * itimers, as the SIGALRM is a legacy signal and only
430 		 * queued once. Changing the restart behaviour to
431 		 * restart the timer in the signal dequeue path is
432 		 * reducing the timer noise on heavy loaded !highres
433 		 * systems too.
434 		 */
435 		if (unlikely(signr == SIGALRM)) {
436 			struct hrtimer *tmr = &tsk->signal->real_timer;
437 
438 			if (!hrtimer_is_queued(tmr) &&
439 			    tsk->signal->it_real_incr.tv64 != 0) {
440 				hrtimer_forward(tmr, tmr->base->get_time(),
441 						tsk->signal->it_real_incr);
442 				hrtimer_restart(tmr);
443 			}
444 		}
445 	}
446 
447 	recalc_sigpending();
448 	if (!signr)
449 		return 0;
450 
451 	if (unlikely(sig_kernel_stop(signr))) {
452 		/*
453 		 * Set a marker that we have dequeued a stop signal.  Our
454 		 * caller might release the siglock and then the pending
455 		 * stop signal it is about to process is no longer in the
456 		 * pending bitmasks, but must still be cleared by a SIGCONT
457 		 * (and overruled by a SIGKILL).  So those cases clear this
458 		 * shared flag after we've set it.  Note that this flag may
459 		 * remain set after the signal we return is ignored or
460 		 * handled.  That doesn't matter because its only purpose
461 		 * is to alert stop-signal processing code when another
462 		 * processor has come along and cleared the flag.
463 		 */
464 		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
465 	}
466 	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
467 		/*
468 		 * Release the siglock to ensure proper locking order
469 		 * of timer locks outside of siglocks.  Note, we leave
470 		 * irqs disabled here, since the posix-timers code is
471 		 * about to disable them again anyway.
472 		 */
473 		spin_unlock(&tsk->sighand->siglock);
474 		do_schedule_next_timer(info);
475 		spin_lock(&tsk->sighand->siglock);
476 	}
477 	return signr;
478 }
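/*
 * Editorial sketch, not part of the original file: the canonical calling
 * pattern for dequeue_signal(), the same shape used by the signal delivery
 * path below; the siglock must be held around the call.
 */
#if 0
	siginfo_t info;
	int signr;

	spin_lock_irq(&current->sighand->siglock);
	signr = dequeue_signal(current, &current->blocked, &info);
	spin_unlock_irq(&current->sighand->siglock);
#endif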
479 
480 /*
481  * Tell a process that it has a new active signal.
482  *
483  * NOTE! we rely on the previous spin_lock to
484  * lock interrupts for us! We can only be called with
485  * "siglock" held, and the local interrupt must
486  * have been disabled when that got acquired!
487  *
488  * No need to set need_resched since signal event passing
489  * goes through ->blocked
490  */
491 void signal_wake_up(struct task_struct *t, int resume)
492 {
493 	unsigned int mask;
494 
495 	set_tsk_thread_flag(t, TIF_SIGPENDING);
496 
497 	/*
498 	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
499 	 * case. We don't check t->state here because there is a race with it
500  * executing on another processor and just now entering stopped state.
501 	 * By using wake_up_state, we ensure the process will wake up and
502 	 * handle its death signal.
503 	 */
504 	mask = TASK_INTERRUPTIBLE;
505 	if (resume)
506 		mask |= TASK_WAKEKILL;
507 	if (!wake_up_state(t, mask))
508 		kick_process(t);
509 }
510 
511 /*
512  * Remove signals in mask from the pending set and queue.
513  * Returns 1 if any signals were found.
514  *
515  * All callers must be holding the siglock.
516  *
517  * This version takes a sigset mask and looks at all signals,
518  * not just those in the first mask word.
519  */
520 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
521 {
522 	struct sigqueue *q, *n;
523 	sigset_t m;
524 
525 	sigandsets(&m, mask, &s->signal);
526 	if (sigisemptyset(&m))
527 		return 0;
528 
529 	signandsets(&s->signal, &s->signal, mask);
530 	list_for_each_entry_safe(q, n, &s->list, list) {
531 		if (sigismember(mask, q->info.si_signo)) {
532 			list_del_init(&q->list);
533 			__sigqueue_free(q);
534 		}
535 	}
536 	return 1;
537 }
538 /*
539  * Remove signals in mask from the pending set and queue.
540  * Returns 1 if any signals were found.
541  *
542  * All callers must be holding the siglock.
543  */
544 static int rm_from_queue(unsigned long mask, struct sigpending *s)
545 {
546 	struct sigqueue *q, *n;
547 
548 	if (!sigtestsetmask(&s->signal, mask))
549 		return 0;
550 
551 	sigdelsetmask(&s->signal, mask);
552 	list_for_each_entry_safe(q, n, &s->list, list) {
553 		if (q->info.si_signo < SIGRTMIN &&
554 		    (mask & sigmask(q->info.si_signo))) {
555 			list_del_init(&q->list);
556 			__sigqueue_free(q);
557 		}
558 	}
559 	return 1;
560 }
561 
562 /*
563  * Bad permissions for sending the signal
564  */
565 static int check_kill_permission(int sig, struct siginfo *info,
566 				 struct task_struct *t)
567 {
568 	struct pid *sid;
569 	int error;
570 
571 	if (!valid_signal(sig))
572 		return -EINVAL;
573 
574 	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
575 		return 0;
576 
577 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
578 	if (error)
579 		return error;
580 
581 	if ((current->euid ^ t->suid) && (current->euid ^ t->uid) &&
582 	    (current->uid  ^ t->suid) && (current->uid  ^ t->uid) &&
583 	    !capable(CAP_KILL)) {
584 		switch (sig) {
585 		case SIGCONT:
586 			sid = task_session(t);
587 			/*
588 			 * We don't return the error if sid == NULL. The
589 			 * task was unhashed, the caller must notice this.
590 			 */
591 			if (!sid || sid == task_session(current))
592 				break;
593 		default:
594 			return -EPERM;
595 		}
596 	}
597 
598 	return security_task_kill(t, info, sig, 0);
599 }
600 
601 /*
602  * Handle magic process-wide effects of stop/continue signals. Unlike
603  * the signal actions, these happen immediately at signal-generation
604  * time regardless of blocking, ignoring, or handling.  This does the
605  * actual continuing for SIGCONT, but not the actual stopping for stop
606  * signals. The process stop is done as a signal action for SIG_DFL.
607  *
608  * Returns true if the signal should be actually delivered, otherwise
609  * it should be dropped.
610  */
611 static int prepare_signal(int sig, struct task_struct *p)
612 {
613 	struct signal_struct *signal = p->signal;
614 	struct task_struct *t;
615 
616 	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
617 		/*
618 		 * The process is in the middle of dying, nothing to do.
619 		 */
620 	} else if (sig_kernel_stop(sig)) {
621 		/*
622 		 * This is a stop signal.  Remove SIGCONT from all queues.
623 		 */
624 		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
625 		t = p;
626 		do {
627 			rm_from_queue(sigmask(SIGCONT), &t->pending);
628 		} while_each_thread(p, t);
629 	} else if (sig == SIGCONT) {
630 		unsigned int why;
631 		/*
632 		 * Remove all stop signals from all queues,
633 		 * and wake all threads.
634 		 */
635 		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
636 		t = p;
637 		do {
638 			unsigned int state;
639 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
640 			/*
641 			 * If there is a handler for SIGCONT, we must make
642 			 * sure that no thread returns to user mode before
643 			 * we post the signal, in case it was the only
644 			 * thread eligible to run the signal handler--then
645 			 * it must not do anything between resuming and
646 			 * running the handler.  With the TIF_SIGPENDING
647 			 * flag set, the thread will pause and acquire the
648 			 * siglock that we hold now and until we've queued
649 			 * the pending signal.
650 			 *
651 			 * Wake up the stopped thread _after_ setting
652 			 * TIF_SIGPENDING
653 			 */
654 			state = __TASK_STOPPED;
655 			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
656 				set_tsk_thread_flag(t, TIF_SIGPENDING);
657 				state |= TASK_INTERRUPTIBLE;
658 			}
659 			wake_up_state(t, state);
660 		} while_each_thread(p, t);
661 
662 		/*
663 		 * Notify the parent with CLD_CONTINUED if we were stopped.
664 		 *
665 		 * If we were in the middle of a group stop, we pretend it
666 		 * was already finished, and then continued. Since SIGCHLD
667 		 * doesn't queue we report only CLD_STOPPED, as if the next
668 		 * CLD_CONTINUED was dropped.
669 		 */
670 		why = 0;
671 		if (signal->flags & SIGNAL_STOP_STOPPED)
672 			why |= SIGNAL_CLD_CONTINUED;
673 		else if (signal->group_stop_count)
674 			why |= SIGNAL_CLD_STOPPED;
675 
676 		if (why) {
677 			/*
678 			 * The first thread which returns from finish_stop()
679 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
680 			 * notify its parent. See get_signal_to_deliver().
681 			 */
682 			signal->flags = why | SIGNAL_STOP_CONTINUED;
683 			signal->group_stop_count = 0;
684 			signal->group_exit_code = 0;
685 		} else {
686 			/*
687 			 * We are not stopped, but there could be a stop
688 			 * signal in the middle of being processed after
689 			 * being removed from the queue.  Clear that too.
690 			 */
691 			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
692 		}
693 	}
694 
695 	return !sig_ignored(p, sig);
696 }
697 
698 /*
699  * Test if P wants to take SIG.  After we've checked all threads with this,
700  * it's equivalent to finding no threads not blocking SIG.  Any threads not
701  * blocking SIG were ruled out because they are not running and already
702  * have pending signals.  Such threads will dequeue from the shared queue
703  * as soon as they're available, so putting the signal on the shared queue
704  * will be equivalent to sending it to one such thread.
705  */
706 static inline int wants_signal(int sig, struct task_struct *p)
707 {
708 	if (sigismember(&p->blocked, sig))
709 		return 0;
710 	if (p->flags & PF_EXITING)
711 		return 0;
712 	if (sig == SIGKILL)
713 		return 1;
714 	if (task_is_stopped_or_traced(p))
715 		return 0;
716 	return task_curr(p) || !signal_pending(p);
717 }
718 
719 static void complete_signal(int sig, struct task_struct *p, int group)
720 {
721 	struct signal_struct *signal = p->signal;
722 	struct task_struct *t;
723 
724 	/*
725 	 * Now find a thread we can wake up to take the signal off the queue.
726 	 *
727 	 * If the main thread wants the signal, it gets first crack.
728 	 * Probably the least surprising to the average bear.
729 	 */
730 	if (wants_signal(sig, p))
731 		t = p;
732 	else if (!group || thread_group_empty(p))
733 		/*
734 		 * There is just one thread and it does not need to be woken.
735 		 * It will dequeue unblocked signals before it runs again.
736 		 */
737 		return;
738 	else {
739 		/*
740 		 * Otherwise try to find a suitable thread.
741 		 */
742 		t = signal->curr_target;
743 		while (!wants_signal(sig, t)) {
744 			t = next_thread(t);
745 			if (t == signal->curr_target)
746 				/*
747 				 * No thread needs to be woken.
748 				 * Any eligible threads will see
749 				 * the signal in the queue soon.
750 				 */
751 				return;
752 		}
753 		signal->curr_target = t;
754 	}
755 
756 	/*
757 	 * Found a killable thread.  If the signal will be fatal,
758 	 * then start taking the whole group down immediately.
759 	 */
760 	if (sig_fatal(p, sig) &&
761 	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
762 	    !sigismember(&t->real_blocked, sig) &&
763 	    (sig == SIGKILL ||
764 	     !tracehook_consider_fatal_signal(t, sig, SIG_DFL))) {
765 		/*
766 		 * This signal will be fatal to the whole group.
767 		 */
768 		if (!sig_kernel_coredump(sig)) {
769 			/*
770 			 * Start a group exit and wake everybody up.
771 			 * This way we don't have other threads
772 			 * running and doing things after a slower
773 			 * thread has the fatal signal pending.
774 			 */
775 			signal->flags = SIGNAL_GROUP_EXIT;
776 			signal->group_exit_code = sig;
777 			signal->group_stop_count = 0;
778 			t = p;
779 			do {
780 				sigaddset(&t->pending.signal, SIGKILL);
781 				signal_wake_up(t, 1);
782 			} while_each_thread(p, t);
783 			return;
784 		}
785 	}
786 
787 	/*
788 	 * The signal is already in the shared-pending queue.
789 	 * Tell the chosen thread to wake up and dequeue it.
790 	 */
791 	signal_wake_up(t, sig == SIGKILL);
792 	return;
793 }
794 
795 static inline int legacy_queue(struct sigpending *signals, int sig)
796 {
797 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
798 }
799 
800 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
801 			int group)
802 {
803 	struct sigpending *pending;
804 	struct sigqueue *q;
805 
806 	assert_spin_locked(&t->sighand->siglock);
807 	if (!prepare_signal(sig, t))
808 		return 0;
809 
810 	pending = group ? &t->signal->shared_pending : &t->pending;
811 	/*
812 	 * Short-circuit ignored signals and support queuing
813 	 * exactly one non-rt signal, so that we can get more
814 	 * detailed information about the cause of the signal.
815 	 */
816 	if (legacy_queue(pending, sig))
817 		return 0;
818 	/*
819 	 * fast-pathed signals for kernel-internal things like SIGSTOP
820 	 * or SIGKILL.
821 	 */
822 	if (info == SEND_SIG_FORCED)
823 		goto out_set;
824 
825 	/* Real-time signals must be queued if sent by sigqueue, or
826 	   some other real-time mechanism.  It is implementation
827 	   defined whether kill() does so.  We attempt to do so, on
828 	   the principle of least surprise, but since kill is not
829 	   allowed to fail with EAGAIN when low on memory we just
830 	   make sure at least one signal gets delivered and don't
831 	   pass on the info struct.  */
832 
833 	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
834 					     (is_si_special(info) ||
835 					      info->si_code >= 0)));
836 	if (q) {
837 		list_add_tail(&q->list, &pending->list);
838 		switch ((unsigned long) info) {
839 		case (unsigned long) SEND_SIG_NOINFO:
840 			q->info.si_signo = sig;
841 			q->info.si_errno = 0;
842 			q->info.si_code = SI_USER;
843 			q->info.si_pid = task_pid_vnr(current);
844 			q->info.si_uid = current->uid;
845 			break;
846 		case (unsigned long) SEND_SIG_PRIV:
847 			q->info.si_signo = sig;
848 			q->info.si_errno = 0;
849 			q->info.si_code = SI_KERNEL;
850 			q->info.si_pid = 0;
851 			q->info.si_uid = 0;
852 			break;
853 		default:
854 			copy_siginfo(&q->info, info);
855 			break;
856 		}
857 	} else if (!is_si_special(info)) {
858 		if (sig >= SIGRTMIN && info->si_code != SI_USER)
859 		/*
860 		 * Queue overflow, abort.  We may abort if the signal was rt
861 		 * and sent by user using something other than kill().
862 		 */
863 			return -EAGAIN;
864 	}
865 
866 out_set:
867 	signalfd_notify(t, sig);
868 	sigaddset(&pending->signal, sig);
869 	complete_signal(sig, t, group);
870 	return 0;
871 }
872 
873 int print_fatal_signals;
874 
875 static void print_fatal_signal(struct pt_regs *regs, int signr)
876 {
877 	printk("%s/%d: potentially unexpected fatal signal %d.\n",
878 		current->comm, task_pid_nr(current), signr);
879 
880 #if defined(__i386__) && !defined(__arch_um__)
881 	printk("code at %08lx: ", regs->ip);
882 	{
883 		int i;
884 		for (i = 0; i < 16; i++) {
885 			unsigned char insn;
886 
887 			__get_user(insn, (unsigned char *)(regs->ip + i));
888 			printk("%02x ", insn);
889 		}
890 	}
891 #endif
892 	printk("\n");
893 	show_regs(regs);
894 }
895 
896 static int __init setup_print_fatal_signals(char *str)
897 {
898 	get_option (&str, &print_fatal_signals);
899 
900 	return 1;
901 }
902 
903 __setup("print-fatal-signals=", setup_print_fatal_signals);
904 
905 int
906 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
907 {
908 	return send_signal(sig, info, p, 1);
909 }
910 
911 static int
912 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
913 {
914 	return send_signal(sig, info, t, 0);
915 }
916 
917 /*
918  * Force a signal that the process can't ignore: if necessary
919  * we unblock the signal and change any SIG_IGN to SIG_DFL.
920  *
921  * Note: If we unblock the signal, we always reset it to SIG_DFL,
922  * since we do not want to have a signal handler that was blocked
923  * be invoked when user space had explicitly blocked it.
924  *
925  * We don't want to have recursive SIGSEGV's etc, for example,
926  * that is why we also clear SIGNAL_UNKILLABLE.
927  */
928 int
929 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
930 {
931 	unsigned long int flags;
932 	int ret, blocked, ignored;
933 	struct k_sigaction *action;
934 
935 	spin_lock_irqsave(&t->sighand->siglock, flags);
936 	action = &t->sighand->action[sig-1];
937 	ignored = action->sa.sa_handler == SIG_IGN;
938 	blocked = sigismember(&t->blocked, sig);
939 	if (blocked || ignored) {
940 		action->sa.sa_handler = SIG_DFL;
941 		if (blocked) {
942 			sigdelset(&t->blocked, sig);
943 			recalc_sigpending_and_wake(t);
944 		}
945 	}
946 	if (action->sa.sa_handler == SIG_DFL)
947 		t->signal->flags &= ~SIGNAL_UNKILLABLE;
948 	ret = specific_send_sig_info(sig, info, t);
949 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
950 
951 	return ret;
952 }
953 
954 void
955 force_sig_specific(int sig, struct task_struct *t)
956 {
957 	force_sig_info(sig, SEND_SIG_FORCED, t);
958 }
959 
960 /*
961  * Nuke all other threads in the group.
962  */
963 void zap_other_threads(struct task_struct *p)
964 {
965 	struct task_struct *t;
966 
967 	p->signal->group_stop_count = 0;
968 
969 	for (t = next_thread(p); t != p; t = next_thread(t)) {
970 		/*
971 		 * Don't bother with already dead threads
972 		 */
973 		if (t->exit_state)
974 			continue;
975 
976 		/* SIGKILL will be handled before any pending SIGSTOP */
977 		sigaddset(&t->pending.signal, SIGKILL);
978 		signal_wake_up(t, 1);
979 	}
980 }
981 
982 int __fatal_signal_pending(struct task_struct *tsk)
983 {
984 	return sigismember(&tsk->pending.signal, SIGKILL);
985 }
986 EXPORT_SYMBOL(__fatal_signal_pending);
987 
988 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
989 {
990 	struct sighand_struct *sighand;
991 
992 	rcu_read_lock();
993 	for (;;) {
994 		sighand = rcu_dereference(tsk->sighand);
995 		if (unlikely(sighand == NULL))
996 			break;
997 
998 		spin_lock_irqsave(&sighand->siglock, *flags);
999 		if (likely(sighand == tsk->sighand))
1000 			break;
1001 		spin_unlock_irqrestore(&sighand->siglock, *flags);
1002 	}
1003 	rcu_read_unlock();
1004 
1005 	return sighand;
1006 }
1007 
1008 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1009 {
1010 	unsigned long flags;
1011 	int ret;
1012 
1013 	ret = check_kill_permission(sig, info, p);
1014 
1015 	if (!ret && sig) {
1016 		ret = -ESRCH;
1017 		if (lock_task_sighand(p, &flags)) {
1018 			ret = __group_send_sig_info(sig, info, p);
1019 			unlock_task_sighand(p, &flags);
1020 		}
1021 	}
1022 
1023 	return ret;
1024 }
1025 
1026 /*
1027  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1028  * control characters do (^C, ^Z etc)
1029  */
1030 
1031 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1032 {
1033 	struct task_struct *p = NULL;
1034 	int retval, success;
1035 
1036 	success = 0;
1037 	retval = -ESRCH;
1038 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1039 		int err = group_send_sig_info(sig, info, p);
1040 		success |= !err;
1041 		retval = err;
1042 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1043 	return success ? 0 : retval;
1044 }
1045 
1046 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1047 {
1048 	int error = -ESRCH;
1049 	struct task_struct *p;
1050 
1051 	rcu_read_lock();
1052 retry:
1053 	p = pid_task(pid, PIDTYPE_PID);
1054 	if (p) {
1055 		error = group_send_sig_info(sig, info, p);
1056 		if (unlikely(error == -ESRCH))
1057 			/*
1058 			 * The task was unhashed in between, try again.
1059 			 * If it is dead, pid_task() will return NULL;
1060 			 * if we race with de_thread() it will find the
1061 			 * new leader.
1062 			 */
1063 			goto retry;
1064 	}
1065 	rcu_read_unlock();
1066 
1067 	return error;
1068 }
1069 
1070 int
1071 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1072 {
1073 	int error;
1074 	rcu_read_lock();
1075 	error = kill_pid_info(sig, info, find_vpid(pid));
1076 	rcu_read_unlock();
1077 	return error;
1078 }
1079 
1080 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1081 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1082 		      uid_t uid, uid_t euid, u32 secid)
1083 {
1084 	int ret = -EINVAL;
1085 	struct task_struct *p;
1086 
1087 	if (!valid_signal(sig))
1088 		return ret;
1089 
1090 	read_lock(&tasklist_lock);
1091 	p = pid_task(pid, PIDTYPE_PID);
1092 	if (!p) {
1093 		ret = -ESRCH;
1094 		goto out_unlock;
1095 	}
1096 	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1097 	    && (euid != p->suid) && (euid != p->uid)
1098 	    && (uid != p->suid) && (uid != p->uid)) {
1099 		ret = -EPERM;
1100 		goto out_unlock;
1101 	}
1102 	ret = security_task_kill(p, info, sig, secid);
1103 	if (ret)
1104 		goto out_unlock;
1105 	if (sig && p->sighand) {
1106 		unsigned long flags;
1107 		spin_lock_irqsave(&p->sighand->siglock, flags);
1108 		ret = __group_send_sig_info(sig, info, p);
1109 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1110 	}
1111 out_unlock:
1112 	read_unlock(&tasklist_lock);
1113 	return ret;
1114 }
1115 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1116 
1117 /*
1118  * kill_something_info() interprets pid in interesting ways just like kill(2).
1119  *
1120  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1121  * is probably wrong.  Should make it like BSD or SYSV.
1122  */
1123 
1124 static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1125 {
1126 	int ret;
1127 
1128 	if (pid > 0) {
1129 		rcu_read_lock();
1130 		ret = kill_pid_info(sig, info, find_vpid(pid));
1131 		rcu_read_unlock();
1132 		return ret;
1133 	}
1134 
1135 	read_lock(&tasklist_lock);
1136 	if (pid != -1) {
1137 		ret = __kill_pgrp_info(sig, info,
1138 				pid ? find_vpid(-pid) : task_pgrp(current));
1139 	} else {
1140 		int retval = 0, count = 0;
1141 		struct task_struct * p;
1142 
1143 		for_each_process(p) {
1144 			if (p->pid > 1 && !same_thread_group(p, current)) {
1145 				int err = group_send_sig_info(sig, info, p);
1146 				++count;
1147 				if (err != -EPERM)
1148 					retval = err;
1149 			}
1150 		}
1151 		ret = count ? retval : -ESRCH;
1152 	}
1153 	read_unlock(&tasklist_lock);
1154 
1155 	return ret;
1156 }
1157 
1158 /*
1159  * These are for backward compatibility with the rest of the kernel source.
1160  */
1161 
1162 /*
1163  * The caller must ensure the task can't exit.
1164  */
1165 int
1166 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1167 {
1168 	int ret;
1169 	unsigned long flags;
1170 
1171 	/*
1172 	 * Make sure legacy kernel users don't send in bad values
1173 	 * (normal paths check this in check_kill_permission).
1174 	 */
1175 	if (!valid_signal(sig))
1176 		return -EINVAL;
1177 
1178 	spin_lock_irqsave(&p->sighand->siglock, flags);
1179 	ret = specific_send_sig_info(sig, info, p);
1180 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
1181 	return ret;
1182 }
1183 
1184 #define __si_special(priv) \
1185 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1186 
1187 int
1188 send_sig(int sig, struct task_struct *p, int priv)
1189 {
1190 	return send_sig_info(sig, __si_special(priv), p);
1191 }
1192 
1193 void
1194 force_sig(int sig, struct task_struct *p)
1195 {
1196 	force_sig_info(sig, SEND_SIG_PRIV, p);
1197 }
1198 
1199 /*
1200  * When things go south during signal handling, we
1201  * will force a SIGSEGV. And if the signal that caused
1202  * the problem was already a SIGSEGV, we'll want to
1203  * make sure we don't even try to deliver the signal..
1204  */
1205 int
1206 force_sigsegv(int sig, struct task_struct *p)
1207 {
1208 	if (sig == SIGSEGV) {
1209 		unsigned long flags;
1210 		spin_lock_irqsave(&p->sighand->siglock, flags);
1211 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1212 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1213 	}
1214 	force_sig(SIGSEGV, p);
1215 	return 0;
1216 }
1217 
1218 int kill_pgrp(struct pid *pid, int sig, int priv)
1219 {
1220 	int ret;
1221 
1222 	read_lock(&tasklist_lock);
1223 	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1224 	read_unlock(&tasklist_lock);
1225 
1226 	return ret;
1227 }
1228 EXPORT_SYMBOL(kill_pgrp);
1229 
1230 int kill_pid(struct pid *pid, int sig, int priv)
1231 {
1232 	return kill_pid_info(sig, __si_special(priv), pid);
1233 }
1234 EXPORT_SYMBOL(kill_pid);
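/*
 * Editorial sketch, not part of the original file: sending a signal from
 * kernel code to a process identified by a numeric pid.  "nr" is a
 * hypothetical pid_t; find_get_pid() takes a reference that put_pid()
 * drops, and priv=1 marks the signal as coming from the kernel.
 */
#if 0
	struct pid *pid = find_get_pid(nr);

	if (pid) {
		kill_pid(pid, SIGTERM, 1);
		put_pid(pid);
	}
#endif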
1235 
1236 /*
1237  * These functions support sending signals using preallocated sigqueue
1238  * structures.  This is needed "because realtime applications cannot
1239  * afford to lose notifications of asynchronous events, like timer
1240  * expirations or I/O completions".  In the case of POSIX timers
1241  * we allocate the sigqueue structure in timer_create().  If this
1242  * allocation fails we are able to report the failure to the application
1243  * with an EAGAIN error.
1244  */
1245 
1246 struct sigqueue *sigqueue_alloc(void)
1247 {
1248 	struct sigqueue *q;
1249 
1250 	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1251 		q->flags |= SIGQUEUE_PREALLOC;
1252 	return(q);
1253 }
1254 
1255 void sigqueue_free(struct sigqueue *q)
1256 {
1257 	unsigned long flags;
1258 	spinlock_t *lock = &current->sighand->siglock;
1259 
1260 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1261 	/*
1262 	 * We must hold ->siglock while testing q->list
1263 	 * to serialize with collect_signal() or with
1264 	 * __exit_signal()->flush_sigqueue().
1265 	 */
1266 	spin_lock_irqsave(lock, flags);
1267 	q->flags &= ~SIGQUEUE_PREALLOC;
1268 	/*
1269 	 * If it is queued it will be freed when dequeued,
1270 	 * like the "regular" sigqueue.
1271 	 */
1272 	if (!list_empty(&q->list))
1273 		q = NULL;
1274 	spin_unlock_irqrestore(lock, flags);
1275 
1276 	if (q)
1277 		__sigqueue_free(q);
1278 }
1279 
1280 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1281 {
1282 	int sig = q->info.si_signo;
1283 	struct sigpending *pending;
1284 	unsigned long flags;
1285 	int ret;
1286 
1287 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1288 
1289 	ret = -1;
1290 	if (!likely(lock_task_sighand(t, &flags)))
1291 		goto ret;
1292 
1293 	ret = 1; /* the signal is ignored */
1294 	if (!prepare_signal(sig, t))
1295 		goto out;
1296 
1297 	ret = 0;
1298 	if (unlikely(!list_empty(&q->list))) {
1299 		/*
1300 		 * If an SI_TIMER entry is already queued, just increment
1301 		 * the overrun count.
1302 		 */
1303 		BUG_ON(q->info.si_code != SI_TIMER);
1304 		q->info.si_overrun++;
1305 		goto out;
1306 	}
1307 
1308 	signalfd_notify(t, sig);
1309 	pending = group ? &t->signal->shared_pending : &t->pending;
1310 	list_add_tail(&q->list, &pending->list);
1311 	sigaddset(&pending->signal, sig);
1312 	complete_signal(sig, t, group);
1313 out:
1314 	unlock_task_sighand(t, &flags);
1315 ret:
1316 	return ret;
1317 }
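/*
 * Editorial sketch, not part of the original file: the preallocation
 * pattern described in the comment above sigqueue_alloc(), roughly as the
 * POSIX timer code uses it.  "sig", "target" and "shared" are hypothetical
 * and error handling is omitted.
 */
#if 0
	/* At timer creation: reserve the sigqueue so later expiry cannot fail. */
	struct sigqueue *q = sigqueue_alloc();

	/* At timer expiry: fill in the siginfo and queue it. */
	q->info.si_signo = sig;
	q->info.si_code = SI_TIMER;
	send_sigqueue(q, target, shared);

	/* At timer deletion: give the structure back. */
	sigqueue_free(q);
#endif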
1318 
1319 /*
1320  * Wake up any threads in the parent blocked in wait* syscalls.
1321  */
1322 static inline void __wake_up_parent(struct task_struct *p,
1323 				    struct task_struct *parent)
1324 {
1325 	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1326 }
1327 
1328 /*
1329  * Let a parent know about the death of a child.
1330  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1331  *
1332  * Returns -1 if our parent ignored us and so we've switched to
1333  * self-reaping, or else @sig.
1334  */
1335 int do_notify_parent(struct task_struct *tsk, int sig)
1336 {
1337 	struct siginfo info;
1338 	unsigned long flags;
1339 	struct sighand_struct *psig;
1340 
1341 	BUG_ON(sig == -1);
1342 
1343  	/* do_notify_parent_cldstop should have been called instead.  */
1344  	BUG_ON(task_is_stopped_or_traced(tsk));
1345 
1346 	BUG_ON(!tsk->ptrace &&
1347 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1348 
1349 	info.si_signo = sig;
1350 	info.si_errno = 0;
1351 	/*
1352 	 * we are under tasklist_lock here so our parent is tied to
1353 	 * us and cannot exit and release its namespace.
1354 	 *
1355 	 * the only thing it can do is to switch its nsproxy with sys_unshare(),
1356 	 * but unsharing pid namespaces is not allowed, so we'll always
1357 	 * see the relevant namespace.
1358 	 *
1359 	 * write_lock() currently calls preempt_disable() which is the
1360 	 * same as rcu_read_lock(), but according to Oleg it is not
1361 	 * correct to rely on this.
1362 	 */
1363 	rcu_read_lock();
1364 	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1365 	rcu_read_unlock();
1366 
1367 	info.si_uid = tsk->uid;
1368 
1369 	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
1370 						       tsk->signal->utime));
1371 	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
1372 						       tsk->signal->stime));
1373 
1374 	info.si_status = tsk->exit_code & 0x7f;
1375 	if (tsk->exit_code & 0x80)
1376 		info.si_code = CLD_DUMPED;
1377 	else if (tsk->exit_code & 0x7f)
1378 		info.si_code = CLD_KILLED;
1379 	else {
1380 		info.si_code = CLD_EXITED;
1381 		info.si_status = tsk->exit_code >> 8;
1382 	}
1383 
1384 	psig = tsk->parent->sighand;
1385 	spin_lock_irqsave(&psig->siglock, flags);
1386 	if (!tsk->ptrace && sig == SIGCHLD &&
1387 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1388 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1389 		/*
1390 		 * We are exiting and our parent doesn't care.  POSIX.1
1391 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1392 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1393 		 * automatically and not left for our parent's wait4 call.
1394 		 * Rather than having the parent do it as a magic kind of
1395 		 * signal handler, we just set this to tell do_exit that we
1396 		 * can be cleaned up without becoming a zombie.  Note that
1397 		 * we still call __wake_up_parent in this case, because a
1398 		 * blocked sys_wait4 might now return -ECHILD.
1399 		 *
1400 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1401 		 * is implementation-defined: we do (if you don't want
1402 		 * it, just use SIG_IGN instead).
1403 		 */
1404 		tsk->exit_signal = -1;
1405 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1406 			sig = -1;
1407 	}
1408 	if (valid_signal(sig) && sig > 0)
1409 		__group_send_sig_info(sig, &info, tsk->parent);
1410 	__wake_up_parent(tsk, tsk->parent);
1411 	spin_unlock_irqrestore(&psig->siglock, flags);
1412 
1413 	return sig;
1414 }
1415 
1416 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1417 {
1418 	struct siginfo info;
1419 	unsigned long flags;
1420 	struct task_struct *parent;
1421 	struct sighand_struct *sighand;
1422 
1423 	if (tsk->ptrace & PT_PTRACED)
1424 		parent = tsk->parent;
1425 	else {
1426 		tsk = tsk->group_leader;
1427 		parent = tsk->real_parent;
1428 	}
1429 
1430 	info.si_signo = SIGCHLD;
1431 	info.si_errno = 0;
1432 	/*
1433 	 * see the comment in do_notify_parent() about the following 3 lines
1434 	 */
1435 	rcu_read_lock();
1436 	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1437 	rcu_read_unlock();
1438 
1439 	info.si_uid = tsk->uid;
1440 
1441 	info.si_utime = cputime_to_clock_t(tsk->utime);
1442 	info.si_stime = cputime_to_clock_t(tsk->stime);
1443 
1444  	info.si_code = why;
1445  	switch (why) {
1446  	case CLD_CONTINUED:
1447  		info.si_status = SIGCONT;
1448  		break;
1449  	case CLD_STOPPED:
1450  		info.si_status = tsk->signal->group_exit_code & 0x7f;
1451  		break;
1452  	case CLD_TRAPPED:
1453  		info.si_status = tsk->exit_code & 0x7f;
1454  		break;
1455  	default:
1456  		BUG();
1457  	}
1458 
1459 	sighand = parent->sighand;
1460 	spin_lock_irqsave(&sighand->siglock, flags);
1461 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1462 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1463 		__group_send_sig_info(SIGCHLD, &info, parent);
1464 	/*
1465 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1466 	 */
1467 	__wake_up_parent(tsk, parent);
1468 	spin_unlock_irqrestore(&sighand->siglock, flags);
1469 }
1470 
1471 static inline int may_ptrace_stop(void)
1472 {
1473 	if (!likely(current->ptrace & PT_PTRACED))
1474 		return 0;
1475 	/*
1476 	 * Are we in the middle of do_coredump?
1477 	 * If so, and our tracer is also part of the coredump, stopping
1478 	 * is a deadlock situation and pointless because our tracer
1479 	 * is dead, so don't allow us to stop.
1480 	 * If SIGKILL was already sent before the caller unlocked
1481 	 * ->siglock we must see ->core_state != NULL. Otherwise it
1482 	 * is safe to enter schedule().
1483 	 */
1484 	if (unlikely(current->mm->core_state) &&
1485 	    unlikely(current->mm == current->parent->mm))
1486 		return 0;
1487 
1488 	return 1;
1489 }
1490 
1491 /*
1492  * Return nonzero if there is a SIGKILL that should be waking us up.
1493  * Called with the siglock held.
1494  */
1495 static int sigkill_pending(struct task_struct *tsk)
1496 {
1497 	return	sigismember(&tsk->pending.signal, SIGKILL) ||
1498 		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1499 }
1500 
1501 /*
1502  * This must be called with current->sighand->siglock held.
1503  *
1504  * This should be the path for all ptrace stops.
1505  * We always set current->last_siginfo while stopped here.
1506  * That makes it a way to test a stopped process for
1507  * being ptrace-stopped vs being job-control-stopped.
1508  *
1509  * If we actually decide not to stop at all because the tracer
1510  * is gone, we keep current->exit_code unless clear_code.
1511  */
1512 static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1513 {
1514 	if (arch_ptrace_stop_needed(exit_code, info)) {
1515 		/*
1516 		 * The arch code has something special to do before a
1517 		 * ptrace stop.  This is allowed to block, e.g. for faults
1518 		 * on user stack pages.  We can't keep the siglock while
1519 		 * calling arch_ptrace_stop, so we must release it now.
1520 		 * To preserve proper semantics, we must do this before
1521 		 * any signal bookkeeping like checking group_stop_count.
1522 		 * Meanwhile, a SIGKILL could come in before we retake the
1523 		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
1524 		 * So after regaining the lock, we must check for SIGKILL.
1525 		 */
1526 		spin_unlock_irq(&current->sighand->siglock);
1527 		arch_ptrace_stop(exit_code, info);
1528 		spin_lock_irq(&current->sighand->siglock);
1529 		if (sigkill_pending(current))
1530 			return;
1531 	}
1532 
1533 	/*
1534 	 * If there is a group stop in progress,
1535 	 * we must participate in the bookkeeping.
1536 	 */
1537 	if (current->signal->group_stop_count > 0)
1538 		--current->signal->group_stop_count;
1539 
1540 	current->last_siginfo = info;
1541 	current->exit_code = exit_code;
1542 
1543 	/* Let the debugger run.  */
1544 	__set_current_state(TASK_TRACED);
1545 	spin_unlock_irq(&current->sighand->siglock);
1546 	read_lock(&tasklist_lock);
1547 	if (may_ptrace_stop()) {
1548 		do_notify_parent_cldstop(current, CLD_TRAPPED);
1549 		read_unlock(&tasklist_lock);
1550 		schedule();
1551 	} else {
1552 		/*
1553 		 * By the time we got the lock, our tracer went away.
1554 		 * Don't drop the lock yet, another tracer may come.
1555 		 */
1556 		__set_current_state(TASK_RUNNING);
1557 		if (clear_code)
1558 			current->exit_code = 0;
1559 		read_unlock(&tasklist_lock);
1560 	}
1561 
1562 	/*
1563 	 * While in TASK_TRACED, we were considered "frozen enough".
1564 	 * Now that we woke up, it's crucial if we're supposed to be
1565 	 * frozen that we freeze now before running anything substantial.
1566 	 */
1567 	try_to_freeze();
1568 
1569 	/*
1570 	 * We are back.  Now reacquire the siglock before touching
1571 	 * last_siginfo, so that we are sure to have synchronized with
1572 	 * any signal-sending on another CPU that wants to examine it.
1573 	 */
1574 	spin_lock_irq(&current->sighand->siglock);
1575 	current->last_siginfo = NULL;
1576 
1577 	/*
1578 	 * Queued signals ignored us while we were stopped for tracing.
1579 	 * So check for any that we should take before resuming user mode.
1580 	 * This sets TIF_SIGPENDING, but never clears it.
1581 	 */
1582 	recalc_sigpending_tsk(current);
1583 }
1584 
1585 void ptrace_notify(int exit_code)
1586 {
1587 	siginfo_t info;
1588 
1589 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1590 
1591 	memset(&info, 0, sizeof info);
1592 	info.si_signo = SIGTRAP;
1593 	info.si_code = exit_code;
1594 	info.si_pid = task_pid_vnr(current);
1595 	info.si_uid = current->uid;
1596 
1597 	/* Let the debugger run.  */
1598 	spin_lock_irq(&current->sighand->siglock);
1599 	ptrace_stop(exit_code, 1, &info);
1600 	spin_unlock_irq(&current->sighand->siglock);
1601 }
1602 
1603 static void
1604 finish_stop(int stop_count)
1605 {
1606 	/*
1607 	 * If there are no other threads in the group, or if there is
1608 	 * a group stop in progress and we are the last to stop,
1609 	 * report to the parent.  When ptraced, every thread reports itself.
1610 	 */
1611 	if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
1612 		read_lock(&tasklist_lock);
1613 		do_notify_parent_cldstop(current, CLD_STOPPED);
1614 		read_unlock(&tasklist_lock);
1615 	}
1616 
1617 	do {
1618 		schedule();
1619 	} while (try_to_freeze());
1620 	/*
1621 	 * Now we don't run again until continued.
1622 	 */
1623 	current->exit_code = 0;
1624 }
1625 
1626 /*
1627  * This performs the stopping for SIGSTOP and other stop signals.
1628  * We have to stop all threads in the thread group.
1629  * Returns nonzero if we've actually stopped and released the siglock.
1630  * Returns zero if we didn't stop and still hold the siglock.
1631  */
1632 static int do_signal_stop(int signr)
1633 {
1634 	struct signal_struct *sig = current->signal;
1635 	int stop_count;
1636 
1637 	if (sig->group_stop_count > 0) {
1638 		/*
1639 		 * There is a group stop in progress.  We don't need to
1640 		 * start another one.
1641 		 */
1642 		stop_count = --sig->group_stop_count;
1643 	} else {
1644 		struct task_struct *t;
1645 
1646 		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
1647 		    unlikely(signal_group_exit(sig)))
1648 			return 0;
1649 		/*
1650 		 * There is no group stop already in progress.
1651 		 * We must initiate one now.
1652 		 */
1653 		sig->group_exit_code = signr;
1654 
1655 		stop_count = 0;
1656 		for (t = next_thread(current); t != current; t = next_thread(t))
1657 			/*
1658 			 * Setting state to TASK_STOPPED for a group
1659 			 * stop is always done with the siglock held,
1660 			 * so this check has no races.
1661 			 */
1662 			if (!(t->flags & PF_EXITING) &&
1663 			    !task_is_stopped_or_traced(t)) {
1664 				stop_count++;
1665 				signal_wake_up(t, 0);
1666 			}
1667 		sig->group_stop_count = stop_count;
1668 	}
1669 
1670 	if (stop_count == 0)
1671 		sig->flags = SIGNAL_STOP_STOPPED;
1672 	current->exit_code = sig->group_exit_code;
1673 	__set_current_state(TASK_STOPPED);
1674 
1675 	spin_unlock_irq(&current->sighand->siglock);
1676 	finish_stop(stop_count);
1677 	return 1;
1678 }
1679 
1680 static int ptrace_signal(int signr, siginfo_t *info,
1681 			 struct pt_regs *regs, void *cookie)
1682 {
1683 	if (!(current->ptrace & PT_PTRACED))
1684 		return signr;
1685 
1686 	ptrace_signal_deliver(regs, cookie);
1687 
1688 	/* Let the debugger run.  */
1689 	ptrace_stop(signr, 0, info);
1690 
1691 	/* We're back.  Did the debugger cancel the sig?  */
1692 	signr = current->exit_code;
1693 	if (signr == 0)
1694 		return signr;
1695 
1696 	current->exit_code = 0;
1697 
1698 	/* Update the siginfo structure if the signal has
1699 	   changed.  If the debugger wanted something
1700 	   specific in the siginfo structure then it should
1701 	   have updated *info via PTRACE_SETSIGINFO.  */
1702 	if (signr != info->si_signo) {
1703 		info->si_signo = signr;
1704 		info->si_errno = 0;
1705 		info->si_code = SI_USER;
1706 		info->si_pid = task_pid_vnr(current->parent);
1707 		info->si_uid = current->parent->uid;
1708 	}
1709 
1710 	/* If the (new) signal is now blocked, requeue it.  */
1711 	if (sigismember(&current->blocked, signr)) {
1712 		specific_send_sig_info(signr, info, current);
1713 		signr = 0;
1714 	}
1715 
1716 	return signr;
1717 }
1718 
1719 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1720 			  struct pt_regs *regs, void *cookie)
1721 {
1722 	struct sighand_struct *sighand = current->sighand;
1723 	struct signal_struct *signal = current->signal;
1724 	int signr;
1725 
1726 relock:
1727 	/*
1728 	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
1729 	 * While in TASK_STOPPED, we were considered "frozen enough".
1730 	 * Now that we woke up, it's crucial if we're supposed to be
1731 	 * frozen that we freeze now before running anything substantial.
1732 	 */
1733 	try_to_freeze();
1734 
1735 	spin_lock_irq(&sighand->siglock);
1736 	/*
1737 	 * Every stopped thread goes here after wakeup. Check to see if
1738 	 * we should notify the parent, prepare_signal(SIGCONT) encodes
1739 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
1740 	 */
1741 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
1742 		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
1743 				? CLD_CONTINUED : CLD_STOPPED;
1744 		signal->flags &= ~SIGNAL_CLD_MASK;
1745 		spin_unlock_irq(&sighand->siglock);
1746 
1747 		if (unlikely(!tracehook_notify_jctl(1, why)))
1748 			goto relock;
1749 
1750 		read_lock(&tasklist_lock);
1751 		do_notify_parent_cldstop(current->group_leader, why);
1752 		read_unlock(&tasklist_lock);
1753 		goto relock;
1754 	}
1755 
1756 	for (;;) {
1757 		struct k_sigaction *ka;
1758 
1759 		if (unlikely(signal->group_stop_count > 0) &&
1760 		    do_signal_stop(0))
1761 			goto relock;
1762 
1763 		/*
1764 		 * Tracing can induce an artificial signal and choose sigaction.
1765 		 * The return value in @signr determines the default action,
1766 		 * but @info->si_signo is the signal number we will report.
1767 		 */
1768 		signr = tracehook_get_signal(current, regs, info, return_ka);
1769 		if (unlikely(signr < 0))
1770 			goto relock;
1771 		if (unlikely(signr != 0))
1772 			ka = return_ka;
1773 		else {
1774 			signr = dequeue_signal(current, &current->blocked,
1775 					       info);
1776 
1777 			if (!signr)
1778 				break; /* will return 0 */
1779 
1780 			if (signr != SIGKILL) {
1781 				signr = ptrace_signal(signr, info,
1782 						      regs, cookie);
1783 				if (!signr)
1784 					continue;
1785 			}
1786 
1787 			ka = &sighand->action[signr-1];
1788 		}
1789 
1790 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1791 			continue;
1792 		if (ka->sa.sa_handler != SIG_DFL) {
1793 			/* Run the handler.  */
1794 			*return_ka = *ka;
1795 
1796 			if (ka->sa.sa_flags & SA_ONESHOT)
1797 				ka->sa.sa_handler = SIG_DFL;
1798 
1799 			break; /* will return non-zero "signr" value */
1800 		}
1801 
1802 		/*
1803 		 * Now we are doing the default action for this signal.
1804 		 */
1805 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
1806 			continue;
1807 
1808 		/*
1809 		 * Global init gets no signals it doesn't want.
1810 		 */
1811 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
1812 		    !signal_group_exit(signal))
1813 			continue;
1814 
1815 		if (sig_kernel_stop(signr)) {
1816 			/*
1817 			 * The default action is to stop all threads in
1818 			 * the thread group.  The job control signals
1819 			 * do nothing in an orphaned pgrp, but SIGSTOP
1820 			 * always works.  Note that siglock needs to be
1821 			 * dropped during the call to is_orphaned_pgrp()
1822 			 * because of lock ordering with tasklist_lock.
1823 			 * This allows an intervening SIGCONT to be posted.
1824 			 * We need to check for that and bail out if necessary.
1825 			 */
1826 			if (signr != SIGSTOP) {
1827 				spin_unlock_irq(&sighand->siglock);
1828 
1829 				/* signals can be posted during this window */
1830 
1831 				if (is_current_pgrp_orphaned())
1832 					goto relock;
1833 
1834 				spin_lock_irq(&sighand->siglock);
1835 			}
1836 
1837 			if (likely(do_signal_stop(info->si_signo))) {
1838 				/* It released the siglock.  */
1839 				goto relock;
1840 			}
1841 
1842 			/*
1843 			 * We didn't actually stop, due to a race
1844 			 * with SIGCONT or something like that.
1845 			 */
1846 			continue;
1847 		}
1848 
1849 		spin_unlock_irq(&sighand->siglock);
1850 
1851 		/*
1852 		 * Anything else is fatal, maybe with a core dump.
1853 		 */
1854 		current->flags |= PF_SIGNALED;
1855 
1856 		if (sig_kernel_coredump(signr)) {
1857 			if (print_fatal_signals)
1858 				print_fatal_signal(regs, info->si_signo);
1859 			/*
1860 			 * If it was able to dump core, this kills all
1861 			 * other threads in the group and synchronizes with
1862 			 * their demise.  If we lost the race with another
1863 			 * thread getting here, it set group_exit_code
1864 			 * first and our do_group_exit call below will use
1865 			 * that value and ignore the one we pass it.
1866 			 */
1867 			do_coredump(info->si_signo, info->si_signo, regs);
1868 		}
1869 
1870 		/*
1871 		 * Death signals, no core dump.
1872 		 */
1873 		do_group_exit(info->si_signo);
1874 		/* NOTREACHED */
1875 	}
1876 	spin_unlock_irq(&sighand->siglock);
1877 	return signr;
1878 }
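
/*
 * Illustrative userspace sketch (not part of the kernel build) of the
 * orphaned-pgrp rule handled above: a job-control stop sent to an orphaned
 * process group is silently dropped, while SIGSTOP always works.  "pgid" is
 * an assumed variable holding the group's id:
 *
 *	kill(-pgid, SIGTSTP);	dropped if the pgrp is orphaned
 *	kill(-pgid, SIGSTOP);	always stops the group
 */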
1879 
1880 void exit_signals(struct task_struct *tsk)
1881 {
1882 	int group_stop = 0;
1883 	struct task_struct *t;
1884 
1885 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
1886 		tsk->flags |= PF_EXITING;
1887 		return;
1888 	}
1889 
1890 	spin_lock_irq(&tsk->sighand->siglock);
1891 	/*
1892 	 * From now this task is not visible for group-wide signals,
1893 	 * see wants_signal(), do_signal_stop().
1894 	 */
1895 	tsk->flags |= PF_EXITING;
1896 	if (!signal_pending(tsk))
1897 		goto out;
1898 
1899 	/* It could be that __group_complete_signal() chose us to
1900 	 * notify about a group-wide signal. Another thread should be
1901 	 * woken now to take the signal since we will not.
1902 	 */
1903 	for (t = tsk; (t = next_thread(t)) != tsk; )
1904 		if (!signal_pending(t) && !(t->flags & PF_EXITING))
1905 			recalc_sigpending_and_wake(t);
1906 
1907 	if (unlikely(tsk->signal->group_stop_count) &&
1908 			!--tsk->signal->group_stop_count) {
1909 		tsk->signal->flags = SIGNAL_STOP_STOPPED;
1910 		group_stop = 1;
1911 	}
1912 out:
1913 	spin_unlock_irq(&tsk->sighand->siglock);
1914 
1915 	if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
1916 		read_lock(&tasklist_lock);
1917 		do_notify_parent_cldstop(tsk, CLD_STOPPED);
1918 		read_unlock(&tasklist_lock);
1919 	}
1920 }
1921 
1922 EXPORT_SYMBOL(recalc_sigpending);
1923 EXPORT_SYMBOL_GPL(dequeue_signal);
1924 EXPORT_SYMBOL(flush_signals);
1925 EXPORT_SYMBOL(force_sig);
1926 EXPORT_SYMBOL(send_sig);
1927 EXPORT_SYMBOL(send_sig_info);
1928 EXPORT_SYMBOL(sigprocmask);
1929 EXPORT_SYMBOL(block_all_signals);
1930 EXPORT_SYMBOL(unblock_all_signals);
1931 
1932 
1933 /*
1934  * System call entry points.
1935  */
1936 
1937 asmlinkage long sys_restart_syscall(void)
1938 {
1939 	struct restart_block *restart = &current_thread_info()->restart_block;
1940 	return restart->fn(restart);
1941 }
1942 
1943 long do_no_restart_syscall(struct restart_block *param)
1944 {
1945 	return -EINTR;
1946 }
1947 
1948 /*
1949  * We don't need to get the kernel lock - this is all local to this
1950  * particular thread. (and that's good, because this is _heavily_
1951  * used by various programs)
1952  */
1953 
1954 /*
1955  * This is also useful for kernel threads that want to temporarily
1956  * (or permanently) block certain signals.
1957  *
1958  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
1959  * interface happily blocks "unblockable" signals like SIGKILL
1960  * and friends.
1961  */
1962 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
1963 {
1964 	int error;
1965 
1966 	spin_lock_irq(&current->sighand->siglock);
1967 	if (oldset)
1968 		*oldset = current->blocked;
1969 
1970 	error = 0;
1971 	switch (how) {
1972 	case SIG_BLOCK:
1973 		sigorsets(&current->blocked, &current->blocked, set);
1974 		break;
1975 	case SIG_UNBLOCK:
1976 		signandsets(&current->blocked, &current->blocked, set);
1977 		break;
1978 	case SIG_SETMASK:
1979 		current->blocked = *set;
1980 		break;
1981 	default:
1982 		error = -EINVAL;
1983 	}
1984 	recalc_sigpending();
1985 	spin_unlock_irq(&current->sighand->siglock);
1986 
1987 	return error;
1988 }
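
/*
 * Illustrative in-kernel sketch (an assumption for documentation, not code
 * used by this file): per the note above, a kernel thread that wants to
 * shut out every signal, including the normally unblockable ones, could do:
 *
 *	sigset_t all;
 *
 *	sigfillset(&all);
 *	sigprocmask(SIG_BLOCK, &all, NULL);
 *
 * After this even SIGKILL stays blocked, unlike with the user-mode call.
 */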
1989 
1990 asmlinkage long
1991 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
1992 {
1993 	int error = -EINVAL;
1994 	sigset_t old_set, new_set;
1995 
1996 	/* XXX: Don't preclude handling different sized sigset_t's.  */
1997 	if (sigsetsize != sizeof(sigset_t))
1998 		goto out;
1999 
2000 	if (set) {
2001 		error = -EFAULT;
2002 		if (copy_from_user(&new_set, set, sizeof(*set)))
2003 			goto out;
2004 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2005 
2006 		error = sigprocmask(how, &new_set, &old_set);
2007 		if (error)
2008 			goto out;
2009 		if (oset)
2010 			goto set_old;
2011 	} else if (oset) {
2012 		spin_lock_irq(&current->sighand->siglock);
2013 		old_set = current->blocked;
2014 		spin_unlock_irq(&current->sighand->siglock);
2015 
2016 	set_old:
2017 		error = -EFAULT;
2018 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2019 			goto out;
2020 	}
2021 	error = 0;
2022 out:
2023 	return error;
2024 }
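
/*
 * Illustrative userspace sketch (not part of the kernel build): the libc
 * sigprocmask() wrapper typically reaches this entry point.  A common
 * block-then-restore sequence looks like:
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGINT);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	... critical section: SIGINT stays pending ...
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */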
2025 
2026 long do_sigpending(void __user *set, unsigned long sigsetsize)
2027 {
2028 	long error = -EINVAL;
2029 	sigset_t pending;
2030 
2031 	if (sigsetsize > sizeof(sigset_t))
2032 		goto out;
2033 
2034 	spin_lock_irq(&current->sighand->siglock);
2035 	sigorsets(&pending, &current->pending.signal,
2036 		  &current->signal->shared_pending.signal);
2037 	spin_unlock_irq(&current->sighand->siglock);
2038 
2039 	/* Outside the lock because only this thread touches it.  */
2040 	sigandsets(&pending, &current->blocked, &pending);
2041 
2042 	error = -EFAULT;
2043 	if (!copy_to_user(set, &pending, sigsetsize))
2044 		error = 0;
2045 
2046 out:
2047 	return error;
2048 }
2049 
2050 asmlinkage long
2051 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2052 {
2053 	return do_sigpending(set, sigsetsize);
2054 }
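
/*
 * Illustrative userspace sketch (not part of the kernel build): sigpending()
 * reports signals raised while blocked.  Assuming SIGUSR1 is already in the
 * caller's blocked mask:
 *
 *	sigset_t pend;
 *
 *	raise(SIGUSR1);
 *	sigpending(&pend);
 *	if (sigismember(&pend, SIGUSR1))
 *		... SIGUSR1 is queued, waiting to be unblocked ...
 */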
2055 
2056 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2057 
2058 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2059 {
2060 	int err;
2061 
2062 	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
2063 		return -EFAULT;
2064 	if (from->si_code < 0)
2065 		return __copy_to_user(to, from, sizeof(siginfo_t))
2066 			? -EFAULT : 0;
2067 	/*
2068 	 * If you change the siginfo_t structure, please be sure
2069 	 * this code is fixed accordingly.
2070 	 * Please remember to update the signalfd_copyinfo() function
2071 	 * inside fs/signalfd.c too, in case siginfo_t changes.
2072 	 * It should never copy any pad contained in the structure
2073 	 * to avoid security leaks, but must copy the generic
2074 	 * 3 ints plus the relevant union member.
2075 	 */
2076 	err = __put_user(from->si_signo, &to->si_signo);
2077 	err |= __put_user(from->si_errno, &to->si_errno);
2078 	err |= __put_user((short)from->si_code, &to->si_code);
2079 	switch (from->si_code & __SI_MASK) {
2080 	case __SI_KILL:
2081 		err |= __put_user(from->si_pid, &to->si_pid);
2082 		err |= __put_user(from->si_uid, &to->si_uid);
2083 		break;
2084 	case __SI_TIMER:
2085 		err |= __put_user(from->si_tid, &to->si_tid);
2086 		err |= __put_user(from->si_overrun, &to->si_overrun);
2087 		err |= __put_user(from->si_ptr, &to->si_ptr);
2088 		break;
2089 	case __SI_POLL:
2090 		err |= __put_user(from->si_band, &to->si_band);
2091 		err |= __put_user(from->si_fd, &to->si_fd);
2092 		break;
2093 	case __SI_FAULT:
2094 		err |= __put_user(from->si_addr, &to->si_addr);
2095 #ifdef __ARCH_SI_TRAPNO
2096 		err |= __put_user(from->si_trapno, &to->si_trapno);
2097 #endif
2098 		break;
2099 	case __SI_CHLD:
2100 		err |= __put_user(from->si_pid, &to->si_pid);
2101 		err |= __put_user(from->si_uid, &to->si_uid);
2102 		err |= __put_user(from->si_status, &to->si_status);
2103 		err |= __put_user(from->si_utime, &to->si_utime);
2104 		err |= __put_user(from->si_stime, &to->si_stime);
2105 		break;
2106 	case __SI_RT: /* This is not generated by the kernel as of now. */
2107 	case __SI_MESGQ: /* But this is */
2108 		err |= __put_user(from->si_pid, &to->si_pid);
2109 		err |= __put_user(from->si_uid, &to->si_uid);
2110 		err |= __put_user(from->si_ptr, &to->si_ptr);
2111 		break;
2112 	default: /* this is just in case for now ... */
2113 		err |= __put_user(from->si_pid, &to->si_pid);
2114 		err |= __put_user(from->si_uid, &to->si_uid);
2115 		break;
2116 	}
2117 	return err;
2118 }
2119 
2120 #endif
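
/*
 * Illustrative userspace sketch (not part of the kernel build): the fields
 * copied above are the ones a SA_SIGINFO handler can rely on.  For example,
 * a SIGCHLD handler (the __SI_CHLD case) typically reads:
 *
 *	static void chld(int sig, siginfo_t *si, void *uctx)
 *	{
 *		pid_t child = si->si_pid;	which child changed state
 *		int status  = si->si_status;	exit code or stopping signal
 *	}
 */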
2121 
2122 asmlinkage long
2123 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2124 		    siginfo_t __user *uinfo,
2125 		    const struct timespec __user *uts,
2126 		    size_t sigsetsize)
2127 {
2128 	int ret, sig;
2129 	sigset_t these;
2130 	struct timespec ts;
2131 	siginfo_t info;
2132 	long timeout = 0;
2133 
2134 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2135 	if (sigsetsize != sizeof(sigset_t))
2136 		return -EINVAL;
2137 
2138 	if (copy_from_user(&these, uthese, sizeof(these)))
2139 		return -EFAULT;
2140 
2141 	/*
2142 	 * Invert the set of allowed signals to get those we
2143 	 * want to block.
2144 	 */
2145 	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2146 	signotset(&these);
2147 
2148 	if (uts) {
2149 		if (copy_from_user(&ts, uts, sizeof(ts)))
2150 			return -EFAULT;
2151 		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2152 		    || ts.tv_sec < 0)
2153 			return -EINVAL;
2154 	}
2155 
2156 	spin_lock_irq(&current->sighand->siglock);
2157 	sig = dequeue_signal(current, &these, &info);
2158 	if (!sig) {
2159 		timeout = MAX_SCHEDULE_TIMEOUT;
2160 		if (uts)
2161 			timeout = (timespec_to_jiffies(&ts)
2162 				   + (ts.tv_sec || ts.tv_nsec));
2163 
2164 		if (timeout) {
2165 			/* None ready -- temporarily unblock the signals we're
2166 			 * interested in while we sleep, so that we'll
2167 			 * be awakened when they arrive.  */
2168 			current->real_blocked = current->blocked;
2169 			sigandsets(&current->blocked, &current->blocked, &these);
2170 			recalc_sigpending();
2171 			spin_unlock_irq(&current->sighand->siglock);
2172 
2173 			timeout = schedule_timeout_interruptible(timeout);
2174 
2175 			spin_lock_irq(&current->sighand->siglock);
2176 			sig = dequeue_signal(current, &these, &info);
2177 			current->blocked = current->real_blocked;
2178 			siginitset(&current->real_blocked, 0);
2179 			recalc_sigpending();
2180 		}
2181 	}
2182 	spin_unlock_irq(&current->sighand->siglock);
2183 
2184 	if (sig) {
2185 		ret = sig;
2186 		if (uinfo) {
2187 			if (copy_siginfo_to_user(uinfo, &info))
2188 				ret = -EFAULT;
2189 		}
2190 	} else {
2191 		ret = -EAGAIN;
2192 		if (timeout)
2193 			ret = -EINTR;
2194 	}
2195 
2196 	return ret;
2197 }
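
/*
 * Illustrative userspace sketch (not part of the kernel build): the libc
 * sigtimedwait() wrapper reaches this entry point.  The caller blocks the
 * signals first so they queue instead of being delivered asynchronously:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) < 0 && errno == EAGAIN)
 *		... timed out after two seconds ...
 */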
2198 
2199 asmlinkage long
2200 sys_kill(pid_t pid, int sig)
2201 {
2202 	struct siginfo info;
2203 
2204 	info.si_signo = sig;
2205 	info.si_errno = 0;
2206 	info.si_code = SI_USER;
2207 	info.si_pid = task_tgid_vnr(current);
2208 	info.si_uid = current->uid;
2209 
2210 	return kill_something_info(sig, &info, pid);
2211 }
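
/*
 * Illustrative userspace sketch (not part of the kernel build): signal 0 is
 * the classic existence/permission probe; nothing is actually delivered:
 *
 *	if (kill(pid, 0) == 0)
 *		... pid exists and we may signal it ...
 *	else if (errno == ESRCH)
 *		... no such process ...
 *	else if (errno == EPERM)
 *		... it exists, but we lack permission ...
 */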
2212 
2213 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2214 {
2215 	int error;
2216 	struct siginfo info;
2217 	struct task_struct *p;
2218 	unsigned long flags;
2219 
2220 	error = -ESRCH;
2221 	info.si_signo = sig;
2222 	info.si_errno = 0;
2223 	info.si_code = SI_TKILL;
2224 	info.si_pid = task_tgid_vnr(current);
2225 	info.si_uid = current->uid;
2226 
2227 	rcu_read_lock();
2228 	p = find_task_by_vpid(pid);
2229 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2230 		error = check_kill_permission(sig, &info, p);
2231 		/*
2232 		 * The null signal is a permissions and process existence
2233 		 * probe.  No signal is actually delivered.
2234 		 *
2235 		 * If lock_task_sighand() fails we pretend the task dies
2236 		 * after receiving the signal. The window is tiny, and the
2237 		 * signal is private anyway.
2238 		 */
2239 		if (!error && sig && lock_task_sighand(p, &flags)) {
2240 			error = specific_send_sig_info(sig, &info, p);
2241 			unlock_task_sighand(p, &flags);
2242 		}
2243 	}
2244 	rcu_read_unlock();
2245 
2246 	return error;
2247 }
2248 
2249 /**
2250  *  sys_tgkill - send signal to one specific thread
2251  *  @tgid: the thread group ID of the thread
2252  *  @pid: the PID of the thread
2253  *  @sig: signal to be sent
2254  *
2255  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2256  *  exists but no longer belongs to the target process. This
2257  *  solves the problem of threads exiting and PIDs getting reused.
2258  */
2259 asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig)
2260 {
2261 	/* This is only valid for single tasks */
2262 	if (pid <= 0 || tgid <= 0)
2263 		return -EINVAL;
2264 
2265 	return do_tkill(tgid, pid, sig);
2266 }
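
/*
 * Illustrative userspace sketch (not part of the kernel build, syscall
 * numbers assumed to come from <sys/syscall.h>): pairing the thread id with
 * its thread group id is what protects against tid reuse:
 *
 *	pid_t tid = syscall(SYS_gettid);
 *	...
 *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 */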
2267 
2268 /*
2269  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2270  */
2271 asmlinkage long
2272 sys_tkill(pid_t pid, int sig)
2273 {
2274 	/* This is only valid for single tasks */
2275 	if (pid <= 0)
2276 		return -EINVAL;
2277 
2278 	return do_tkill(0, pid, sig);
2279 }
2280 
2281 asmlinkage long
2282 sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo)
2283 {
2284 	siginfo_t info;
2285 
2286 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2287 		return -EFAULT;
2288 
2289 	/* Not even root can pretend to send signals from the kernel.
2290 	   Nor can they impersonate a kill(), which adds source info.  */
2291 	if (info.si_code >= 0)
2292 		return -EPERM;
2293 	info.si_signo = sig;
2294 
2295 	/* POSIX.1b doesn't mention process groups.  */
2296 	return kill_proc_info(sig, &info, pid);
2297 }
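
/*
 * Illustrative userspace sketch (not part of the kernel build): userspace
 * normally reaches this through sigqueue(3), which fills in a negative
 * si_code (SI_QUEUE) and so passes the check above:
 *
 *	union sigval v = { .sival_int = 42 };
 *
 *	sigqueue(pid, SIGUSR1, v);
 */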
2298 
2299 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2300 {
2301 	struct task_struct *t = current;
2302 	struct k_sigaction *k;
2303 	sigset_t mask;
2304 
2305 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2306 		return -EINVAL;
2307 
2308 	k = &t->sighand->action[sig-1];
2309 
2310 	spin_lock_irq(&current->sighand->siglock);
2311 	if (oact)
2312 		*oact = *k;
2313 
2314 	if (act) {
2315 		sigdelsetmask(&act->sa.sa_mask,
2316 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
2317 		*k = *act;
2318 		/*
2319 		 * POSIX 3.3.1.3:
2320 		 *  "Setting a signal action to SIG_IGN for a signal that is
2321 		 *   pending shall cause the pending signal to be discarded,
2322 		 *   whether or not it is blocked."
2323 		 *
2324 		 *  "Setting a signal action to SIG_DFL for a signal that is
2325 		 *   pending and whose default action is to ignore the signal
2326 		 *   (for example, SIGCHLD), shall cause the pending signal to
2327 		 *   be discarded, whether or not it is blocked"
2328 		 */
2329 		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2330 			sigemptyset(&mask);
2331 			sigaddset(&mask, sig);
2332 			rm_from_queue_full(&mask, &t->signal->shared_pending);
2333 			do {
2334 				rm_from_queue_full(&mask, &t->pending);
2335 				t = next_thread(t);
2336 			} while (t != current);
2337 		}
2338 	}
2339 
2340 	spin_unlock_irq(&current->sighand->siglock);
2341 	return 0;
2342 }
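
/*
 * Illustrative userspace sketch (not part of the kernel build) of the POSIX
 * rule handled above: a blocked, pending signal vanishes once its action
 * becomes SIG_IGN:
 *
 *	sigset_t s;
 *
 *	sigemptyset(&s);
 *	sigaddset(&s, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &s, NULL);
 *	raise(SIGUSR1);			now pending
 *	signal(SIGUSR1, SIG_IGN);	pending instance is discarded
 *	sigpending(&s);			SIGUSR1 is no longer a member
 */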
2343 
2344 int
2345 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2346 {
2347 	stack_t oss;
2348 	int error;
2349 
2350 	if (uoss) {
2351 		oss.ss_sp = (void __user *) current->sas_ss_sp;
2352 		oss.ss_size = current->sas_ss_size;
2353 		oss.ss_flags = sas_ss_flags(sp);
2354 	}
2355 
2356 	if (uss) {
2357 		void __user *ss_sp;
2358 		size_t ss_size;
2359 		int ss_flags;
2360 
2361 		error = -EFAULT;
2362 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2363 		    || __get_user(ss_sp, &uss->ss_sp)
2364 		    || __get_user(ss_flags, &uss->ss_flags)
2365 		    || __get_user(ss_size, &uss->ss_size))
2366 			goto out;
2367 
2368 		error = -EPERM;
2369 		if (on_sig_stack(sp))
2370 			goto out;
2371 
2372 		error = -EINVAL;
2373 		/*
2374 		 *
2375 		 * Note - this code used to test ss_flags incorrectly:
2376 		 *	  old code may have been written using ss_flags==0
2377 		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
2378 		 *	  way that worked), so this fix preserves that older
2379 		 *	  mechanism.
2380 		 */
2381 		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2382 			goto out;
2383 
2384 		if (ss_flags == SS_DISABLE) {
2385 			ss_size = 0;
2386 			ss_sp = NULL;
2387 		} else {
2388 			error = -ENOMEM;
2389 			if (ss_size < MINSIGSTKSZ)
2390 				goto out;
2391 		}
2392 
2393 		current->sas_ss_sp = (unsigned long) ss_sp;
2394 		current->sas_ss_size = ss_size;
2395 	}
2396 
2397 	if (uoss) {
2398 		error = -EFAULT;
2399 		if (copy_to_user(uoss, &oss, sizeof(oss)))
2400 			goto out;
2401 	}
2402 
2403 	error = 0;
2404 out:
2405 	return error;
2406 }
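
/*
 * Illustrative userspace sketch (not part of the kernel build): an alternate
 * stack is typically installed so that, say, a SIGSEGV handler registered
 * with SA_ONSTACK can still run after the main stack overflows:
 *
 *	static char altstack[SIGSTKSZ];
 *	stack_t ss = {
 *		.ss_sp	  = altstack,
 *		.ss_size  = sizeof(altstack),
 *		.ss_flags = 0,
 *	};
 *
 *	sigaltstack(&ss, NULL);
 */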
2407 
2408 #ifdef __ARCH_WANT_SYS_SIGPENDING
2409 
2410 asmlinkage long
2411 sys_sigpending(old_sigset_t __user *set)
2412 {
2413 	return do_sigpending(set, sizeof(*set));
2414 }
2415 
2416 #endif
2417 
2418 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2419 /* Some platforms have their own version with special arguments; others
2420    support only sys_rt_sigprocmask.  */
2421 
2422 asmlinkage long
2423 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2424 {
2425 	int error;
2426 	old_sigset_t old_set, new_set;
2427 
2428 	if (set) {
2429 		error = -EFAULT;
2430 		if (copy_from_user(&new_set, set, sizeof(*set)))
2431 			goto out;
2432 		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2433 
2434 		spin_lock_irq(&current->sighand->siglock);
2435 		old_set = current->blocked.sig[0];
2436 
2437 		error = 0;
2438 		switch (how) {
2439 		default:
2440 			error = -EINVAL;
2441 			break;
2442 		case SIG_BLOCK:
2443 			sigaddsetmask(&current->blocked, new_set);
2444 			break;
2445 		case SIG_UNBLOCK:
2446 			sigdelsetmask(&current->blocked, new_set);
2447 			break;
2448 		case SIG_SETMASK:
2449 			current->blocked.sig[0] = new_set;
2450 			break;
2451 		}
2452 
2453 		recalc_sigpending();
2454 		spin_unlock_irq(&current->sighand->siglock);
2455 		if (error)
2456 			goto out;
2457 		if (oset)
2458 			goto set_old;
2459 	} else if (oset) {
2460 		old_set = current->blocked.sig[0];
2461 	set_old:
2462 		error = -EFAULT;
2463 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2464 			goto out;
2465 	}
2466 	error = 0;
2467 out:
2468 	return error;
2469 }
2470 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2471 
2472 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2473 asmlinkage long
2474 sys_rt_sigaction(int sig,
2475 		 const struct sigaction __user *act,
2476 		 struct sigaction __user *oact,
2477 		 size_t sigsetsize)
2478 {
2479 	struct k_sigaction new_sa, old_sa;
2480 	int ret = -EINVAL;
2481 
2482 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2483 	if (sigsetsize != sizeof(sigset_t))
2484 		goto out;
2485 
2486 	if (act) {
2487 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2488 			return -EFAULT;
2489 	}
2490 
2491 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2492 
2493 	if (!ret && oact) {
2494 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2495 			return -EFAULT;
2496 	}
2497 out:
2498 	return ret;
2499 }
2500 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
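
/*
 * Illustrative userspace sketch (not part of the kernel build): the libc
 * sigaction() wrapper funnels into this entry point.  "handler" is an
 * assumed function of type void (*)(int, siginfo_t *, void *):
 *
 *	struct sigaction sa = { 0 };
 *
 *	sa.sa_sigaction = handler;
 *	sa.sa_flags = SA_SIGINFO | SA_RESTART;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGUSR1, &sa, NULL);
 */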
2501 
2502 #ifdef __ARCH_WANT_SYS_SGETMASK
2503 
2504 /*
2505  * For backwards compatibility.  Functionality superseded by sigprocmask.
2506  */
2507 asmlinkage long
2508 sys_sgetmask(void)
2509 {
2510 	/* SMP safe */
2511 	return current->blocked.sig[0];
2512 }
2513 
2514 asmlinkage long
2515 sys_ssetmask(int newmask)
2516 {
2517 	int old;
2518 
2519 	spin_lock_irq(&current->sighand->siglock);
2520 	old = current->blocked.sig[0];
2521 
2522 	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2523 						  sigmask(SIGSTOP)));
2524 	recalc_sigpending();
2525 	spin_unlock_irq(&current->sighand->siglock);
2526 
2527 	return old;
2528 }
2529 #endif /* __ARCH_WANT_SYS_SGETMASK */
2530 
2531 #ifdef __ARCH_WANT_SYS_SIGNAL
2532 /*
2533  * For backwards compatibility.  Functionality superseded by sigaction.
2534  */
2535 asmlinkage unsigned long
2536 sys_signal(int sig, __sighandler_t handler)
2537 {
2538 	struct k_sigaction new_sa, old_sa;
2539 	int ret;
2540 
2541 	new_sa.sa.sa_handler = handler;
2542 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2543 	sigemptyset(&new_sa.sa.sa_mask);
2544 
2545 	ret = do_sigaction(sig, &new_sa, &old_sa);
2546 
2547 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2548 }
2549 #endif /* __ARCH_WANT_SYS_SIGNAL */
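
/*
 * Illustrative aside (not kernel code): because of SA_ONESHOT above, the old
 * signal() interface gives System V semantics -- the handler is reset to
 * SIG_DFL after one delivery -- so portable code re-installs it:
 *
 *	void handler(int sig)
 *	{
 *		signal(sig, handler);	re-arm; sigaction() avoids this dance
 *		...
 *	}
 */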
2550 
2551 #ifdef __ARCH_WANT_SYS_PAUSE
2552 
2553 asmlinkage long
2554 sys_pause(void)
2555 {
2556 	current->state = TASK_INTERRUPTIBLE;
2557 	schedule();
2558 	return -ERESTARTNOHAND;
2559 }
2560 
2561 #endif
2562 
2563 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2564 asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2565 {
2566 	sigset_t newset;
2567 
2568 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2569 	if (sigsetsize != sizeof(sigset_t))
2570 		return -EINVAL;
2571 
2572 	if (copy_from_user(&newset, unewset, sizeof(newset)))
2573 		return -EFAULT;
2574 	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2575 
2576 	spin_lock_irq(&current->sighand->siglock);
2577 	current->saved_sigmask = current->blocked;
2578 	current->blocked = newset;
2579 	recalc_sigpending();
2580 	spin_unlock_irq(&current->sighand->siglock);
2581 
2582 	current->state = TASK_INTERRUPTIBLE;
2583 	schedule();
2584 	set_restore_sigmask();
2585 	return -ERESTARTNOHAND;
2586 }
2587 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
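
/*
 * Illustrative userspace sketch (not part of the kernel build): sigsuspend()
 * atomically swaps in the given mask and sleeps, which closes the wake-up
 * race a sigprocmask()/pause() pair would have.  "got_usr1" is an assumed
 * flag set by the SIGUSR1 handler:
 *
 *	sigset_t block, wait_mask;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &wait_mask);
 *	while (!got_usr1)
 *		sigsuspend(&wait_mask);		returns -1 with errno == EINTR
 *	sigprocmask(SIG_SETMASK, &wait_mask, NULL);
 */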
2588 
2589 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2590 {
2591 	return NULL;
2592 }
2593 
2594 void __init signals_init(void)
2595 {
2596 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2597 }
2598