xref: /linux/kernel/signal.c (revision 3252b11fc4790d046b93f300c898df2f7cd7c176)
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *		Changes to use preallocated sigqueue structures
10  *		to allow signals to be sent reliably.
11  */
12 
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/security.h>
21 #include <linux/syscalls.h>
22 #include <linux/ptrace.h>
23 #include <linux/signal.h>
24 #include <linux/signalfd.h>
25 #include <linux/ratelimit.h>
26 #include <linux/tracehook.h>
27 #include <linux/capability.h>
28 #include <linux/freezer.h>
29 #include <linux/pid_namespace.h>
30 #include <linux/nsproxy.h>
31 #define CREATE_TRACE_POINTS
32 #include <trace/events/signal.h>
33 
34 #include <asm/param.h>
35 #include <asm/uaccess.h>
36 #include <asm/unistd.h>
37 #include <asm/siginfo.h>
38 #include "audit.h"	/* audit_signal_info() */
39 
40 /*
41  * SLAB caches for signal bits.
42  */
43 
44 static struct kmem_cache *sigqueue_cachep;
45 
46 int print_fatal_signals __read_mostly;
47 
48 static void __user *sig_handler(struct task_struct *t, int sig)
49 {
50 	return t->sighand->action[sig - 1].sa.sa_handler;
51 }
52 
53 static int sig_handler_ignored(void __user *handler, int sig)
54 {
55 	/* Is it explicitly or implicitly ignored? */
56 	return handler == SIG_IGN ||
57 		(handler == SIG_DFL && sig_kernel_ignore(sig));
58 }
59 
60 static int sig_task_ignored(struct task_struct *t, int sig,
61 		int from_ancestor_ns)
62 {
63 	void __user *handler;
64 
65 	handler = sig_handler(t, sig);
66 
67 	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
68 			handler == SIG_DFL && !from_ancestor_ns)
69 		return 1;
70 
71 	return sig_handler_ignored(handler, sig);
72 }
73 
74 static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
75 {
76 	/*
77 	 * Blocked signals are never ignored, since the
78 	 * signal handler may change by the time it is
79 	 * unblocked.
80 	 */
81 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
82 		return 0;
83 
84 	if (!sig_task_ignored(t, sig, from_ancestor_ns))
85 		return 0;
86 
87 	/*
88 	 * Tracers may want to know about even ignored signals.
89 	 */
90 	return !tracehook_consider_ignored_signal(t, sig);
91 }
92 
93 /*
94  * Re-calculate pending state from the set of locally pending
95  * signals, globally pending signals, and blocked signals.
96  */
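/*
 * The switch on the compile-time constant _NSIG_WORDS below is just a
 * hand-unrolled version of the loop in the default case.
 */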
97 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
98 {
99 	unsigned long ready;
100 	long i;
101 
102 	switch (_NSIG_WORDS) {
103 	default:
104 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
105 			ready |= signal->sig[i] &~ blocked->sig[i];
106 		break;
107 
108 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
109 		ready |= signal->sig[2] &~ blocked->sig[2];
110 		ready |= signal->sig[1] &~ blocked->sig[1];
111 		ready |= signal->sig[0] &~ blocked->sig[0];
112 		break;
113 
114 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
115 		ready |= signal->sig[0] &~ blocked->sig[0];
116 		break;
117 
118 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
119 	}
120 	return ready !=	0;
121 }
122 
123 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
124 
125 static int recalc_sigpending_tsk(struct task_struct *t)
126 {
127 	if (t->signal->group_stop_count > 0 ||
128 	    PENDING(&t->pending, &t->blocked) ||
129 	    PENDING(&t->signal->shared_pending, &t->blocked)) {
130 		set_tsk_thread_flag(t, TIF_SIGPENDING);
131 		return 1;
132 	}
133 	/*
134 	 * We must never clear the flag in another thread, or in current
135 	 * when it's possible the current syscall is returning -ERESTART*.
136 	 * So we don't clear it here, and only callers who know they should do so.
137 	 */
138 	return 0;
139 }
140 
141 /*
142  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
143  * This is superfluous when called on current; the wakeup is a harmless no-op.
144  */
145 void recalc_sigpending_and_wake(struct task_struct *t)
146 {
147 	if (recalc_sigpending_tsk(t))
148 		signal_wake_up(t, 0);
149 }
150 
151 void recalc_sigpending(void)
152 {
153 	if (unlikely(tracehook_force_sigpending()))
154 		set_thread_flag(TIF_SIGPENDING);
155 	else if (!recalc_sigpending_tsk(current) && !freezing(current))
156 		clear_thread_flag(TIF_SIGPENDING);
157 
158 }
159 
160 /* Given the mask, find the first available signal that should be serviced. */
161 
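/*
 * Signals are serviced lowest-numbered first.  ffz(~x) returns the bit
 * index of the least significant set bit in x; adding the word offset
 * and 1 converts that bit index into a 1-based signal number.
 */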
162 int next_signal(struct sigpending *pending, sigset_t *mask)
163 {
164 	unsigned long i, *s, *m, x;
165 	int sig = 0;
166 
167 	s = pending->signal.sig;
168 	m = mask->sig;
169 	switch (_NSIG_WORDS) {
170 	default:
171 		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
172 			if ((x = *s &~ *m) != 0) {
173 				sig = ffz(~x) + i*_NSIG_BPW + 1;
174 				break;
175 			}
176 		break;
177 
178 	case 2: if ((x = s[0] &~ m[0]) != 0)
179 			sig = 1;
180 		else if ((x = s[1] &~ m[1]) != 0)
181 			sig = _NSIG_BPW + 1;
182 		else
183 			break;
184 		sig += ffz(~x);
185 		break;
186 
187 	case 1: if ((x = *s &~ *m) != 0)
188 			sig = ffz(~x) + 1;
189 		break;
190 	}
191 
192 	return sig;
193 }
194 
195 static inline void print_dropped_signal(int sig)
196 {
197 	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
198 
199 	if (!print_fatal_signals)
200 		return;
201 
202 	if (!__ratelimit(&ratelimit_state))
203 		return;
204 
205 	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
206 				current->comm, current->pid, sig);
207 }
208 
209 /*
210  * allocate a new signal queue record
211  * - this may be called without locks if and only if t == current, otherwise an
212  *   appropriate lock must be held to stop the target task from exiting
213  */
214 static struct sigqueue *
215 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
216 {
217 	struct sigqueue *q = NULL;
218 	struct user_struct *user;
219 
220 	/*
221 	 * We won't get problems with the target's UID changing under us
222 	 * because changing it requires RCU to be used, and if t != current, the
223 	 * caller must be holding the RCU read lock (by way of a spinlock), and
224 	 * we use RCU protection here as well.
225 	 */
226 	user = get_uid(__task_cred(t)->user);
227 	atomic_inc(&user->sigpending);
228 
229 	if (override_rlimit ||
230 	    atomic_read(&user->sigpending) <=
231 			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) {
232 		q = kmem_cache_alloc(sigqueue_cachep, flags);
233 	} else {
234 		print_dropped_signal(sig);
235 	}
236 
237 	if (unlikely(q == NULL)) {
238 		atomic_dec(&user->sigpending);
239 		free_uid(user);
240 	} else {
241 		INIT_LIST_HEAD(&q->list);
242 		q->flags = 0;
243 		q->user = user;
244 	}
245 
246 	return q;
247 }
248 
249 static void __sigqueue_free(struct sigqueue *q)
250 {
251 	if (q->flags & SIGQUEUE_PREALLOC)
252 		return;
253 	atomic_dec(&q->user->sigpending);
254 	free_uid(q->user);
255 	kmem_cache_free(sigqueue_cachep, q);
256 }
257 
258 void flush_sigqueue(struct sigpending *queue)
259 {
260 	struct sigqueue *q;
261 
262 	sigemptyset(&queue->signal);
263 	while (!list_empty(&queue->list)) {
264 		q = list_entry(queue->list.next, struct sigqueue , list);
265 		list_del_init(&q->list);
266 		__sigqueue_free(q);
267 	}
268 }
269 
270 /*
271  * Flush all pending signals for a task.
272  */
273 void __flush_signals(struct task_struct *t)
274 {
275 	clear_tsk_thread_flag(t, TIF_SIGPENDING);
276 	flush_sigqueue(&t->pending);
277 	flush_sigqueue(&t->signal->shared_pending);
278 }
279 
280 void flush_signals(struct task_struct *t)
281 {
282 	unsigned long flags;
283 
284 	spin_lock_irqsave(&t->sighand->siglock, flags);
285 	__flush_signals(t);
286 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
287 }
288 
289 static void __flush_itimer_signals(struct sigpending *pending)
290 {
291 	sigset_t signal, retain;
292 	struct sigqueue *q, *n;
293 
294 	signal = pending->signal;
295 	sigemptyset(&retain);
296 
297 	list_for_each_entry_safe(q, n, &pending->list, list) {
298 		int sig = q->info.si_signo;
299 
300 		if (likely(q->info.si_code != SI_TIMER)) {
301 			sigaddset(&retain, sig);
302 		} else {
303 			sigdelset(&signal, sig);
304 			list_del_init(&q->list);
305 			__sigqueue_free(q);
306 		}
307 	}
308 
309 	sigorsets(&pending->signal, &signal, &retain);
310 }
311 
312 void flush_itimer_signals(void)
313 {
314 	struct task_struct *tsk = current;
315 	unsigned long flags;
316 
317 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
318 	__flush_itimer_signals(&tsk->pending);
319 	__flush_itimer_signals(&tsk->signal->shared_pending);
320 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
321 }
322 
323 void ignore_signals(struct task_struct *t)
324 {
325 	int i;
326 
327 	for (i = 0; i < _NSIG; ++i)
328 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
329 
330 	flush_signals(t);
331 }
332 
333 /*
334  * Flush all handlers for a task.
335  */
336 
337 void
338 flush_signal_handlers(struct task_struct *t, int force_default)
339 {
340 	int i;
341 	struct k_sigaction *ka = &t->sighand->action[0];
342 	for (i = _NSIG ; i != 0 ; i--) {
343 		if (force_default || ka->sa.sa_handler != SIG_IGN)
344 			ka->sa.sa_handler = SIG_DFL;
345 		ka->sa.sa_flags = 0;
346 		sigemptyset(&ka->sa.sa_mask);
347 		ka++;
348 	}
349 }
350 
351 int unhandled_signal(struct task_struct *tsk, int sig)
352 {
353 	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
354 	if (is_global_init(tsk))
355 		return 1;
356 	if (handler != SIG_IGN && handler != SIG_DFL)
357 		return 0;
358 	return !tracehook_consider_fatal_signal(tsk, sig);
359 }
360 
361 
362 /* Notify the system that a driver wants to block all signals for this
363  * process, and wants to be notified if any signals at all were to be
364  * sent/acted upon.  If the notifier routine returns non-zero, then the
365  * signal will be acted upon after all.  If the notifier routine returns 0,
366  * then the signal will be blocked.  Only one block per process is
367  * allowed.  priv is a pointer to private data that the notifier routine
368  * can use to determine if the signal should be blocked or not.  */
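/*
 * A rough usage sketch (hypothetical driver code, not an actual in-tree
 * user): the notifier returns nonzero to let a masked signal through.
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		return dev->allow_signals;
 *	}
 *
 *	block_all_signals(my_notifier, dev, &mask);
 *	... signal-sensitive section ...
 *	unblock_all_signals();
 */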
369 
370 void
371 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
372 {
373 	unsigned long flags;
374 
375 	spin_lock_irqsave(&current->sighand->siglock, flags);
376 	current->notifier_mask = mask;
377 	current->notifier_data = priv;
378 	current->notifier = notifier;
379 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
380 }
381 
382 /* Notify the system that blocking has ended. */
383 
384 void
385 unblock_all_signals(void)
386 {
387 	unsigned long flags;
388 
389 	spin_lock_irqsave(&current->sighand->siglock, flags);
390 	current->notifier = NULL;
391 	current->notifier_data = NULL;
392 	recalc_sigpending();
393 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
394 }
395 
396 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
397 {
398 	struct sigqueue *q, *first = NULL;
399 
400 	/*
401 	 * Collect the siginfo appropriate to this signal.  Check if
402 	 * there is another siginfo for the same signal.
403 	*/
404 	list_for_each_entry(q, &list->list, list) {
405 		if (q->info.si_signo == sig) {
406 			if (first)
407 				goto still_pending;
408 			first = q;
409 		}
410 	}
411 
412 	sigdelset(&list->signal, sig);
413 
414 	if (first) {
415 still_pending:
416 		list_del_init(&first->list);
417 		copy_siginfo(info, &first->info);
418 		__sigqueue_free(first);
419 	} else {
420 		/* Ok, it wasn't in the queue.  This must be
421 		   a fast-pathed signal or we must have been
422 		   out of queue space.  So zero out the info.
423 		 */
424 		info->si_signo = sig;
425 		info->si_errno = 0;
426 		info->si_code = 0;
427 		info->si_pid = 0;
428 		info->si_uid = 0;
429 	}
430 }
431 
432 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
433 			siginfo_t *info)
434 {
435 	int sig = next_signal(pending, mask);
436 
437 	if (sig) {
438 		if (current->notifier) {
439 			if (sigismember(current->notifier_mask, sig)) {
440 				if (!(current->notifier)(current->notifier_data)) {
441 					clear_thread_flag(TIF_SIGPENDING);
442 					return 0;
443 				}
444 			}
445 		}
446 
447 		collect_signal(sig, pending, info);
448 	}
449 
450 	return sig;
451 }
452 
453 /*
454  * Dequeue a signal and return the element to the caller, which is
455  * expected to free it.
456  *
457  * All callers have to hold the siglock.
458  */
459 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
460 {
461 	int signr;
462 
463 	/* We only dequeue private signals from ourselves, we don't let
464 	 * signalfd steal them
465 	 */
466 	signr = __dequeue_signal(&tsk->pending, mask, info);
467 	if (!signr) {
468 		signr = __dequeue_signal(&tsk->signal->shared_pending,
469 					 mask, info);
470 		/*
471 		 * itimer signal ?
472 		 *
473 		 * itimers are process shared and we restart periodic
474 		 * itimers in the signal delivery path to prevent DoS
475 		 * attacks in the high resolution timer case. This is
476 		 * compliant with the old way of self restarting
477 		 * itimers, as the SIGALRM is a legacy signal and only
478 		 * queued once. Changing the restart behaviour to
479 		 * restart the timer in the signal dequeue path also
480 		 * reduces timer noise on heavily loaded !highres
481 		 * systems.
482 		 */
483 		if (unlikely(signr == SIGALRM)) {
484 			struct hrtimer *tmr = &tsk->signal->real_timer;
485 
486 			if (!hrtimer_is_queued(tmr) &&
487 			    tsk->signal->it_real_incr.tv64 != 0) {
488 				hrtimer_forward(tmr, tmr->base->get_time(),
489 						tsk->signal->it_real_incr);
490 				hrtimer_restart(tmr);
491 			}
492 		}
493 	}
494 
495 	recalc_sigpending();
496 	if (!signr)
497 		return 0;
498 
499 	if (unlikely(sig_kernel_stop(signr))) {
500 		/*
501 		 * Set a marker that we have dequeued a stop signal.  Our
502 		 * caller might release the siglock and then the pending
503 		 * stop signal it is about to process is no longer in the
504 		 * pending bitmasks, but must still be cleared by a SIGCONT
505 		 * (and overruled by a SIGKILL).  So those cases clear this
506 		 * shared flag after we've set it.  Note that this flag may
507 		 * remain set after the signal we return is ignored or
508 		 * handled.  That doesn't matter because its only purpose
509 		 * is to alert stop-signal processing code when another
510 		 * processor has come along and cleared the flag.
511 		 */
512 		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
513 	}
514 	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
515 		/*
516 		 * Release the siglock to ensure proper locking order
517 		 * of timer locks outside of siglocks.  Note, we leave
518 		 * irqs disabled here, since the posix-timers code is
519 		 * about to disable them again anyway.
520 		 */
521 		spin_unlock(&tsk->sighand->siglock);
522 		do_schedule_next_timer(info);
523 		spin_lock(&tsk->sighand->siglock);
524 	}
525 	return signr;
526 }
527 
528 /*
529  * Tell a process that it has a new active signal.
530  *
531  * NOTE! we rely on the previous spin_lock to
532  * lock interrupts for us! We can only be called with
533  * "siglock" held, and local interrupts must
534  * have been disabled when that got acquired!
535  *
536  * No need to set need_resched since signal event passing
537  * goes through ->blocked
538  */
539 void signal_wake_up(struct task_struct *t, int resume)
540 {
541 	unsigned int mask;
542 
543 	set_tsk_thread_flag(t, TIF_SIGPENDING);
544 
545 	/*
546 	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
547 	 * case. We don't check t->state here because there is a race with it
548 	 * executing on another processor and just now entering stopped state.
549 	 * By using wake_up_state, we ensure the process will wake up and
550 	 * handle its death signal.
551 	 */
552 	mask = TASK_INTERRUPTIBLE;
553 	if (resume)
554 		mask |= TASK_WAKEKILL;
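	/*
	 * If the task was not sleeping in one of the states in mask (e.g.
	 * it is currently running on another CPU), wake_up_state() returns
	 * zero and kick_process() pokes that CPU so the task re-enters the
	 * kernel and notices TIF_SIGPENDING on its way back to user mode.
	 */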
555 	if (!wake_up_state(t, mask))
556 		kick_process(t);
557 }
558 
559 /*
560  * Remove signals in mask from the pending set and queue.
561  * Returns 1 if any signals were found.
562  *
563  * All callers must be holding the siglock.
564  *
565  * This version takes a sigset mask and looks at all signals,
566  * not just those in the first mask word.
567  */
568 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
569 {
570 	struct sigqueue *q, *n;
571 	sigset_t m;
572 
573 	sigandsets(&m, mask, &s->signal);
574 	if (sigisemptyset(&m))
575 		return 0;
576 
577 	signandsets(&s->signal, &s->signal, mask);
578 	list_for_each_entry_safe(q, n, &s->list, list) {
579 		if (sigismember(mask, q->info.si_signo)) {
580 			list_del_init(&q->list);
581 			__sigqueue_free(q);
582 		}
583 	}
584 	return 1;
585 }
586 /*
587  * Remove signals in mask from the pending set and queue.
588  * Returns 1 if any signals were found.
589  *
590  * All callers must be holding the siglock.
591  */
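/*
 * Note: the single-word @mask is only used here for legacy (< SIGRTMIN)
 * signals; the si_signo range check below also keeps sigmask() from
 * being applied to an rt signal number that doesn't fit in one word.
 */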
592 static int rm_from_queue(unsigned long mask, struct sigpending *s)
593 {
594 	struct sigqueue *q, *n;
595 
596 	if (!sigtestsetmask(&s->signal, mask))
597 		return 0;
598 
599 	sigdelsetmask(&s->signal, mask);
600 	list_for_each_entry_safe(q, n, &s->list, list) {
601 		if (q->info.si_signo < SIGRTMIN &&
602 		    (mask & sigmask(q->info.si_signo))) {
603 			list_del_init(&q->list);
604 			__sigqueue_free(q);
605 		}
606 	}
607 	return 1;
608 }
609 
610 /*
611  * Bad permissions for sending the signal
612  * - the caller must hold at least the RCU read lock
613  */
614 static int check_kill_permission(int sig, struct siginfo *info,
615 				 struct task_struct *t)
616 {
617 	const struct cred *cred = current_cred(), *tcred;
618 	struct pid *sid;
619 	int error;
620 
621 	if (!valid_signal(sig))
622 		return -EINVAL;
623 
624 	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
625 		return 0;
626 
627 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
628 	if (error)
629 		return error;
630 
631 	tcred = __task_cred(t);
632 	if ((cred->euid ^ tcred->suid) &&
633 	    (cred->euid ^ tcred->uid) &&
634 	    (cred->uid  ^ tcred->suid) &&
635 	    (cred->uid  ^ tcred->uid) &&
636 	    !capable(CAP_KILL)) {
637 		switch (sig) {
638 		case SIGCONT:
639 			sid = task_session(t);
640 			/*
641 			 * We don't return the error if sid == NULL. The
642 			 * task was unhashed, the caller must notice this.
643 			 */
644 			if (!sid || sid == task_session(current))
645 				break;
646 		default:
647 			return -EPERM;
648 		}
649 	}
650 
651 	return security_task_kill(t, info, sig, 0);
652 }
653 
654 /*
655  * Handle magic process-wide effects of stop/continue signals. Unlike
656  * the signal actions, these happen immediately at signal-generation
657  * time regardless of blocking, ignoring, or handling.  This does the
658  * actual continuing for SIGCONT, but not the actual stopping for stop
659  * signals. The process stop is done as a signal action for SIG_DFL.
660  *
661  * Returns true if the signal should be actually delivered, otherwise
662  * it should be dropped.
663  */
664 static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
665 {
666 	struct signal_struct *signal = p->signal;
667 	struct task_struct *t;
668 
669 	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
670 		/*
671 		 * The process is in the middle of dying, nothing to do.
672 		 */
673 	} else if (sig_kernel_stop(sig)) {
674 		/*
675 		 * This is a stop signal.  Remove SIGCONT from all queues.
676 		 */
677 		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
678 		t = p;
679 		do {
680 			rm_from_queue(sigmask(SIGCONT), &t->pending);
681 		} while_each_thread(p, t);
682 	} else if (sig == SIGCONT) {
683 		unsigned int why;
684 		/*
685 		 * Remove all stop signals from all queues,
686 		 * and wake all threads.
687 		 */
688 		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
689 		t = p;
690 		do {
691 			unsigned int state;
692 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
693 			/*
694 			 * If there is a handler for SIGCONT, we must make
695 			 * sure that no thread returns to user mode before
696 			 * we post the signal, in case it was the only
697 			 * thread eligible to run the signal handler--then
698 			 * it must not do anything between resuming and
699 			 * running the handler.  With the TIF_SIGPENDING
700 			 * flag set, the thread will pause and acquire the
701 			 * siglock that we hold now and until we've queued
702 			 * the pending signal.
703 			 *
704 			 * Wake up the stopped thread _after_ setting
705 			 * TIF_SIGPENDING
706 			 */
707 			state = __TASK_STOPPED;
708 			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
709 				set_tsk_thread_flag(t, TIF_SIGPENDING);
710 				state |= TASK_INTERRUPTIBLE;
711 			}
712 			wake_up_state(t, state);
713 		} while_each_thread(p, t);
714 
715 		/*
716 		 * Notify the parent with CLD_CONTINUED if we were stopped.
717 		 *
718 		 * If we were in the middle of a group stop, we pretend it
719 		 * was already finished, and then continued. Since SIGCHLD
720 		 * doesn't queue we report only CLD_STOPPED, as if the next
721 		 * CLD_CONTINUED was dropped.
722 		 */
723 		why = 0;
724 		if (signal->flags & SIGNAL_STOP_STOPPED)
725 			why |= SIGNAL_CLD_CONTINUED;
726 		else if (signal->group_stop_count)
727 			why |= SIGNAL_CLD_STOPPED;
728 
729 		if (why) {
730 			/*
731 			 * The first thread which returns from do_signal_stop()
732 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
733 			 * notify its parent. See get_signal_to_deliver().
734 			 */
735 			signal->flags = why | SIGNAL_STOP_CONTINUED;
736 			signal->group_stop_count = 0;
737 			signal->group_exit_code = 0;
738 		} else {
739 			/*
740 			 * We are not stopped, but there could be a stop
741 			 * signal in the middle of being processed after
742 			 * being removed from the queue.  Clear that too.
743 			 */
744 			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
745 		}
746 	}
747 
748 	return !sig_ignored(p, sig, from_ancestor_ns);
749 }
750 
751 /*
752  * Test if P wants to take SIG.  After we've checked all threads with this,
753  * it's equivalent to finding no threads not blocking SIG.  Any threads not
754  * blocking SIG were ruled out because they are not running and already
755  * have pending signals.  Such threads will dequeue from the shared queue
756  * as soon as they're available, so putting the signal on the shared queue
757  * will be equivalent to sending it to one such thread.
758  */
759 static inline int wants_signal(int sig, struct task_struct *p)
760 {
761 	if (sigismember(&p->blocked, sig))
762 		return 0;
763 	if (p->flags & PF_EXITING)
764 		return 0;
765 	if (sig == SIGKILL)
766 		return 1;
767 	if (task_is_stopped_or_traced(p))
768 		return 0;
769 	return task_curr(p) || !signal_pending(p);
770 }
771 
772 static void complete_signal(int sig, struct task_struct *p, int group)
773 {
774 	struct signal_struct *signal = p->signal;
775 	struct task_struct *t;
776 
777 	/*
778 	 * Now find a thread we can wake up to take the signal off the queue.
779 	 *
780 	 * If the main thread wants the signal, it gets first crack.
781 	 * Probably the least surprising to the average bear.
782 	 */
783 	if (wants_signal(sig, p))
784 		t = p;
785 	else if (!group || thread_group_empty(p))
786 		/*
787 		 * There is just one thread and it does not need to be woken.
788 		 * It will dequeue unblocked signals before it runs again.
789 		 */
790 		return;
791 	else {
792 		/*
793 		 * Otherwise try to find a suitable thread.
794 		 */
795 		t = signal->curr_target;
796 		while (!wants_signal(sig, t)) {
797 			t = next_thread(t);
798 			if (t == signal->curr_target)
799 				/*
800 				 * No thread needs to be woken.
801 				 * Any eligible threads will see
802 				 * the signal in the queue soon.
803 				 */
804 				return;
805 		}
806 		signal->curr_target = t;
807 	}
808 
809 	/*
810 	 * Found a killable thread.  If the signal will be fatal,
811 	 * then start taking the whole group down immediately.
812 	 */
813 	if (sig_fatal(p, sig) &&
814 	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
815 	    !sigismember(&t->real_blocked, sig) &&
816 	    (sig == SIGKILL ||
817 	     !tracehook_consider_fatal_signal(t, sig))) {
818 		/*
819 		 * This signal will be fatal to the whole group.
820 		 */
821 		if (!sig_kernel_coredump(sig)) {
822 			/*
823 			 * Start a group exit and wake everybody up.
824 			 * This way we don't have other threads
825 			 * running and doing things after a slower
826 			 * thread has the fatal signal pending.
827 			 */
828 			signal->flags = SIGNAL_GROUP_EXIT;
829 			signal->group_exit_code = sig;
830 			signal->group_stop_count = 0;
831 			t = p;
832 			do {
833 				sigaddset(&t->pending.signal, SIGKILL);
834 				signal_wake_up(t, 1);
835 			} while_each_thread(p, t);
836 			return;
837 		}
838 	}
839 
840 	/*
841 	 * The signal is already in the shared-pending queue.
842 	 * Tell the chosen thread to wake up and dequeue it.
843 	 */
844 	signal_wake_up(t, sig == SIGKILL);
845 	return;
846 }
847 
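/*
 * Legacy (non-real-time) signals coalesce: if @sig is already pending in
 * @signals, __send_signal() below drops the new instance (and its
 * siginfo) instead of queueing it a second time.
 */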
848 static inline int legacy_queue(struct sigpending *signals, int sig)
849 {
850 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
851 }
852 
853 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
854 			int group, int from_ancestor_ns)
855 {
856 	struct sigpending *pending;
857 	struct sigqueue *q;
858 	int override_rlimit;
859 
860 	trace_signal_generate(sig, info, t);
861 
862 	assert_spin_locked(&t->sighand->siglock);
863 
864 	if (!prepare_signal(sig, t, from_ancestor_ns))
865 		return 0;
866 
867 	pending = group ? &t->signal->shared_pending : &t->pending;
868 	/*
869 	 * Short-circuit ignored signals and support queuing
870 	 * exactly one non-rt signal, so that we can get more
871 	 * detailed information about the cause of the signal.
872 	 */
873 	if (legacy_queue(pending, sig))
874 		return 0;
875 	/*
876 	 * fast-pathed signals for kernel-internal things like SIGSTOP
877 	 * or SIGKILL.
878 	 */
879 	if (info == SEND_SIG_FORCED)
880 		goto out_set;
881 
882 	/* Real-time signals must be queued if sent by sigqueue, or
883 	   some other real-time mechanism.  It is implementation
884 	   defined whether kill() does so.  We attempt to do so, on
885 	   the principle of least surprise, but since kill is not
886 	   allowed to fail with EAGAIN when low on memory we just
887 	   make sure at least one signal gets delivered and don't
888 	   pass on the info struct.  */
889 
890 	if (sig < SIGRTMIN)
891 		override_rlimit = (is_si_special(info) || info->si_code >= 0);
892 	else
893 		override_rlimit = 0;
894 
895 	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
896 		override_rlimit);
897 	if (q) {
898 		list_add_tail(&q->list, &pending->list);
899 		switch ((unsigned long) info) {
900 		case (unsigned long) SEND_SIG_NOINFO:
901 			q->info.si_signo = sig;
902 			q->info.si_errno = 0;
903 			q->info.si_code = SI_USER;
904 			q->info.si_pid = task_tgid_nr_ns(current,
905 							task_active_pid_ns(t));
906 			q->info.si_uid = current_uid();
907 			break;
908 		case (unsigned long) SEND_SIG_PRIV:
909 			q->info.si_signo = sig;
910 			q->info.si_errno = 0;
911 			q->info.si_code = SI_KERNEL;
912 			q->info.si_pid = 0;
913 			q->info.si_uid = 0;
914 			break;
915 		default:
916 			copy_siginfo(&q->info, info);
917 			if (from_ancestor_ns)
918 				q->info.si_pid = 0;
919 			break;
920 		}
921 	} else if (!is_si_special(info)) {
922 		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
923 			/*
924 			 * Queue overflow, abort.  We may abort if the
925 			 * signal was rt and sent by user using something
926 			 * other than kill().
927 			 */
928 			trace_signal_overflow_fail(sig, group, info);
929 			return -EAGAIN;
930 		} else {
931 			/*
932 			 * This is a silent loss of information.  We still
933 			 * send the signal, but the *info bits are lost.
934 			 */
935 			trace_signal_lose_info(sig, group, info);
936 		}
937 	}
938 
939 out_set:
940 	signalfd_notify(t, sig);
941 	sigaddset(&pending->signal, sig);
942 	complete_signal(sig, t, group);
943 	return 0;
944 }
945 
946 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
947 			int group)
948 {
949 	int from_ancestor_ns = 0;
950 
951 #ifdef CONFIG_PID_NS
952 	if (!is_si_special(info) && SI_FROMUSER(info) &&
953 			task_pid_nr_ns(current, task_active_pid_ns(t)) <= 0)
954 		from_ancestor_ns = 1;
955 #endif
956 
957 	return __send_signal(sig, info, t, group, from_ancestor_ns);
958 }
959 
960 static void print_fatal_signal(struct pt_regs *regs, int signr)
961 {
962 	printk("%s/%d: potentially unexpected fatal signal %d.\n",
963 		current->comm, task_pid_nr(current), signr);
964 
965 #if defined(__i386__) && !defined(__arch_um__)
966 	printk("code at %08lx: ", regs->ip);
967 	{
968 		int i;
969 		for (i = 0; i < 16; i++) {
970 			unsigned char insn;
971 
972 			__get_user(insn, (unsigned char *)(regs->ip + i));
973 			printk("%02x ", insn);
974 		}
975 	}
976 #endif
977 	printk("\n");
978 	preempt_disable();
979 	show_regs(regs);
980 	preempt_enable();
981 }
982 
983 static int __init setup_print_fatal_signals(char *str)
984 {
985 	get_option (&str, &print_fatal_signals);
986 
987 	return 1;
988 }
989 
990 __setup("print-fatal-signals=", setup_print_fatal_signals);
991 
992 int
993 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
994 {
995 	return send_signal(sig, info, p, 1);
996 }
997 
998 static int
999 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1000 {
1001 	return send_signal(sig, info, t, 0);
1002 }
1003 
1004 int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1005 			bool group)
1006 {
1007 	unsigned long flags;
1008 	int ret = -ESRCH;
1009 
1010 	if (lock_task_sighand(p, &flags)) {
1011 		ret = send_signal(sig, info, p, group);
1012 		unlock_task_sighand(p, &flags);
1013 	}
1014 
1015 	return ret;
1016 }
1017 
1018 /*
1019  * Force a signal that the process can't ignore: if necessary
1020  * we unblock the signal and change any SIG_IGN to SIG_DFL.
1021  *
1022  * Note: If we unblock the signal, we always reset it to SIG_DFL,
1023  * since we do not want to have a signal handler that was blocked
1024  * be invoked when user space had explicitly blocked it.
1025  *
1026  * We don't want to have recursive SIGSEGV's etc, for example,
1027  * that is why we also clear SIGNAL_UNKILLABLE.
1028  */
1029 int
1030 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1031 {
1032 	unsigned long int flags;
1033 	int ret, blocked, ignored;
1034 	struct k_sigaction *action;
1035 
1036 	spin_lock_irqsave(&t->sighand->siglock, flags);
1037 	action = &t->sighand->action[sig-1];
1038 	ignored = action->sa.sa_handler == SIG_IGN;
1039 	blocked = sigismember(&t->blocked, sig);
1040 	if (blocked || ignored) {
1041 		action->sa.sa_handler = SIG_DFL;
1042 		if (blocked) {
1043 			sigdelset(&t->blocked, sig);
1044 			recalc_sigpending_and_wake(t);
1045 		}
1046 	}
1047 	if (action->sa.sa_handler == SIG_DFL)
1048 		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1049 	ret = specific_send_sig_info(sig, info, t);
1050 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1051 
1052 	return ret;
1053 }
1054 
1055 void
1056 force_sig_specific(int sig, struct task_struct *t)
1057 {
1058 	force_sig_info(sig, SEND_SIG_FORCED, t);
1059 }
1060 
1061 /*
1062  * Nuke all other threads in the group.
1063  */
1064 void zap_other_threads(struct task_struct *p)
1065 {
1066 	struct task_struct *t;
1067 
1068 	p->signal->group_stop_count = 0;
1069 
1070 	for (t = next_thread(p); t != p; t = next_thread(t)) {
1071 		/*
1072 		 * Don't bother with already dead threads
1073 		 */
1074 		if (t->exit_state)
1075 			continue;
1076 
1077 		/* SIGKILL will be handled before any pending SIGSTOP */
1078 		sigaddset(&t->pending.signal, SIGKILL);
1079 		signal_wake_up(t, 1);
1080 	}
1081 }
1082 
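/*
 * ->sighand is RCU-protected and can be switched (during exec) or set to
 * NULL (once the task is released) at any time.  Take the lock and then
 * recheck that the pointer is still the same, retrying otherwise; a NULL
 * return tells the caller the task is already gone.
 */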
1083 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
1084 {
1085 	struct sighand_struct *sighand;
1086 
1087 	rcu_read_lock();
1088 	for (;;) {
1089 		sighand = rcu_dereference(tsk->sighand);
1090 		if (unlikely(sighand == NULL))
1091 			break;
1092 
1093 		spin_lock_irqsave(&sighand->siglock, *flags);
1094 		if (likely(sighand == tsk->sighand))
1095 			break;
1096 		spin_unlock_irqrestore(&sighand->siglock, *flags);
1097 	}
1098 	rcu_read_unlock();
1099 
1100 	return sighand;
1101 }
1102 
1103 /*
1104  * send signal info to all the members of a group
1105  * - the caller must hold the RCU read lock at least
1106  */
1107 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1108 {
1109 	int ret = check_kill_permission(sig, info, p);
1110 
1111 	if (!ret && sig)
1112 		ret = do_send_sig_info(sig, info, p, true);
1113 
1114 	return ret;
1115 }
1116 
1117 /*
1118  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1119  * control characters do (^C, ^Z etc)
1120  * - the caller must hold at least a readlock on tasklist_lock
1121  */
1122 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1123 {
1124 	struct task_struct *p = NULL;
1125 	int retval, success;
1126 
1127 	success = 0;
1128 	retval = -ESRCH;
1129 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1130 		int err = group_send_sig_info(sig, info, p);
1131 		success |= !err;
1132 		retval = err;
1133 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1134 	return success ? 0 : retval;
1135 }
1136 
1137 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1138 {
1139 	int error = -ESRCH;
1140 	struct task_struct *p;
1141 
1142 	rcu_read_lock();
1143 retry:
1144 	p = pid_task(pid, PIDTYPE_PID);
1145 	if (p) {
1146 		error = group_send_sig_info(sig, info, p);
1147 		if (unlikely(error == -ESRCH))
1148 			/*
1149 			 * The task was unhashed in between, try again.
1150 			 * If it is dead, pid_task() will return NULL,
1151 			 * if we race with de_thread() it will find the
1152 			 * new leader.
1153 			 */
1154 			goto retry;
1155 	}
1156 	rcu_read_unlock();
1157 
1158 	return error;
1159 }
1160 
1161 int
1162 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1163 {
1164 	int error;
1165 	rcu_read_lock();
1166 	error = kill_pid_info(sig, info, find_vpid(pid));
1167 	rcu_read_unlock();
1168 	return error;
1169 }
1170 
1171 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1172 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1173 		      uid_t uid, uid_t euid, u32 secid)
1174 {
1175 	int ret = -EINVAL;
1176 	struct task_struct *p;
1177 	const struct cred *pcred;
1178 
1179 	if (!valid_signal(sig))
1180 		return ret;
1181 
1182 	read_lock(&tasklist_lock);
1183 	p = pid_task(pid, PIDTYPE_PID);
1184 	if (!p) {
1185 		ret = -ESRCH;
1186 		goto out_unlock;
1187 	}
1188 	pcred = __task_cred(p);
1189 	if ((info == SEND_SIG_NOINFO ||
1190 	     (!is_si_special(info) && SI_FROMUSER(info))) &&
1191 	    euid != pcred->suid && euid != pcred->uid &&
1192 	    uid  != pcred->suid && uid  != pcred->uid) {
1193 		ret = -EPERM;
1194 		goto out_unlock;
1195 	}
1196 	ret = security_task_kill(p, info, sig, secid);
1197 	if (ret)
1198 		goto out_unlock;
1199 	if (sig && p->sighand) {
1200 		unsigned long flags;
1201 		spin_lock_irqsave(&p->sighand->siglock, flags);
1202 		ret = __send_signal(sig, info, p, 1, 0);
1203 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1204 	}
1205 out_unlock:
1206 	read_unlock(&tasklist_lock);
1207 	return ret;
1208 }
1209 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1210 
1211 /*
1212  * kill_something_info() interprets pid in interesting ways just like kill(2).
1213  *
1214  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1215  * is probably wrong.  Should make it like BSD or SYSV.
1216  */
1217 
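/*
 * As implemented below: pid > 0 signals that single process; pid == 0
 * signals the caller's process group; pid < -1 signals the process group
 * -pid; pid == -1 signals every process visible in the caller's pid
 * namespace except init (vpid 1) and the caller's own thread group.
 */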
1218 static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1219 {
1220 	int ret;
1221 
1222 	if (pid > 0) {
1223 		rcu_read_lock();
1224 		ret = kill_pid_info(sig, info, find_vpid(pid));
1225 		rcu_read_unlock();
1226 		return ret;
1227 	}
1228 
1229 	read_lock(&tasklist_lock);
1230 	if (pid != -1) {
1231 		ret = __kill_pgrp_info(sig, info,
1232 				pid ? find_vpid(-pid) : task_pgrp(current));
1233 	} else {
1234 		int retval = 0, count = 0;
1235 		struct task_struct * p;
1236 
1237 		for_each_process(p) {
1238 			if (task_pid_vnr(p) > 1 &&
1239 					!same_thread_group(p, current)) {
1240 				int err = group_send_sig_info(sig, info, p);
1241 				++count;
1242 				if (err != -EPERM)
1243 					retval = err;
1244 			}
1245 		}
1246 		ret = count ? retval : -ESRCH;
1247 	}
1248 	read_unlock(&tasklist_lock);
1249 
1250 	return ret;
1251 }
1252 
1253 /*
1254  * These are for backward compatibility with the rest of the kernel source.
1255  */
1256 
1257 int
1258 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1259 {
1260 	/*
1261 	 * Make sure legacy kernel users don't send in bad values
1262 	 * (normal paths check this in check_kill_permission).
1263 	 */
1264 	if (!valid_signal(sig))
1265 		return -EINVAL;
1266 
1267 	return do_send_sig_info(sig, info, p, false);
1268 }
1269 
1270 #define __si_special(priv) \
1271 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1272 
1273 int
1274 send_sig(int sig, struct task_struct *p, int priv)
1275 {
1276 	return send_sig_info(sig, __si_special(priv), p);
1277 }
1278 
1279 void
1280 force_sig(int sig, struct task_struct *p)
1281 {
1282 	force_sig_info(sig, SEND_SIG_PRIV, p);
1283 }
1284 
1285 /*
1286  * When things go south during signal handling, we
1287  * will force a SIGSEGV. And if the signal that caused
1288  * the problem was already a SIGSEGV, we'll want to
1289  * make sure we don't even try to deliver the signal.
1290  */
1291 int
1292 force_sigsegv(int sig, struct task_struct *p)
1293 {
1294 	if (sig == SIGSEGV) {
1295 		unsigned long flags;
1296 		spin_lock_irqsave(&p->sighand->siglock, flags);
1297 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1298 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1299 	}
1300 	force_sig(SIGSEGV, p);
1301 	return 0;
1302 }
1303 
1304 int kill_pgrp(struct pid *pid, int sig, int priv)
1305 {
1306 	int ret;
1307 
1308 	read_lock(&tasklist_lock);
1309 	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1310 	read_unlock(&tasklist_lock);
1311 
1312 	return ret;
1313 }
1314 EXPORT_SYMBOL(kill_pgrp);
1315 
1316 int kill_pid(struct pid *pid, int sig, int priv)
1317 {
1318 	return kill_pid_info(sig, __si_special(priv), pid);
1319 }
1320 EXPORT_SYMBOL(kill_pid);
1321 
1322 /*
1323  * These functions support sending signals using preallocated sigqueue
1324  * structures.  This is needed "because realtime applications cannot
1325  * afford to lose notifications of asynchronous events, like timer
1326  * expirations or I/O completions".  In the case of POSIX timers
1327  * we allocate the sigqueue structure from timer_create().  If this
1328  * allocation fails we are able to report the failure to the application
1329  * with an EAGAIN error.
1330  */
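/*
 * Rough lifecycle, as used by the POSIX timer code: sigqueue_alloc() at
 * timer creation, send_sigqueue() on every expiry (which only bumps
 * si_overrun if the previous entry is still queued), and sigqueue_free()
 * when the timer is deleted.
 */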
1331 struct sigqueue *sigqueue_alloc(void)
1332 {
1333 	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1334 
1335 	if (q)
1336 		q->flags |= SIGQUEUE_PREALLOC;
1337 
1338 	return q;
1339 }
1340 
1341 void sigqueue_free(struct sigqueue *q)
1342 {
1343 	unsigned long flags;
1344 	spinlock_t *lock = &current->sighand->siglock;
1345 
1346 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1347 	/*
1348 	 * We must hold ->siglock while testing q->list
1349 	 * to serialize with collect_signal() or with
1350 	 * __exit_signal()->flush_sigqueue().
1351 	 */
1352 	spin_lock_irqsave(lock, flags);
1353 	q->flags &= ~SIGQUEUE_PREALLOC;
1354 	/*
1355 	 * If it is queued it will be freed when dequeued,
1356 	 * like the "regular" sigqueue.
1357 	 */
1358 	if (!list_empty(&q->list))
1359 		q = NULL;
1360 	spin_unlock_irqrestore(lock, flags);
1361 
1362 	if (q)
1363 		__sigqueue_free(q);
1364 }
1365 
1366 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1367 {
1368 	int sig = q->info.si_signo;
1369 	struct sigpending *pending;
1370 	unsigned long flags;
1371 	int ret;
1372 
1373 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1374 
1375 	ret = -1;
1376 	if (!likely(lock_task_sighand(t, &flags)))
1377 		goto ret;
1378 
1379 	ret = 1; /* the signal is ignored */
1380 	if (!prepare_signal(sig, t, 0))
1381 		goto out;
1382 
1383 	ret = 0;
1384 	if (unlikely(!list_empty(&q->list))) {
1385 		/*
1386 		 * If an SI_TIMER entry is already queued, just increment
1387 		 * the overrun count.
1388 		 */
1389 		BUG_ON(q->info.si_code != SI_TIMER);
1390 		q->info.si_overrun++;
1391 		goto out;
1392 	}
1393 	q->info.si_overrun = 0;
1394 
1395 	signalfd_notify(t, sig);
1396 	pending = group ? &t->signal->shared_pending : &t->pending;
1397 	list_add_tail(&q->list, &pending->list);
1398 	sigaddset(&pending->signal, sig);
1399 	complete_signal(sig, t, group);
1400 out:
1401 	unlock_task_sighand(t, &flags);
1402 ret:
1403 	return ret;
1404 }
1405 
1406 /*
1407  * Let a parent know about the death of a child.
1408  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1409  *
1410  * Returns -1 if our parent ignored us and so we've switched to
1411  * self-reaping, or else @sig.
1412  */
1413 int do_notify_parent(struct task_struct *tsk, int sig)
1414 {
1415 	struct siginfo info;
1416 	unsigned long flags;
1417 	struct sighand_struct *psig;
1418 	int ret = sig;
1419 
1420 	BUG_ON(sig == -1);
1421 
1422  	/* do_notify_parent_cldstop should have been called instead.  */
1423  	BUG_ON(task_is_stopped_or_traced(tsk));
1424 
1425 	BUG_ON(!task_ptrace(tsk) &&
1426 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1427 
1428 	info.si_signo = sig;
1429 	info.si_errno = 0;
1430 	/*
1431 	 * we are under tasklist_lock here so our parent is tied to
1432 	 * us and cannot exit and release its namespace.
1433 	 * The only thing it can do is switch its nsproxy with sys_unshare(),
1434 	 * but unsharing pid namespaces is not allowed, so we'll always
1435 	 * see the relevant namespace.
1436 	 * see relevant namespace
1437 	 *
1438 	 * same as rcu_read_lock(), but according to Oleg it is not
1439 	 * correct to rely on this.
1440 	 * correct to rely on this
1441 	 */
1442 	rcu_read_lock();
1443 	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1444 	info.si_uid = __task_cred(tsk)->uid;
1445 	rcu_read_unlock();
1446 
1447 	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
1448 				tsk->signal->utime));
1449 	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
1450 				tsk->signal->stime));
1451 
1452 	info.si_status = tsk->exit_code & 0x7f;
1453 	if (tsk->exit_code & 0x80)
1454 		info.si_code = CLD_DUMPED;
1455 	else if (tsk->exit_code & 0x7f)
1456 		info.si_code = CLD_KILLED;
1457 	else {
1458 		info.si_code = CLD_EXITED;
1459 		info.si_status = tsk->exit_code >> 8;
1460 	}
1461 
1462 	psig = tsk->parent->sighand;
1463 	spin_lock_irqsave(&psig->siglock, flags);
1464 	if (!task_ptrace(tsk) && sig == SIGCHLD &&
1465 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1466 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1467 		/*
1468 		 * We are exiting and our parent doesn't care.  POSIX.1
1469 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1470 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1471 		 * automatically and not left for our parent's wait4 call.
1472 		 * Rather than having the parent do it as a magic kind of
1473 		 * signal handler, we just set this to tell do_exit that we
1474 		 * can be cleaned up without becoming a zombie.  Note that
1475 		 * we still call __wake_up_parent in this case, because a
1476 		 * blocked sys_wait4 might now return -ECHILD.
1477 		 *
1478 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1479 		 * is implementation-defined: we do (if you don't want
1480 		 * it, just use SIG_IGN instead).
1481 		 */
1482 		ret = tsk->exit_signal = -1;
1483 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1484 			sig = -1;
1485 	}
1486 	if (valid_signal(sig) && sig > 0)
1487 		__group_send_sig_info(sig, &info, tsk->parent);
1488 	__wake_up_parent(tsk, tsk->parent);
1489 	spin_unlock_irqrestore(&psig->siglock, flags);
1490 
1491 	return ret;
1492 }
1493 
1494 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1495 {
1496 	struct siginfo info;
1497 	unsigned long flags;
1498 	struct task_struct *parent;
1499 	struct sighand_struct *sighand;
1500 
1501 	if (task_ptrace(tsk))
1502 		parent = tsk->parent;
1503 	else {
1504 		tsk = tsk->group_leader;
1505 		parent = tsk->real_parent;
1506 	}
1507 
1508 	info.si_signo = SIGCHLD;
1509 	info.si_errno = 0;
1510 	/*
1511 	 * see comment in do_notify_parent() about the following 3 lines
1512 	 */
1513 	rcu_read_lock();
1514 	info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
1515 	info.si_uid = __task_cred(tsk)->uid;
1516 	rcu_read_unlock();
1517 
1518 	info.si_utime = cputime_to_clock_t(tsk->utime);
1519 	info.si_stime = cputime_to_clock_t(tsk->stime);
1520 
1521  	info.si_code = why;
1522  	switch (why) {
1523  	case CLD_CONTINUED:
1524  		info.si_status = SIGCONT;
1525  		break;
1526  	case CLD_STOPPED:
1527  		info.si_status = tsk->signal->group_exit_code & 0x7f;
1528  		break;
1529  	case CLD_TRAPPED:
1530  		info.si_status = tsk->exit_code & 0x7f;
1531  		break;
1532  	default:
1533  		BUG();
1534  	}
1535 
1536 	sighand = parent->sighand;
1537 	spin_lock_irqsave(&sighand->siglock, flags);
1538 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1539 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1540 		__group_send_sig_info(SIGCHLD, &info, parent);
1541 	/*
1542 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1543 	 */
1544 	__wake_up_parent(tsk, parent);
1545 	spin_unlock_irqrestore(&sighand->siglock, flags);
1546 }
1547 
1548 static inline int may_ptrace_stop(void)
1549 {
1550 	if (!likely(task_ptrace(current)))
1551 		return 0;
1552 	/*
1553 	 * If so and our tracer is also part of the coredump, stopping
1554 	 * is a deadlock situation, and pointless because our tracer
1555 	 * is dead, so don't allow us to stop.
1556 	 * is dead so don't allow us to stop.
1557 	 * If SIGKILL was already sent before the caller unlocked
1558 	 * ->siglock we must see ->core_state != NULL. Otherwise it
1559 	 * is safe to enter schedule().
1560 	 */
1561 	if (unlikely(current->mm->core_state) &&
1562 	    unlikely(current->mm == current->parent->mm))
1563 		return 0;
1564 
1565 	return 1;
1566 }
1567 
1568 /*
1569  * Return nonzero if there is a SIGKILL that should be waking us up.
1570  * Called with the siglock held.
1571  */
1572 static int sigkill_pending(struct task_struct *tsk)
1573 {
1574 	return	sigismember(&tsk->pending.signal, SIGKILL) ||
1575 		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1576 }
1577 
1578 /*
1579  * This must be called with current->sighand->siglock held.
1580  *
1581  * This should be the path for all ptrace stops.
1582  * We always set current->last_siginfo while stopped here.
1583  * That makes it a way to test a stopped process for
1584  * being ptrace-stopped vs being job-control-stopped.
1585  *
1586  * If we actually decide not to stop at all because the tracer
1587  * is gone, we keep current->exit_code unless clear_code.
1588  */
1589 static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1590 {
1591 	if (arch_ptrace_stop_needed(exit_code, info)) {
1592 		/*
1593 		 * The arch code has something special to do before a
1594 		 * ptrace stop.  This is allowed to block, e.g. for faults
1595 		 * on user stack pages.  We can't keep the siglock while
1596 		 * calling arch_ptrace_stop, so we must release it now.
1597 		 * To preserve proper semantics, we must do this before
1598 		 * any signal bookkeeping like checking group_stop_count.
1599 		 * Meanwhile, a SIGKILL could come in before we retake the
1600 		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
1601 		 * So after regaining the lock, we must check for SIGKILL.
1602 		 */
1603 		spin_unlock_irq(&current->sighand->siglock);
1604 		arch_ptrace_stop(exit_code, info);
1605 		spin_lock_irq(&current->sighand->siglock);
1606 		if (sigkill_pending(current))
1607 			return;
1608 	}
1609 
1610 	/*
1611 	 * If there is a group stop in progress,
1612 	 * we must participate in the bookkeeping.
1613 	 */
1614 	if (current->signal->group_stop_count > 0)
1615 		--current->signal->group_stop_count;
1616 
1617 	current->last_siginfo = info;
1618 	current->exit_code = exit_code;
1619 
1620 	/* Let the debugger run.  */
1621 	__set_current_state(TASK_TRACED);
1622 	spin_unlock_irq(&current->sighand->siglock);
1623 	read_lock(&tasklist_lock);
1624 	if (may_ptrace_stop()) {
1625 		do_notify_parent_cldstop(current, CLD_TRAPPED);
1626 		/*
1627 		 * Don't want to allow preemption here, because
1628 		 * sys_ptrace() needs this task to be inactive.
1629 		 *
1630 		 * XXX: implement read_unlock_no_resched().
1631 		 */
1632 		preempt_disable();
1633 		read_unlock(&tasklist_lock);
1634 		preempt_enable_no_resched();
1635 		schedule();
1636 	} else {
1637 		/*
1638 		 * By the time we got the lock, our tracer went away.
1639 		 * Don't drop the lock yet, another tracer may come.
1640 		 */
1641 		__set_current_state(TASK_RUNNING);
1642 		if (clear_code)
1643 			current->exit_code = 0;
1644 		read_unlock(&tasklist_lock);
1645 	}
1646 
1647 	/*
1648 	 * While in TASK_TRACED, we were considered "frozen enough".
1649 	 * Now that we woke up, it's crucial if we're supposed to be
1650 	 * frozen that we freeze now before running anything substantial.
1651 	 */
1652 	try_to_freeze();
1653 
1654 	/*
1655 	 * We are back.  Now reacquire the siglock before touching
1656 	 * last_siginfo, so that we are sure to have synchronized with
1657 	 * any signal-sending on another CPU that wants to examine it.
1658 	 */
1659 	spin_lock_irq(&current->sighand->siglock);
1660 	current->last_siginfo = NULL;
1661 
1662 	/*
1663 	 * Queued signals ignored us while we were stopped for tracing.
1664 	 * So check for any that we should take before resuming user mode.
1665 	 * This sets TIF_SIGPENDING, but never clears it.
1666 	 */
1667 	recalc_sigpending_tsk(current);
1668 }
1669 
1670 void ptrace_notify(int exit_code)
1671 {
1672 	siginfo_t info;
1673 
1674 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1675 
1676 	memset(&info, 0, sizeof info);
1677 	info.si_signo = SIGTRAP;
1678 	info.si_code = exit_code;
1679 	info.si_pid = task_pid_vnr(current);
1680 	info.si_uid = current_uid();
1681 
1682 	/* Let the debugger run.  */
1683 	spin_lock_irq(&current->sighand->siglock);
1684 	ptrace_stop(exit_code, 1, &info);
1685 	spin_unlock_irq(&current->sighand->siglock);
1686 }
1687 
1688 /*
1689  * This performs the stopping for SIGSTOP and other stop signals.
1690  * We have to stop all threads in the thread group.
1691  * Returns nonzero if we've actually stopped and released the siglock.
1692  * Returns zero if we didn't stop and still hold the siglock.
1693  */
1694 static int do_signal_stop(int signr)
1695 {
1696 	struct signal_struct *sig = current->signal;
1697 	int notify;
1698 
1699 	if (!sig->group_stop_count) {
1700 		struct task_struct *t;
1701 
1702 		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
1703 		    unlikely(signal_group_exit(sig)))
1704 			return 0;
1705 		/*
1706 		 * There is no group stop already in progress.
1707 		 * We must initiate one now.
1708 		 */
1709 		sig->group_exit_code = signr;
1710 
1711 		sig->group_stop_count = 1;
1712 		for (t = next_thread(current); t != current; t = next_thread(t))
1713 			/*
1714 			 * Setting state to TASK_STOPPED for a group
1715 			 * stop is always done with the siglock held,
1716 			 * so this check has no races.
1717 			 */
1718 			if (!(t->flags & PF_EXITING) &&
1719 			    !task_is_stopped_or_traced(t)) {
1720 				sig->group_stop_count++;
1721 				signal_wake_up(t, 0);
1722 			}
1723 	}
1724 	/*
1725 	 * If there are no other threads in the group, or if there is
1726 	 * a group stop in progress and we are the last to stop, report
1727 	 * to the parent.  When ptraced, every thread reports itself.
1728 	 */
1729 	notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0;
1730 	notify = tracehook_notify_jctl(notify, CLD_STOPPED);
1731 	/*
1732 	 * tracehook_notify_jctl() can drop and reacquire siglock, so
1733 	 * we keep ->group_stop_count != 0 before the call. If SIGCONT
1734 	 * or SIGKILL comes in between, ->group_stop_count will be 0.
1735 	 */
1736 	if (sig->group_stop_count) {
1737 		if (!--sig->group_stop_count)
1738 			sig->flags = SIGNAL_STOP_STOPPED;
1739 		current->exit_code = sig->group_exit_code;
1740 		__set_current_state(TASK_STOPPED);
1741 	}
1742 	spin_unlock_irq(&current->sighand->siglock);
1743 
1744 	if (notify) {
1745 		read_lock(&tasklist_lock);
1746 		do_notify_parent_cldstop(current, notify);
1747 		read_unlock(&tasklist_lock);
1748 	}
1749 
1750 	/* Now we don't run again until woken by SIGCONT or SIGKILL */
1751 	do {
1752 		schedule();
1753 	} while (try_to_freeze());
1754 
1755 	tracehook_finish_jctl();
1756 	current->exit_code = 0;
1757 
1758 	return 1;
1759 }
1760 
1761 static int ptrace_signal(int signr, siginfo_t *info,
1762 			 struct pt_regs *regs, void *cookie)
1763 {
1764 	if (!task_ptrace(current))
1765 		return signr;
1766 
1767 	ptrace_signal_deliver(regs, cookie);
1768 
1769 	/* Let the debugger run.  */
1770 	ptrace_stop(signr, 0, info);
1771 
1772 	/* We're back.  Did the debugger cancel the sig?  */
1773 	signr = current->exit_code;
1774 	if (signr == 0)
1775 		return signr;
1776 
1777 	current->exit_code = 0;
1778 
1779 	/* Update the siginfo structure if the signal has
1780 	   changed.  If the debugger wanted something
1781 	   specific in the siginfo structure then it should
1782 	   have updated *info via PTRACE_SETSIGINFO.  */
1783 	if (signr != info->si_signo) {
1784 		info->si_signo = signr;
1785 		info->si_errno = 0;
1786 		info->si_code = SI_USER;
1787 		info->si_pid = task_pid_vnr(current->parent);
1788 		info->si_uid = task_uid(current->parent);
1789 	}
1790 
1791 	/* If the (new) signal is now blocked, requeue it.  */
1792 	if (sigismember(&current->blocked, signr)) {
1793 		specific_send_sig_info(signr, info, current);
1794 		signr = 0;
1795 	}
1796 
1797 	return signr;
1798 }
1799 
1800 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1801 			  struct pt_regs *regs, void *cookie)
1802 {
1803 	struct sighand_struct *sighand = current->sighand;
1804 	struct signal_struct *signal = current->signal;
1805 	int signr;
1806 
1807 relock:
1808 	/*
1809 	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
1810 	 * While in TASK_STOPPED, we were considered "frozen enough".
1811 	 * Now that we woke up, it's crucial if we're supposed to be
1812 	 * frozen that we freeze now before running anything substantial.
1813 	 */
1814 	try_to_freeze();
1815 
1816 	spin_lock_irq(&sighand->siglock);
1817 	/*
1818 	 * Every stopped thread goes here after wakeup. Check to see if
1819 	 * we should notify the parent, prepare_signal(SIGCONT) encodes
1820 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
1821 	 */
1822 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
1823 		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
1824 				? CLD_CONTINUED : CLD_STOPPED;
1825 		signal->flags &= ~SIGNAL_CLD_MASK;
1826 
1827 		why = tracehook_notify_jctl(why, CLD_CONTINUED);
1828 		spin_unlock_irq(&sighand->siglock);
1829 
1830 		if (why) {
1831 			read_lock(&tasklist_lock);
1832 			do_notify_parent_cldstop(current->group_leader, why);
1833 			read_unlock(&tasklist_lock);
1834 		}
1835 		goto relock;
1836 	}
1837 
1838 	for (;;) {
1839 		struct k_sigaction *ka;
1840 
1841 		if (unlikely(signal->group_stop_count > 0) &&
1842 		    do_signal_stop(0))
1843 			goto relock;
1844 
1845 		/*
1846 		 * Tracing can induce an artificial signal and choose sigaction.
1847 		 * The return value in @signr determines the default action,
1848 		 * but @info->si_signo is the signal number we will report.
1849 		 */
1850 		signr = tracehook_get_signal(current, regs, info, return_ka);
1851 		if (unlikely(signr < 0))
1852 			goto relock;
1853 		if (unlikely(signr != 0))
1854 			ka = return_ka;
1855 		else {
1856 			signr = dequeue_signal(current, &current->blocked,
1857 					       info);
1858 
1859 			if (!signr)
1860 				break; /* will return 0 */
1861 
1862 			if (signr != SIGKILL) {
1863 				signr = ptrace_signal(signr, info,
1864 						      regs, cookie);
1865 				if (!signr)
1866 					continue;
1867 			}
1868 
1869 			ka = &sighand->action[signr-1];
1870 		}
1871 
1872 		/* Trace actually delivered signals. */
1873 		trace_signal_deliver(signr, info, ka);
1874 
1875 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1876 			continue;
1877 		if (ka->sa.sa_handler != SIG_DFL) {
1878 			/* Run the handler.  */
1879 			*return_ka = *ka;
1880 
1881 			if (ka->sa.sa_flags & SA_ONESHOT)
1882 				ka->sa.sa_handler = SIG_DFL;
1883 
1884 			break; /* will return non-zero "signr" value */
1885 		}
1886 
1887 		/*
1888 		 * Now we are doing the default action for this signal.
1889 		 */
1890 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
1891 			continue;
1892 
1893 		/*
1894 		 * Global init gets no signals it doesn't want.
1895 		 * Container-init gets no signals it doesn't want from the
1896 		 * same container.
1897 		 *
1898 		 * Note that if global/container-init sees a sig_kernel_only()
1899 		 * signal here, the signal must have been generated internally
1900 		 * or must have come from an ancestor namespace. In either
1901 		 * case, the signal cannot be dropped.
1902 		 */
1903 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
1904 				!sig_kernel_only(signr))
1905 			continue;
1906 
1907 		if (sig_kernel_stop(signr)) {
1908 			/*
1909 			 * The default action is to stop all threads in
1910 			 * the thread group.  The job control signals
1911 			 * do nothing in an orphaned pgrp, but SIGSTOP
1912 			 * always works.  Note that siglock needs to be
1913 			 * dropped during the call to is_orphaned_pgrp()
1914 			 * because of lock ordering with tasklist_lock.
1915 			 * This allows an intervening SIGCONT to be posted.
1916 			 * We need to check for that and bail out if necessary.
1917 			 */
1918 			if (signr != SIGSTOP) {
1919 				spin_unlock_irq(&sighand->siglock);
1920 
1921 				/* signals can be posted during this window */
1922 
1923 				if (is_current_pgrp_orphaned())
1924 					goto relock;
1925 
1926 				spin_lock_irq(&sighand->siglock);
1927 			}
1928 
1929 			if (likely(do_signal_stop(info->si_signo))) {
1930 				/* It released the siglock.  */
1931 				goto relock;
1932 			}
1933 
1934 			/*
1935 			 * We didn't actually stop, due to a race
1936 			 * with SIGCONT or something like that.
1937 			 */
1938 			continue;
1939 		}
1940 
1941 		spin_unlock_irq(&sighand->siglock);
1942 
1943 		/*
1944 		 * Anything else is fatal, maybe with a core dump.
1945 		 */
1946 		current->flags |= PF_SIGNALED;
1947 
1948 		if (sig_kernel_coredump(signr)) {
1949 			if (print_fatal_signals)
1950 				print_fatal_signal(regs, info->si_signo);
1951 			/*
1952 			 * If it was able to dump core, this kills all
1953 			 * other threads in the group and synchronizes with
1954 			 * their demise.  If we lost the race with another
1955 			 * thread getting here, it set group_exit_code
1956 			 * first and our do_group_exit call below will use
1957 			 * that value and ignore the one we pass it.
1958 			 */
1959 			do_coredump(info->si_signo, info->si_signo, regs);
1960 		}
1961 
1962 		/*
1963 		 * Death signals, no core dump.
1964 		 */
1965 		do_group_exit(info->si_signo);
1966 		/* NOTREACHED */
1967 	}
1968 	spin_unlock_irq(&sighand->siglock);
1969 	return signr;
1970 }
1971 
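/*
 * Caller-side sketch (illustrative, simplified): each architecture's
 * do_signal() consumes get_signal_to_deliver() roughly as below on the
 * return-to-user path. handle_signal() stands in for the per-arch signal
 * frame setup; real arch code also deals with syscall restart and saved
 * sigmasks.
 *
 *	static void do_signal(struct pt_regs *regs)
 *	{
 *		struct k_sigaction ka;
 *		siginfo_t info;
 *		int signr;
 *
 *		signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 *		if (signr > 0) {
 *			// build the user-mode signal frame for ka/info
 *			handle_signal(signr, &info, &ka, regs);
 *			return;
 *		}
 *		// no signal: handle syscall restart / restore sigmask here
 *	}
 */
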
1972 void exit_signals(struct task_struct *tsk)
1973 {
1974 	int group_stop = 0;
1975 	struct task_struct *t;
1976 
1977 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
1978 		tsk->flags |= PF_EXITING;
1979 		return;
1980 	}
1981 
1982 	spin_lock_irq(&tsk->sighand->siglock);
1983 	/*
1984 	 * From now on this task is not visible to group-wide signals;
1985 	 * see wants_signal() and do_signal_stop().
1986 	 */
1987 	tsk->flags |= PF_EXITING;
1988 	if (!signal_pending(tsk))
1989 		goto out;
1990 
1991 	/* It could be that __group_complete_signal() chose us to
1992 	 * notify about a group-wide signal. Another thread should be
1993 	 * woken now to take the signal since we will not.
1994 	 */
1995 	for (t = tsk; (t = next_thread(t)) != tsk; )
1996 		if (!signal_pending(t) && !(t->flags & PF_EXITING))
1997 			recalc_sigpending_and_wake(t);
1998 
1999 	if (unlikely(tsk->signal->group_stop_count) &&
2000 			!--tsk->signal->group_stop_count) {
2001 		tsk->signal->flags = SIGNAL_STOP_STOPPED;
2002 		group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED);
2003 	}
2004 out:
2005 	spin_unlock_irq(&tsk->sighand->siglock);
2006 
2007 	if (unlikely(group_stop)) {
2008 		read_lock(&tasklist_lock);
2009 		do_notify_parent_cldstop(tsk, group_stop);
2010 		read_unlock(&tasklist_lock);
2011 	}
2012 }
2013 
2014 EXPORT_SYMBOL(recalc_sigpending);
2015 EXPORT_SYMBOL_GPL(dequeue_signal);
2016 EXPORT_SYMBOL(flush_signals);
2017 EXPORT_SYMBOL(force_sig);
2018 EXPORT_SYMBOL(send_sig);
2019 EXPORT_SYMBOL(send_sig_info);
2020 EXPORT_SYMBOL(sigprocmask);
2021 EXPORT_SYMBOL(block_all_signals);
2022 EXPORT_SYMBOL(unblock_all_signals);
2023 
2024 
2025 /*
2026  * System call entry points.
2027  */
2028 
2029 SYSCALL_DEFINE0(restart_syscall)
2030 {
2031 	struct restart_block *restart = &current_thread_info()->restart_block;
2032 	return restart->fn(restart);
2033 }
2034 
2035 long do_no_restart_syscall(struct restart_block *param)
2036 {
2037 	return -EINTR;
2038 }
2039 
2040 /*
2041  * We don't need to get the kernel lock - this is all local to this
2042  * particular thread. (and that's good, because this is _heavily_
2043  * used by various programs)
2044  */
2045 
2046 /*
2047  * This is also useful for kernel threads that want to temporarily
2048  * (or permanently) block certain signals.
2049  *
2050  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2051  * interface happily blocks "unblockable" signals like SIGKILL
2052  * and friends.
2053  */
2054 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2055 {
2056 	int error;
2057 
2058 	spin_lock_irq(&current->sighand->siglock);
2059 	if (oldset)
2060 		*oldset = current->blocked;
2061 
2062 	error = 0;
2063 	switch (how) {
2064 	case SIG_BLOCK:
2065 		sigorsets(&current->blocked, &current->blocked, set);
2066 		break;
2067 	case SIG_UNBLOCK:
2068 		signandsets(&current->blocked, &current->blocked, set);
2069 		break;
2070 	case SIG_SETMASK:
2071 		current->blocked = *set;
2072 		break;
2073 	default:
2074 		error = -EINVAL;
2075 	}
2076 	recalc_sigpending();
2077 	spin_unlock_irq(&current->sighand->siglock);
2078 
2079 	return error;
2080 }
2081 
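/*
 * A minimal sketch (assumed, not taken from in-tree code) of the kernel
 * usage described above: a kernel thread blocking everything except
 * SIGKILL around a critical section, then restoring its old mask.
 *
 *	{
 *		sigset_t blocked, oldset;
 *
 *		siginitsetinv(&blocked, sigmask(SIGKILL));
 *		sigprocmask(SIG_BLOCK, &blocked, &oldset);
 *		// ... work that should not see most signals ...
 *		sigprocmask(SIG_SETMASK, &oldset, NULL);
 *	}
 */
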
2082 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
2083 		sigset_t __user *, oset, size_t, sigsetsize)
2084 {
2085 	int error = -EINVAL;
2086 	sigset_t old_set, new_set;
2087 
2088 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2089 	if (sigsetsize != sizeof(sigset_t))
2090 		goto out;
2091 
2092 	if (set) {
2093 		error = -EFAULT;
2094 		if (copy_from_user(&new_set, set, sizeof(*set)))
2095 			goto out;
2096 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2097 
2098 		error = sigprocmask(how, &new_set, &old_set);
2099 		if (error)
2100 			goto out;
2101 		if (oset)
2102 			goto set_old;
2103 	} else if (oset) {
2104 		spin_lock_irq(&current->sighand->siglock);
2105 		old_set = current->blocked;
2106 		spin_unlock_irq(&current->sighand->siglock);
2107 
2108 	set_old:
2109 		error = -EFAULT;
2110 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2111 			goto out;
2112 	}
2113 	error = 0;
2114 out:
2115 	return error;
2116 }
2117 
2118 long do_sigpending(void __user *set, unsigned long sigsetsize)
2119 {
2120 	long error = -EINVAL;
2121 	sigset_t pending;
2122 
2123 	if (sigsetsize > sizeof(sigset_t))
2124 		goto out;
2125 
2126 	spin_lock_irq(&current->sighand->siglock);
2127 	sigorsets(&pending, &current->pending.signal,
2128 		  &current->signal->shared_pending.signal);
2129 	spin_unlock_irq(&current->sighand->siglock);
2130 
2131 	/* Outside the lock because only this thread touches it.  */
2132 	sigandsets(&pending, &current->blocked, &pending);
2133 
2134 	error = -EFAULT;
2135 	if (!copy_to_user(set, &pending, sigsetsize))
2136 		error = 0;
2137 
2138 out:
2139 	return error;
2140 }
2141 
2142 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
2143 {
2144 	return do_sigpending(set, sigsetsize);
2145 }
2146 
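/*
 * Userspace view (illustrative only): glibc routes sigprocmask(2) and
 * sigpending(2) through the rt_ variants above. A blocked signal that
 * has been generated shows up in the pending set:
 *
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	sigset_t set, pend;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, NULL);	// -> sys_rt_sigprocmask()
 *	kill(getpid(), SIGINT);			// SIGINT is now pending
 *	sigpending(&pend);			// -> sys_rt_sigpending()
 *	// sigismember(&pend, SIGINT) == 1
 */
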
2147 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2148 
2149 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2150 {
2151 	int err;
2152 
2153 	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2154 		return -EFAULT;
2155 	if (from->si_code < 0)
2156 		return __copy_to_user(to, from, sizeof(siginfo_t))
2157 			? -EFAULT : 0;
2158 	/*
2159 	 * If you change the siginfo_t structure, please be sure
2160 	 * this code is fixed accordingly.
2161 	 * Please remember to update the signalfd_copyinfo() function
2162 	 * inside fs/signalfd.c too, in case siginfo_t changes.
2163 	 * It should never copy any pad contained in the structure
2164 	 * to avoid security leaks, but must copy the generic
2165 	 * 3 ints plus the relevant union member.
2166 	 */
2167 	err = __put_user(from->si_signo, &to->si_signo);
2168 	err |= __put_user(from->si_errno, &to->si_errno);
2169 	err |= __put_user((short)from->si_code, &to->si_code);
2170 	switch (from->si_code & __SI_MASK) {
2171 	case __SI_KILL:
2172 		err |= __put_user(from->si_pid, &to->si_pid);
2173 		err |= __put_user(from->si_uid, &to->si_uid);
2174 		break;
2175 	case __SI_TIMER:
2176 		err |= __put_user(from->si_tid, &to->si_tid);
2177 		err |= __put_user(from->si_overrun, &to->si_overrun);
2178 		err |= __put_user(from->si_ptr, &to->si_ptr);
2179 		break;
2180 	case __SI_POLL:
2181 		err |= __put_user(from->si_band, &to->si_band);
2182 		err |= __put_user(from->si_fd, &to->si_fd);
2183 		break;
2184 	case __SI_FAULT:
2185 		err |= __put_user(from->si_addr, &to->si_addr);
2186 #ifdef __ARCH_SI_TRAPNO
2187 		err |= __put_user(from->si_trapno, &to->si_trapno);
2188 #endif
2189 		break;
2190 	case __SI_CHLD:
2191 		err |= __put_user(from->si_pid, &to->si_pid);
2192 		err |= __put_user(from->si_uid, &to->si_uid);
2193 		err |= __put_user(from->si_status, &to->si_status);
2194 		err |= __put_user(from->si_utime, &to->si_utime);
2195 		err |= __put_user(from->si_stime, &to->si_stime);
2196 		break;
2197 	case __SI_RT: /* This is not generated by the kernel as of now. */
2198 	case __SI_MESGQ: /* But this is */
2199 		err |= __put_user(from->si_pid, &to->si_pid);
2200 		err |= __put_user(from->si_uid, &to->si_uid);
2201 		err |= __put_user(from->si_ptr, &to->si_ptr);
2202 		break;
2203 	default: /* this is just in case for now ... */
2204 		err |= __put_user(from->si_pid, &to->si_pid);
2205 		err |= __put_user(from->si_uid, &to->si_uid);
2206 		break;
2207 	}
2208 	return err;
2209 }
2210 
2211 #endif
2212 
2213 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2214 		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2215 		size_t, sigsetsize)
2216 {
2217 	int ret, sig;
2218 	sigset_t these;
2219 	struct timespec ts;
2220 	siginfo_t info;
2221 	long timeout = 0;
2222 
2223 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2224 	if (sigsetsize != sizeof(sigset_t))
2225 		return -EINVAL;
2226 
2227 	if (copy_from_user(&these, uthese, sizeof(these)))
2228 		return -EFAULT;
2229 
2230 	/*
2231 	 * Invert the set of allowed signals to get those we
2232 	 * want to block.
2233 	 */
2234 	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2235 	signotset(&these);
2236 
2237 	if (uts) {
2238 		if (copy_from_user(&ts, uts, sizeof(ts)))
2239 			return -EFAULT;
2240 		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2241 		    || ts.tv_sec < 0)
2242 			return -EINVAL;
2243 	}
2244 
2245 	spin_lock_irq(&current->sighand->siglock);
2246 	sig = dequeue_signal(current, &these, &info);
2247 	if (!sig) {
2248 		timeout = MAX_SCHEDULE_TIMEOUT;
2249 		if (uts)
2250 			timeout = (timespec_to_jiffies(&ts)
2251 				   + (ts.tv_sec || ts.tv_nsec));
2252 
2253 		if (timeout) {
2254 			/* None ready -- temporarily unblock those we're
2255 			 * interested in while we are sleeping, so that we'll
2256 			 * be awakened when they arrive.  */
2257 			current->real_blocked = current->blocked;
2258 			sigandsets(&current->blocked, &current->blocked, &these);
2259 			recalc_sigpending();
2260 			spin_unlock_irq(&current->sighand->siglock);
2261 
2262 			timeout = schedule_timeout_interruptible(timeout);
2263 
2264 			spin_lock_irq(&current->sighand->siglock);
2265 			sig = dequeue_signal(current, &these, &info);
2266 			current->blocked = current->real_blocked;
2267 			siginitset(&current->real_blocked, 0);
2268 			recalc_sigpending();
2269 		}
2270 	}
2271 	spin_unlock_irq(&current->sighand->siglock);
2272 
2273 	if (sig) {
2274 		ret = sig;
2275 		if (uinfo) {
2276 			if (copy_siginfo_to_user(uinfo, &info))
2277 				ret = -EFAULT;
2278 		}
2279 	} else {
2280 		ret = -EAGAIN;
2281 		if (timeout)
2282 			ret = -EINTR;
2283 	}
2284 
2285 	return ret;
2286 }
2287 
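/*
 * Typical userspace use of the syscall above (illustrative sketch): the
 * caller blocks the signals it wants to accept synchronously, then waits
 * for one with an optional timeout.
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	int wait_for_usr1(void)
 *	{
 *		sigset_t set;
 *		siginfo_t info;
 *		struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);	// must stay blocked
 *		return sigtimedwait(&set, &info, &ts);	// -> rt_sigtimedwait
 *	}
 *
 * On timeout the library call returns -1 with errno set to EAGAIN,
 * matching the -EAGAIN/-EINTR returns at the end of the syscall.
 */
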
2288 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2289 {
2290 	struct siginfo info;
2291 
2292 	info.si_signo = sig;
2293 	info.si_errno = 0;
2294 	info.si_code = SI_USER;
2295 	info.si_pid = task_tgid_vnr(current);
2296 	info.si_uid = current_uid();
2297 
2298 	return kill_something_info(sig, &info, pid);
2299 }
2300 
2301 static int
2302 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2303 {
2304 	struct task_struct *p;
2305 	int error = -ESRCH;
2306 
2307 	rcu_read_lock();
2308 	p = find_task_by_vpid(pid);
2309 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2310 		error = check_kill_permission(sig, info, p);
2311 		/*
2312 		 * The null signal is a permissions and process existence
2313 		 * probe.  No signal is actually delivered.
2314 		 */
2315 		if (!error && sig) {
2316 			error = do_send_sig_info(sig, info, p, false);
2317 			/*
2318 			 * If lock_task_sighand() failed we pretend the task
2319 			 * dies after receiving the signal. The window is tiny,
2320 			 * and the signal is private anyway.
2321 			 */
2322 			if (unlikely(error == -ESRCH))
2323 				error = 0;
2324 		}
2325 	}
2326 	rcu_read_unlock();
2327 
2328 	return error;
2329 }
2330 
2331 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2332 {
2333 	struct siginfo info;
2334 
2335 	info.si_signo = sig;
2336 	info.si_errno = 0;
2337 	info.si_code = SI_TKILL;
2338 	info.si_pid = task_tgid_vnr(current);
2339 	info.si_uid = current_uid();
2340 
2341 	return do_send_specific(tgid, pid, sig, &info);
2342 }
2343 
2344 /**
2345  *  sys_tgkill - send signal to one specific thread
2346  *  @tgid: the thread group ID of the thread
2347  *  @pid: the PID of the thread
2348  *  @sig: signal to be sent
2349  *
2350  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2351  *  exists but it's not belonging to the target process anymore. This
2352  *  exists but no longer belongs to the target process. This
2353  */
2354 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2355 {
2356 	/* This is only valid for single tasks */
2357 	if (pid <= 0 || tgid <= 0)
2358 		return -EINVAL;
2359 
2360 	return do_tkill(tgid, pid, sig);
2361 }
2362 
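/*
 * Illustrative userspace use: this is essentially what pthread_kill()
 * boils down to. There is traditionally no libc wrapper, so callers use
 * syscall(2) directly; tid here is an assumed value obtained earlier via
 * syscall(SYS_gettid) in the target thread.
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <signal.h>
 *
 *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 */
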
2363 /*
2364  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2365  */
2366 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2367 {
2368 	/* This is only valid for single tasks */
2369 	if (pid <= 0)
2370 		return -EINVAL;
2371 
2372 	return do_tkill(0, pid, sig);
2373 }
2374 
2375 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2376 		siginfo_t __user *, uinfo)
2377 {
2378 	siginfo_t info;
2379 
2380 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2381 		return -EFAULT;
2382 
2383 	/* Not even root can pretend to send signals from the kernel.
2384 	   Nor can they impersonate a kill(), which adds source info.  */
2385 	if (info.si_code >= 0)
2386 		return -EPERM;
2387 	info.si_signo = sig;
2388 
2389 	/* POSIX.1b doesn't mention process groups.  */
2390 	return kill_proc_info(sig, &info, pid);
2391 }
2392 
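/*
 * Userspace path (illustrative): sigqueue(3) fills in a siginfo with
 * si_code = SI_QUEUE, which is negative and therefore passes the
 * si_code >= 0 check above, then enters rt_sigqueueinfo. target_pid is a
 * placeholder value.
 *
 *	#include <signal.h>
 *
 *	union sigval val = { .sival_int = 42 };
 *	sigqueue(target_pid, SIGUSR2, val);
 */
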
2393 long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2394 {
2395 	/* This is only valid for single tasks */
2396 	if (pid <= 0 || tgid <= 0)
2397 		return -EINVAL;
2398 
2399 	/* Not even root can pretend to send signals from the kernel.
2400 	   Nor can they impersonate a kill(), which adds source info.  */
2401 	if (info->si_code >= 0)
2402 		return -EPERM;
2403 	info->si_signo = sig;
2404 
2405 	return do_send_specific(tgid, pid, sig, info);
2406 }
2407 
2408 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
2409 		siginfo_t __user *, uinfo)
2410 {
2411 	siginfo_t info;
2412 
2413 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2414 		return -EFAULT;
2415 
2416 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
2417 }
2418 
2419 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2420 {
2421 	struct task_struct *t = current;
2422 	struct k_sigaction *k;
2423 	sigset_t mask;
2424 
2425 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2426 		return -EINVAL;
2427 
2428 	k = &t->sighand->action[sig-1];
2429 
2430 	spin_lock_irq(&current->sighand->siglock);
2431 	if (oact)
2432 		*oact = *k;
2433 
2434 	if (act) {
2435 		sigdelsetmask(&act->sa.sa_mask,
2436 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
2437 		*k = *act;
2438 		/*
2439 		 * POSIX 3.3.1.3:
2440 		 *  "Setting a signal action to SIG_IGN for a signal that is
2441 		 *   pending shall cause the pending signal to be discarded,
2442 		 *   whether or not it is blocked."
2443 		 *
2444 		 *  "Setting a signal action to SIG_DFL for a signal that is
2445 		 *   pending and whose default action is to ignore the signal
2446 		 *   (for example, SIGCHLD), shall cause the pending signal to
2447 		 *   be discarded, whether or not it is blocked"
2448 		 */
2449 		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2450 			sigemptyset(&mask);
2451 			sigaddset(&mask, sig);
2452 			rm_from_queue_full(&mask, &t->signal->shared_pending);
2453 			do {
2454 				rm_from_queue_full(&mask, &t->pending);
2455 				t = next_thread(t);
2456 			} while (t != current);
2457 		}
2458 	}
2459 
2460 	spin_unlock_irq(&current->sighand->siglock);
2461 	return 0;
2462 }
2463 
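/*
 * Userspace illustration of the POSIX rule quoted above (schematic): a
 * pending but blocked SIGCHLD is discarded the moment its disposition
 * becomes SIG_IGN, so nothing is delivered on unblock.
 *
 *	#include <signal.h>
 *
 *	sigset_t block;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &block, NULL);
 *	// ... a child exits; SIGCHLD becomes pending but stays blocked ...
 *	signal(SIGCHLD, SIG_IGN);		// do_sigaction() flushes it
 *	sigprocmask(SIG_UNBLOCK, &block, NULL);	// nothing arrives
 */
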
2464 int
2465 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2466 {
2467 	stack_t oss;
2468 	int error;
2469 
2470 	oss.ss_sp = (void __user *) current->sas_ss_sp;
2471 	oss.ss_size = current->sas_ss_size;
2472 	oss.ss_flags = sas_ss_flags(sp);
2473 
2474 	if (uss) {
2475 		void __user *ss_sp;
2476 		size_t ss_size;
2477 		int ss_flags;
2478 
2479 		error = -EFAULT;
2480 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
2481 			goto out;
2482 		error = __get_user(ss_sp, &uss->ss_sp) |
2483 			__get_user(ss_flags, &uss->ss_flags) |
2484 			__get_user(ss_size, &uss->ss_size);
2485 		if (error)
2486 			goto out;
2487 
2488 		error = -EPERM;
2489 		if (on_sig_stack(sp))
2490 			goto out;
2491 
2492 		error = -EINVAL;
2493 		/*
2494 		 *
2495 		 * Note - this code used to test ss_flags incorrectly;
2496 		 *	  old code may have been written using ss_flags==0
2497 		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
2498 		 *	  way that worked), so this fix preserves that older
2499 		 *	  mechanism.
2500 		 */
2501 		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2502 			goto out;
2503 
2504 		if (ss_flags == SS_DISABLE) {
2505 			ss_size = 0;
2506 			ss_sp = NULL;
2507 		} else {
2508 			error = -ENOMEM;
2509 			if (ss_size < MINSIGSTKSZ)
2510 				goto out;
2511 		}
2512 
2513 		current->sas_ss_sp = (unsigned long) ss_sp;
2514 		current->sas_ss_size = ss_size;
2515 	}
2516 
2517 	error = 0;
2518 	if (uoss) {
2519 		error = -EFAULT;
2520 		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
2521 			goto out;
2522 		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
2523 			__put_user(oss.ss_size, &uoss->ss_size) |
2524 			__put_user(oss.ss_flags, &uoss->ss_flags);
2525 	}
2526 
2527 out:
2528 	return error;
2529 }
2530 
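/*
 * Userspace counterpart (illustrative): install an alternate stack so a
 * handler registered with SA_ONSTACK can run even after the main stack
 * has overflowed (e.g. to catch SIGSEGV).
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),	// must be >= MINSIGSTKSZ
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	sigaltstack(&ss, NULL);			// -> do_sigaltstack()
 */
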
2531 #ifdef __ARCH_WANT_SYS_SIGPENDING
2532 
2533 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
2534 {
2535 	return do_sigpending(set, sizeof(*set));
2536 }
2537 
2538 #endif
2539 
2540 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2541 /* Some platforms have their own version with special arguments; others
2542    support only sys_rt_sigprocmask.  */
2543 
2544 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
2545 		old_sigset_t __user *, oset)
2546 {
2547 	int error;
2548 	old_sigset_t old_set, new_set;
2549 
2550 	if (set) {
2551 		error = -EFAULT;
2552 		if (copy_from_user(&new_set, set, sizeof(*set)))
2553 			goto out;
2554 		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2555 
2556 		spin_lock_irq(&current->sighand->siglock);
2557 		old_set = current->blocked.sig[0];
2558 
2559 		error = 0;
2560 		switch (how) {
2561 		default:
2562 			error = -EINVAL;
2563 			break;
2564 		case SIG_BLOCK:
2565 			sigaddsetmask(&current->blocked, new_set);
2566 			break;
2567 		case SIG_UNBLOCK:
2568 			sigdelsetmask(&current->blocked, new_set);
2569 			break;
2570 		case SIG_SETMASK:
2571 			current->blocked.sig[0] = new_set;
2572 			break;
2573 		}
2574 
2575 		recalc_sigpending();
2576 		spin_unlock_irq(&current->sighand->siglock);
2577 		if (error)
2578 			goto out;
2579 		if (oset)
2580 			goto set_old;
2581 	} else if (oset) {
2582 		old_set = current->blocked.sig[0];
2583 	set_old:
2584 		error = -EFAULT;
2585 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2586 			goto out;
2587 	}
2588 	error = 0;
2589 out:
2590 	return error;
2591 }
2592 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2593 
2594 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2595 SYSCALL_DEFINE4(rt_sigaction, int, sig,
2596 		const struct sigaction __user *, act,
2597 		struct sigaction __user *, oact,
2598 		size_t, sigsetsize)
2599 {
2600 	struct k_sigaction new_sa, old_sa;
2601 	int ret = -EINVAL;
2602 
2603 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2604 	if (sigsetsize != sizeof(sigset_t))
2605 		goto out;
2606 
2607 	if (act) {
2608 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2609 			return -EFAULT;
2610 	}
2611 
2612 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2613 
2614 	if (!ret && oact) {
2615 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2616 			return -EFAULT;
2617 	}
2618 out:
2619 	return ret;
2620 }
2621 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2622 
2623 #ifdef __ARCH_WANT_SYS_SGETMASK
2624 
2625 /*
2626  * For backwards compatibility.  Functionality superseded by sigprocmask.
2627  */
2628 SYSCALL_DEFINE0(sgetmask)
2629 {
2630 	/* SMP safe */
2631 	return current->blocked.sig[0];
2632 }
2633 
2634 SYSCALL_DEFINE1(ssetmask, int, newmask)
2635 {
2636 	int old;
2637 
2638 	spin_lock_irq(&current->sighand->siglock);
2639 	old = current->blocked.sig[0];
2640 
2641 	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2642 						  sigmask(SIGSTOP)));
2643 	recalc_sigpending();
2644 	spin_unlock_irq(&current->sighand->siglock);
2645 
2646 	return old;
2647 }
2648 #endif /* __ARCH_WANT_SYS_SGETMASK */
2649 
2650 #ifdef __ARCH_WANT_SYS_SIGNAL
2651 /*
2652  * For backwards compatibility.  Functionality superseded by sigaction.
2653  */
2654 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
2655 {
2656 	struct k_sigaction new_sa, old_sa;
2657 	int ret;
2658 
2659 	new_sa.sa.sa_handler = handler;
2660 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2661 	sigemptyset(&new_sa.sa.sa_mask);
2662 
2663 	ret = do_sigaction(sig, &new_sa, &old_sa);
2664 
2665 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2666 }
2667 #endif /* __ARCH_WANT_SYS_SIGNAL */
2668 
2669 #ifdef __ARCH_WANT_SYS_PAUSE
2670 
2671 SYSCALL_DEFINE0(pause)
2672 {
2673 	current->state = TASK_INTERRUPTIBLE;
2674 	schedule();
2675 	return -ERESTARTNOHAND;
2676 }
2677 
2678 #endif
2679 
2680 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2681 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
2682 {
2683 	sigset_t newset;
2684 
2685 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2686 	if (sigsetsize != sizeof(sigset_t))
2687 		return -EINVAL;
2688 
2689 	if (copy_from_user(&newset, unewset, sizeof(newset)))
2690 		return -EFAULT;
2691 	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2692 
2693 	spin_lock_irq(&current->sighand->siglock);
2694 	current->saved_sigmask = current->blocked;
2695 	current->blocked = newset;
2696 	recalc_sigpending();
2697 	spin_unlock_irq(&current->sighand->siglock);
2698 
2699 	current->state = TASK_INTERRUPTIBLE;
2700 	schedule();
2701 	set_restore_sigmask();
2702 	return -ERESTARTNOHAND;
2703 }
2704 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
2705 
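/*
 * Classic userspace pattern served by rt_sigsuspend (illustrative):
 * atomically replace the mask and sleep, closing the wakeup race that
 * separate sigprocmask() + pause() calls would leave open. 'done' is an
 * assumed volatile sig_atomic_t flag set by the SIGUSR1 handler.
 *
 *	#include <signal.h>
 *
 *	sigset_t block, waitmask;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &waitmask);
 *	sigdelset(&waitmask, SIGUSR1);
 *	while (!done)
 *		sigsuspend(&waitmask);	// returns -1/EINTR after the handler
 */
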
2706 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2707 {
2708 	return NULL;
2709 }
2710 
2711 void __init signals_init(void)
2712 {
2713 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2714 }
2715