xref: /linux/kernel/signal.c (revision 5a0e3ad6af8660be21ca98a971cd00f331318c05)
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *		Changes to use preallocated sigqueue structures
10  *		to allow signals to be sent reliably.
11  */
12 
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/security.h>
21 #include <linux/syscalls.h>
22 #include <linux/ptrace.h>
23 #include <linux/signal.h>
24 #include <linux/signalfd.h>
25 #include <linux/ratelimit.h>
26 #include <linux/tracehook.h>
27 #include <linux/capability.h>
28 #include <linux/freezer.h>
29 #include <linux/pid_namespace.h>
30 #include <linux/nsproxy.h>
31 #define CREATE_TRACE_POINTS
32 #include <trace/events/signal.h>
33 
34 #include <asm/param.h>
35 #include <asm/uaccess.h>
36 #include <asm/unistd.h>
37 #include <asm/siginfo.h>
38 #include "audit.h"	/* audit_signal_info() */
39 
40 /*
41  * SLAB caches for signal bits.
42  */
43 
44 static struct kmem_cache *sigqueue_cachep;
45 
46 int print_fatal_signals __read_mostly;
47 
48 static void __user *sig_handler(struct task_struct *t, int sig)
49 {
50 	return t->sighand->action[sig - 1].sa.sa_handler;
51 }
52 
53 static int sig_handler_ignored(void __user *handler, int sig)
54 {
55 	/* Is it explicitly or implicitly ignored? */
56 	return handler == SIG_IGN ||
57 		(handler == SIG_DFL && sig_kernel_ignore(sig));
58 }
59 
60 static int sig_task_ignored(struct task_struct *t, int sig,
61 		int from_ancestor_ns)
62 {
63 	void __user *handler;
64 
65 	handler = sig_handler(t, sig);
66 
67 	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
68 			handler == SIG_DFL && !from_ancestor_ns)
69 		return 1;
70 
71 	return sig_handler_ignored(handler, sig);
72 }
73 
74 static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
75 {
76 	/*
77 	 * Blocked signals are never ignored, since the
78 	 * signal handler may change by the time it is
79 	 * unblocked.
80 	 */
81 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
82 		return 0;
83 
84 	if (!sig_task_ignored(t, sig, from_ancestor_ns))
85 		return 0;
86 
87 	/*
88 	 * Tracers may want to know about even ignored signals.
89 	 */
90 	return !tracehook_consider_ignored_signal(t, sig);
91 }
92 
93 /*
94  * Re-calculate pending state from the set of locally pending
95  * signals, globally pending signals, and blocked signals.
96  */
97 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
98 {
99 	unsigned long ready;
100 	long i;
101 
102 	switch (_NSIG_WORDS) {
103 	default:
104 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
105 			ready |= signal->sig[i] &~ blocked->sig[i];
106 		break;
107 
108 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
109 		ready |= signal->sig[2] &~ blocked->sig[2];
110 		ready |= signal->sig[1] &~ blocked->sig[1];
111 		ready |= signal->sig[0] &~ blocked->sig[0];
112 		break;
113 
114 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
115 		ready |= signal->sig[0] &~ blocked->sig[0];
116 		break;
117 
118 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
119 	}
120 	return ready != 0;
121 }
122 
123 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
124 
125 static int recalc_sigpending_tsk(struct task_struct *t)
126 {
127 	if (t->signal->group_stop_count > 0 ||
128 	    PENDING(&t->pending, &t->blocked) ||
129 	    PENDING(&t->signal->shared_pending, &t->blocked)) {
130 		set_tsk_thread_flag(t, TIF_SIGPENDING);
131 		return 1;
132 	}
133 	/*
134 	 * We must never clear the flag in another thread, or in current
135 	 * when it's possible the current syscall is returning -ERESTART*.
136 	 * So we don't clear it here; clearing is left to callers who know they should.
137 	 */
138 	return 0;
139 }
140 
141 /*
142  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
143  * This is superfluous when called on current; the wakeup is then a harmless no-op.
144  */
145 void recalc_sigpending_and_wake(struct task_struct *t)
146 {
147 	if (recalc_sigpending_tsk(t))
148 		signal_wake_up(t, 0);
149 }
150 
151 void recalc_sigpending(void)
152 {
153 	if (unlikely(tracehook_force_sigpending()))
154 		set_thread_flag(TIF_SIGPENDING);
155 	else if (!recalc_sigpending_tsk(current) && !freezing(current))
156 		clear_thread_flag(TIF_SIGPENDING);
157 
158 }
159 
160 /* Given the mask, find the first available signal that should be serviced. */
161 
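/*
 * Synchronous signals are those raised by a fault in the current
 * instruction stream (SIGSEGV, SIGBUS, SIGILL, SIGTRAP, SIGFPE); when one
 * of them is pending, next_signal() below dequeues it ahead of any other
 * unblocked signal in the first word of the pending set.
 */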
162 #define SYNCHRONOUS_MASK \
163 	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
164 	 sigmask(SIGTRAP) | sigmask(SIGFPE))
165 
166 int next_signal(struct sigpending *pending, sigset_t *mask)
167 {
168 	unsigned long i, *s, *m, x;
169 	int sig = 0;
170 
171 	s = pending->signal.sig;
172 	m = mask->sig;
173 
174 	/*
175 	 * Handle the first word specially: it contains the
176 	 * synchronous signals that need to be dequeued first.
177 	 */
178 	x = *s &~ *m;
179 	if (x) {
180 		if (x & SYNCHRONOUS_MASK)
181 			x &= SYNCHRONOUS_MASK;
182 		sig = ffz(~x) + 1;
183 		return sig;
184 	}
185 
186 	switch (_NSIG_WORDS) {
187 	default:
188 		for (i = 1; i < _NSIG_WORDS; ++i) {
189 			x = *++s &~ *++m;
190 			if (!x)
191 				continue;
192 			sig = ffz(~x) + i*_NSIG_BPW + 1;
193 			break;
194 		}
195 		break;
196 
197 	case 2:
198 		x = s[1] &~ m[1];
199 		if (!x)
200 			break;
201 		sig = ffz(~x) + _NSIG_BPW + 1;
202 		break;
203 
204 	case 1:
205 		/* Nothing to do */
206 		break;
207 	}
208 
209 	return sig;
210 }
211 
212 static inline void print_dropped_signal(int sig)
213 {
214 	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
215 
216 	if (!print_fatal_signals)
217 		return;
218 
219 	if (!__ratelimit(&ratelimit_state))
220 		return;
221 
222 	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
223 				current->comm, current->pid, sig);
224 }
225 
226 /*
227  * allocate a new signal queue record
228  * - this may be called without locks if and only if t == current, otherwise an
229  *   appropriate lock must be held to stop the target task from exiting
230  */
231 static struct sigqueue *
232 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
233 {
234 	struct sigqueue *q = NULL;
235 	struct user_struct *user;
236 
237 	/*
238 	 * Protect access to @t credentials. This can go away when all
239 	 * callers hold rcu read lock.
240 	 */
241 	rcu_read_lock();
242 	user = get_uid(__task_cred(t)->user);
243 	atomic_inc(&user->sigpending);
244 	rcu_read_unlock();
245 
246 	if (override_rlimit ||
247 	    atomic_read(&user->sigpending) <=
248 			task_rlimit(t, RLIMIT_SIGPENDING)) {
249 		q = kmem_cache_alloc(sigqueue_cachep, flags);
250 	} else {
251 		print_dropped_signal(sig);
252 	}
253 
254 	if (unlikely(q == NULL)) {
255 		atomic_dec(&user->sigpending);
256 		free_uid(user);
257 	} else {
258 		INIT_LIST_HEAD(&q->list);
259 		q->flags = 0;
260 		q->user = user;
261 	}
262 
263 	return q;
264 }
265 
266 static void __sigqueue_free(struct sigqueue *q)
267 {
268 	if (q->flags & SIGQUEUE_PREALLOC)
269 		return;
270 	atomic_dec(&q->user->sigpending);
271 	free_uid(q->user);
272 	kmem_cache_free(sigqueue_cachep, q);
273 }
274 
275 void flush_sigqueue(struct sigpending *queue)
276 {
277 	struct sigqueue *q;
278 
279 	sigemptyset(&queue->signal);
280 	while (!list_empty(&queue->list)) {
281 		q = list_entry(queue->list.next, struct sigqueue , list);
282 		list_del_init(&q->list);
283 		__sigqueue_free(q);
284 	}
285 }
286 
287 /*
288  * Flush all pending signals for a task.
289  */
290 void __flush_signals(struct task_struct *t)
291 {
292 	clear_tsk_thread_flag(t, TIF_SIGPENDING);
293 	flush_sigqueue(&t->pending);
294 	flush_sigqueue(&t->signal->shared_pending);
295 }
296 
297 void flush_signals(struct task_struct *t)
298 {
299 	unsigned long flags;
300 
301 	spin_lock_irqsave(&t->sighand->siglock, flags);
302 	__flush_signals(t);
303 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
304 }
305 
306 static void __flush_itimer_signals(struct sigpending *pending)
307 {
308 	sigset_t signal, retain;
309 	struct sigqueue *q, *n;
310 
311 	signal = pending->signal;
312 	sigemptyset(&retain);
313 
314 	list_for_each_entry_safe(q, n, &pending->list, list) {
315 		int sig = q->info.si_signo;
316 
317 		if (likely(q->info.si_code != SI_TIMER)) {
318 			sigaddset(&retain, sig);
319 		} else {
320 			sigdelset(&signal, sig);
321 			list_del_init(&q->list);
322 			__sigqueue_free(q);
323 		}
324 	}
325 
326 	sigorsets(&pending->signal, &signal, &retain);
327 }
328 
329 void flush_itimer_signals(void)
330 {
331 	struct task_struct *tsk = current;
332 	unsigned long flags;
333 
334 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
335 	__flush_itimer_signals(&tsk->pending);
336 	__flush_itimer_signals(&tsk->signal->shared_pending);
337 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
338 }
339 
340 void ignore_signals(struct task_struct *t)
341 {
342 	int i;
343 
344 	for (i = 0; i < _NSIG; ++i)
345 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
346 
347 	flush_signals(t);
348 }
349 
350 /*
351  * Flush all handlers for a task.
352  */
353 
354 void
355 flush_signal_handlers(struct task_struct *t, int force_default)
356 {
357 	int i;
358 	struct k_sigaction *ka = &t->sighand->action[0];
359 	for (i = _NSIG ; i != 0 ; i--) {
360 		if (force_default || ka->sa.sa_handler != SIG_IGN)
361 			ka->sa.sa_handler = SIG_DFL;
362 		ka->sa.sa_flags = 0;
363 		sigemptyset(&ka->sa.sa_mask);
364 		ka++;
365 	}
366 }
367 
368 int unhandled_signal(struct task_struct *tsk, int sig)
369 {
370 	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
371 	if (is_global_init(tsk))
372 		return 1;
373 	if (handler != SIG_IGN && handler != SIG_DFL)
374 		return 0;
375 	return !tracehook_consider_fatal_signal(tsk, sig);
376 }
377 
378 
379 /* Notify the system that a driver wants to block all signals for this
380  * process, and wants to be notified if any signals at all were to be
381  * sent/acted upon.  If the notifier routine returns non-zero, then the
382  * signal will be acted upon after all.  If the notifier routine returns 0,
383  * then then signal will be blocked.  Only one block per process is
384  * allowed.  priv is a pointer to private data that the notifier routine
385  * can use to determine if the signal should be blocked or not.  */
386 
387 void
388 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
389 {
390 	unsigned long flags;
391 
392 	spin_lock_irqsave(&current->sighand->siglock, flags);
393 	current->notifier_mask = mask;
394 	current->notifier_data = priv;
395 	current->notifier = notifier;
396 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
397 }
398 
399 /* Notify the system that blocking has ended. */
400 
401 void
402 unblock_all_signals(void)
403 {
404 	unsigned long flags;
405 
406 	spin_lock_irqsave(&current->sighand->siglock, flags);
407 	current->notifier = NULL;
408 	current->notifier_data = NULL;
409 	recalc_sigpending();
410 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
411 }
412 
413 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
414 {
415 	struct sigqueue *q, *first = NULL;
416 
417 	/*
418 	 * Collect the siginfo appropriate to this signal.  Check if
419 	 * there is another siginfo for the same signal.
420 	*/
421 	list_for_each_entry(q, &list->list, list) {
422 		if (q->info.si_signo == sig) {
423 			if (first)
424 				goto still_pending;
425 			first = q;
426 		}
427 	}
428 
429 	sigdelset(&list->signal, sig);
430 
431 	if (first) {
432 still_pending:
433 		list_del_init(&first->list);
434 		copy_siginfo(info, &first->info);
435 		__sigqueue_free(first);
436 	} else {
437 		/* Ok, it wasn't in the queue.  This must be
438 		   a fast-pathed signal or we must have been
439 		   out of queue space.  So zero out the info.
440 		 */
441 		info->si_signo = sig;
442 		info->si_errno = 0;
443 		info->si_code = SI_USER;
444 		info->si_pid = 0;
445 		info->si_uid = 0;
446 	}
447 }
448 
449 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
450 			siginfo_t *info)
451 {
452 	int sig = next_signal(pending, mask);
453 
454 	if (sig) {
455 		if (current->notifier) {
456 			if (sigismember(current->notifier_mask, sig)) {
457 				if (!(current->notifier)(current->notifier_data)) {
458 					clear_thread_flag(TIF_SIGPENDING);
459 					return 0;
460 				}
461 			}
462 		}
463 
464 		collect_signal(sig, pending, info);
465 	}
466 
467 	return sig;
468 }
469 
470 /*
471  * Dequeue a signal and return the element to the caller, which is
472  * expected to free it.
473  *
474  * All callers have to hold the siglock.
475  */
476 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
477 {
478 	int signr;
479 
480 	/* We only dequeue private signals from ourselves, we don't let
481 	 * signalfd steal them
482 	 */
483 	signr = __dequeue_signal(&tsk->pending, mask, info);
484 	if (!signr) {
485 		signr = __dequeue_signal(&tsk->signal->shared_pending,
486 					 mask, info);
487 		/*
488 		 * itimer signal ?
489 		 *
490 		 * itimers are process shared and we restart periodic
491 		 * itimers in the signal delivery path to prevent DoS
492 		 * attacks in the high resolution timer case. This is
493 		 * compliant with the old way of self restarting
494 		 * itimers, as the SIGALRM is a legacy signal and only
495 		 * queued once. Changing the restart behaviour to
496 		 * restart the timer in the signal dequeue path is
497 		 * reducing the timer noise on heavy loaded !highres
498 		 * systems too.
499 		 */
500 		if (unlikely(signr == SIGALRM)) {
501 			struct hrtimer *tmr = &tsk->signal->real_timer;
502 
503 			if (!hrtimer_is_queued(tmr) &&
504 			    tsk->signal->it_real_incr.tv64 != 0) {
505 				hrtimer_forward(tmr, tmr->base->get_time(),
506 						tsk->signal->it_real_incr);
507 				hrtimer_restart(tmr);
508 			}
509 		}
510 	}
511 
512 	recalc_sigpending();
513 	if (!signr)
514 		return 0;
515 
516 	if (unlikely(sig_kernel_stop(signr))) {
517 		/*
518 		 * Set a marker that we have dequeued a stop signal.  Our
519 		 * caller might release the siglock and then the pending
520 		 * stop signal it is about to process is no longer in the
521 		 * pending bitmasks, but must still be cleared by a SIGCONT
522 		 * (and overruled by a SIGKILL).  So those cases clear this
523 		 * shared flag after we've set it.  Note that this flag may
524 		 * remain set after the signal we return is ignored or
525 		 * handled.  That doesn't matter because its only purpose
526 		 * is to alert stop-signal processing code when another
527 		 * processor has come along and cleared the flag.
528 		 */
529 		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
530 	}
531 	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
532 		/*
533 		 * Release the siglock to ensure proper locking order
534 		 * of timer locks outside of siglocks.  Note, we leave
535 		 * irqs disabled here, since the posix-timers code is
536 		 * about to disable them again anyway.
537 		 */
538 		spin_unlock(&tsk->sighand->siglock);
539 		do_schedule_next_timer(info);
540 		spin_lock(&tsk->sighand->siglock);
541 	}
542 	return signr;
543 }
544 
545 /*
546  * Tell a process that it has a new active signal.
547  *
548  * NOTE! we rely on the previous spin_lock to
549  * lock interrupts for us! We can only be called with
550  * "siglock" held, and the local interrupt must
551  * have been disabled when that got acquired!
552  *
553  * No need to set need_resched since signal event passing
554  * goes through ->blocked
555  */
556 void signal_wake_up(struct task_struct *t, int resume)
557 {
558 	unsigned int mask;
559 
560 	set_tsk_thread_flag(t, TIF_SIGPENDING);
561 
562 	/*
563 	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
564 	 * case. We don't check t->state here because there is a race with it
565 	 * executing on another processor and just now entering stopped state.
566 	 * By using wake_up_state, we ensure the process will wake up and
567 	 * handle its death signal.
568 	 */
569 	mask = TASK_INTERRUPTIBLE;
570 	if (resume)
571 		mask |= TASK_WAKEKILL;
572 	if (!wake_up_state(t, mask))
573 		kick_process(t);
574 }
575 
576 /*
577  * Remove signals in mask from the pending set and queue.
578  * Returns 1 if any signals were found.
579  *
580  * All callers must be holding the siglock.
581  *
582  * This version takes a sigset mask and looks at all signals,
583  * not just those in the first mask word.
584  */
585 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
586 {
587 	struct sigqueue *q, *n;
588 	sigset_t m;
589 
590 	sigandsets(&m, mask, &s->signal);
591 	if (sigisemptyset(&m))
592 		return 0;
593 
594 	signandsets(&s->signal, &s->signal, mask);
595 	list_for_each_entry_safe(q, n, &s->list, list) {
596 		if (sigismember(mask, q->info.si_signo)) {
597 			list_del_init(&q->list);
598 			__sigqueue_free(q);
599 		}
600 	}
601 	return 1;
602 }
603 /*
604  * Remove signals in mask from the pending set and queue.
605  * Returns 1 if any signals were found.
606  *
607  * All callers must be holding the siglock.
608  */
609 static int rm_from_queue(unsigned long mask, struct sigpending *s)
610 {
611 	struct sigqueue *q, *n;
612 
613 	if (!sigtestsetmask(&s->signal, mask))
614 		return 0;
615 
616 	sigdelsetmask(&s->signal, mask);
617 	list_for_each_entry_safe(q, n, &s->list, list) {
618 		if (q->info.si_signo < SIGRTMIN &&
619 		    (mask & sigmask(q->info.si_signo))) {
620 			list_del_init(&q->list);
621 			__sigqueue_free(q);
622 		}
623 	}
624 	return 1;
625 }
626 
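/*
 * SEND_SIG_NOINFO, SEND_SIG_PRIV and SEND_SIG_FORCED are small constant
 * "pointers" rather than real siginfo structures.  is_si_special() tells
 * them apart from a genuine siginfo, and si_fromuser() reports whether the
 * signal should be treated as coming from user space.
 */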
627 static inline int is_si_special(const struct siginfo *info)
628 {
629 	return info <= SEND_SIG_FORCED;
630 }
631 
632 static inline bool si_fromuser(const struct siginfo *info)
633 {
634 	return info == SEND_SIG_NOINFO ||
635 		(!is_si_special(info) && SI_FROMUSER(info));
636 }
637 
638 /*
639  * Bad permissions for sending the signal
640  * - the caller must hold at least the RCU read lock
641  */
642 static int check_kill_permission(int sig, struct siginfo *info,
643 				 struct task_struct *t)
644 {
645 	const struct cred *cred = current_cred(), *tcred;
646 	struct pid *sid;
647 	int error;
648 
649 	if (!valid_signal(sig))
650 		return -EINVAL;
651 
652 	if (!si_fromuser(info))
653 		return 0;
654 
655 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
656 	if (error)
657 		return error;
658 
659 	tcred = __task_cred(t);
660 	if ((cred->euid ^ tcred->suid) &&
661 	    (cred->euid ^ tcred->uid) &&
662 	    (cred->uid  ^ tcred->suid) &&
663 	    (cred->uid  ^ tcred->uid) &&
664 	    !capable(CAP_KILL)) {
665 		switch (sig) {
666 		case SIGCONT:
667 			sid = task_session(t);
668 			/*
669 			 * We don't return the error if sid == NULL. The
670 			 * task was unhashed; the caller must notice this.
671 			 */
672 			if (!sid || sid == task_session(current))
673 				break;
674 		default:
675 			return -EPERM;
676 		}
677 	}
678 
679 	return security_task_kill(t, info, sig, 0);
680 }
681 
682 /*
683  * Handle magic process-wide effects of stop/continue signals. Unlike
684  * the signal actions, these happen immediately at signal-generation
685  * time regardless of blocking, ignoring, or handling.  This does the
686  * actual continuing for SIGCONT, but not the actual stopping for stop
687  * signals. The process stop is done as a signal action for SIG_DFL.
688  *
689  * Returns true if the signal should be actually delivered, otherwise
690  * it should be dropped.
691  */
692 static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
693 {
694 	struct signal_struct *signal = p->signal;
695 	struct task_struct *t;
696 
697 	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
698 		/*
699 		 * The process is in the middle of dying, nothing to do.
700 		 */
701 	} else if (sig_kernel_stop(sig)) {
702 		/*
703 		 * This is a stop signal.  Remove SIGCONT from all queues.
704 		 */
705 		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
706 		t = p;
707 		do {
708 			rm_from_queue(sigmask(SIGCONT), &t->pending);
709 		} while_each_thread(p, t);
710 	} else if (sig == SIGCONT) {
711 		unsigned int why;
712 		/*
713 		 * Remove all stop signals from all queues,
714 		 * and wake all threads.
715 		 */
716 		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
717 		t = p;
718 		do {
719 			unsigned int state;
720 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
721 			/*
722 			 * If there is a handler for SIGCONT, we must make
723 			 * sure that no thread returns to user mode before
724 			 * we post the signal, in case it was the only
725 			 * thread eligible to run the signal handler--then
726 			 * it must not do anything between resuming and
727 			 * running the handler.  With the TIF_SIGPENDING
728 			 * flag set, the thread will pause and acquire the
729 			 * siglock that we hold now and until we've queued
730 			 * the pending signal.
731 			 *
732 			 * Wake up the stopped thread _after_ setting
733 			 * TIF_SIGPENDING
734 			 */
735 			state = __TASK_STOPPED;
736 			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
737 				set_tsk_thread_flag(t, TIF_SIGPENDING);
738 				state |= TASK_INTERRUPTIBLE;
739 			}
740 			wake_up_state(t, state);
741 		} while_each_thread(p, t);
742 
743 		/*
744 		 * Notify the parent with CLD_CONTINUED if we were stopped.
745 		 *
746 		 * If we were in the middle of a group stop, we pretend it
747 		 * was already finished, and then continued. Since SIGCHLD
748 		 * doesn't queue we report only CLD_STOPPED, as if the next
749 		 * CLD_CONTINUED was dropped.
750 		 */
751 		why = 0;
752 		if (signal->flags & SIGNAL_STOP_STOPPED)
753 			why |= SIGNAL_CLD_CONTINUED;
754 		else if (signal->group_stop_count)
755 			why |= SIGNAL_CLD_STOPPED;
756 
757 		if (why) {
758 			/*
759 			 * The first thread which returns from do_signal_stop()
760 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
761 			 * notify its parent. See get_signal_to_deliver().
762 			 */
763 			signal->flags = why | SIGNAL_STOP_CONTINUED;
764 			signal->group_stop_count = 0;
765 			signal->group_exit_code = 0;
766 		} else {
767 			/*
768 			 * We are not stopped, but there could be a stop
769 			 * signal in the middle of being processed after
770 			 * being removed from the queue.  Clear that too.
771 			 */
772 			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
773 		}
774 	}
775 
776 	return !sig_ignored(p, sig, from_ancestor_ns);
777 }
778 
779 /*
780  * Test if P wants to take SIG.  After we've checked all threads with this,
781  * it's equivalent to finding no threads not blocking SIG.  Any threads not
782  * blocking SIG were ruled out because they are not running and already
783  * have pending signals.  Such threads will dequeue from the shared queue
784  * as soon as they're available, so putting the signal on the shared queue
785  * will be equivalent to sending it to one such thread.
786  */
787 static inline int wants_signal(int sig, struct task_struct *p)
788 {
789 	if (sigismember(&p->blocked, sig))
790 		return 0;
791 	if (p->flags & PF_EXITING)
792 		return 0;
793 	if (sig == SIGKILL)
794 		return 1;
795 	if (task_is_stopped_or_traced(p))
796 		return 0;
797 	return task_curr(p) || !signal_pending(p);
798 }
799 
800 static void complete_signal(int sig, struct task_struct *p, int group)
801 {
802 	struct signal_struct *signal = p->signal;
803 	struct task_struct *t;
804 
805 	/*
806 	 * Now find a thread we can wake up to take the signal off the queue.
807 	 *
808 	 * If the main thread wants the signal, it gets first crack.
809 	 * Probably the least surprising to the average bear.
810 	 */
811 	if (wants_signal(sig, p))
812 		t = p;
813 	else if (!group || thread_group_empty(p))
814 		/*
815 		 * There is just one thread and it does not need to be woken.
816 		 * It will dequeue unblocked signals before it runs again.
817 		 */
818 		return;
819 	else {
820 		/*
821 		 * Otherwise try to find a suitable thread.
822 		 */
823 		t = signal->curr_target;
824 		while (!wants_signal(sig, t)) {
825 			t = next_thread(t);
826 			if (t == signal->curr_target)
827 				/*
828 				 * No thread needs to be woken.
829 				 * Any eligible threads will see
830 				 * the signal in the queue soon.
831 				 */
832 				return;
833 		}
834 		signal->curr_target = t;
835 	}
836 
837 	/*
838 	 * Found a killable thread.  If the signal will be fatal,
839 	 * then start taking the whole group down immediately.
840 	 */
841 	if (sig_fatal(p, sig) &&
842 	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
843 	    !sigismember(&t->real_blocked, sig) &&
844 	    (sig == SIGKILL ||
845 	     !tracehook_consider_fatal_signal(t, sig))) {
846 		/*
847 		 * This signal will be fatal to the whole group.
848 		 */
849 		if (!sig_kernel_coredump(sig)) {
850 			/*
851 			 * Start a group exit and wake everybody up.
852 			 * This way we don't have other threads
853 			 * running and doing things after a slower
854 			 * thread has the fatal signal pending.
855 			 */
856 			signal->flags = SIGNAL_GROUP_EXIT;
857 			signal->group_exit_code = sig;
858 			signal->group_stop_count = 0;
859 			t = p;
860 			do {
861 				sigaddset(&t->pending.signal, SIGKILL);
862 				signal_wake_up(t, 1);
863 			} while_each_thread(p, t);
864 			return;
865 		}
866 	}
867 
868 	/*
869 	 * The signal is already in the shared-pending queue.
870 	 * Tell the chosen thread to wake up and dequeue it.
871 	 */
872 	signal_wake_up(t, sig == SIGKILL);
873 	return;
874 }
875 
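/*
 * A legacy (non-realtime) signal is only ever queued once: if it is
 * already a member of the pending set, __send_signal() drops any further
 * instances instead of queueing a second sigqueue entry.
 */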
876 static inline int legacy_queue(struct sigpending *signals, int sig)
877 {
878 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
879 }
880 
881 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
882 			int group, int from_ancestor_ns)
883 {
884 	struct sigpending *pending;
885 	struct sigqueue *q;
886 	int override_rlimit;
887 
888 	trace_signal_generate(sig, info, t);
889 
890 	assert_spin_locked(&t->sighand->siglock);
891 
892 	if (!prepare_signal(sig, t, from_ancestor_ns))
893 		return 0;
894 
895 	pending = group ? &t->signal->shared_pending : &t->pending;
896 	/*
897 	 * Short-circuit ignored signals and support queuing
898 	 * exactly one non-rt signal, so that we can get more
899 	 * detailed information about the cause of the signal.
900 	 */
901 	if (legacy_queue(pending, sig))
902 		return 0;
903 	/*
904 	 * fast-pathed signals for kernel-internal things like SIGSTOP
905 	 * or SIGKILL.
906 	 */
907 	if (info == SEND_SIG_FORCED)
908 		goto out_set;
909 
910 	/* Real-time signals must be queued if sent by sigqueue, or
911 	   some other real-time mechanism.  It is implementation
912 	   defined whether kill() does so.  We attempt to do so, on
913 	   the principle of least surprise, but since kill is not
914 	   allowed to fail with EAGAIN when low on memory we just
915 	   make sure at least one signal gets delivered and don't
916 	   pass on the info struct.  */
917 
918 	if (sig < SIGRTMIN)
919 		override_rlimit = (is_si_special(info) || info->si_code >= 0);
920 	else
921 		override_rlimit = 0;
922 
923 	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
924 		override_rlimit);
925 	if (q) {
926 		list_add_tail(&q->list, &pending->list);
927 		switch ((unsigned long) info) {
928 		case (unsigned long) SEND_SIG_NOINFO:
929 			q->info.si_signo = sig;
930 			q->info.si_errno = 0;
931 			q->info.si_code = SI_USER;
932 			q->info.si_pid = task_tgid_nr_ns(current,
933 							task_active_pid_ns(t));
934 			q->info.si_uid = current_uid();
935 			break;
936 		case (unsigned long) SEND_SIG_PRIV:
937 			q->info.si_signo = sig;
938 			q->info.si_errno = 0;
939 			q->info.si_code = SI_KERNEL;
940 			q->info.si_pid = 0;
941 			q->info.si_uid = 0;
942 			break;
943 		default:
944 			copy_siginfo(&q->info, info);
945 			if (from_ancestor_ns)
946 				q->info.si_pid = 0;
947 			break;
948 		}
949 	} else if (!is_si_special(info)) {
950 		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
951 			/*
952 			 * Queue overflow, abort.  We may abort if the
953 			 * signal was rt and sent by user using something
954 			 * other than kill().
955 			 */
956 			trace_signal_overflow_fail(sig, group, info);
957 			return -EAGAIN;
958 		} else {
959 			/*
960 			 * This is a silent loss of information.  We still
961 			 * send the signal, but the *info bits are lost.
962 			 */
963 			trace_signal_lose_info(sig, group, info);
964 		}
965 	}
966 
967 out_set:
968 	signalfd_notify(t, sig);
969 	sigaddset(&pending->signal, sig);
970 	complete_signal(sig, t, group);
971 	return 0;
972 }
973 
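/*
 * from_ancestor_ns is set when a user-space sender has no pid in the
 * target's active pid namespace (task_pid_nr_ns() returns 0), i.e. the
 * signal originates from an ancestor namespace; __send_signal() then
 * clears si_pid in the copied siginfo since it cannot be translated.
 */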
974 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
975 			int group)
976 {
977 	int from_ancestor_ns = 0;
978 
979 #ifdef CONFIG_PID_NS
980 	from_ancestor_ns = si_fromuser(info) &&
981 			   !task_pid_nr_ns(current, task_active_pid_ns(t));
982 #endif
983 
984 	return __send_signal(sig, info, t, group, from_ancestor_ns);
985 }
986 
987 static void print_fatal_signal(struct pt_regs *regs, int signr)
988 {
989 	printk("%s/%d: potentially unexpected fatal signal %d.\n",
990 		current->comm, task_pid_nr(current), signr);
991 
992 #if defined(__i386__) && !defined(__arch_um__)
993 	printk("code at %08lx: ", regs->ip);
994 	{
995 		int i;
996 		for (i = 0; i < 16; i++) {
997 			unsigned char insn;
998 
999 			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1000 				break;
1001 			printk("%02x ", insn);
1002 		}
1003 	}
1004 #endif
1005 	printk("\n");
1006 	preempt_disable();
1007 	show_regs(regs);
1008 	preempt_enable();
1009 }
1010 
1011 static int __init setup_print_fatal_signals(char *str)
1012 {
1013 	get_option (&str, &print_fatal_signals);
1014 
1015 	return 1;
1016 }
1017 
1018 __setup("print-fatal-signals=", setup_print_fatal_signals);
1019 
1020 int
1021 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1022 {
1023 	return send_signal(sig, info, p, 1);
1024 }
1025 
1026 static int
1027 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1028 {
1029 	return send_signal(sig, info, t, 0);
1030 }
1031 
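/*
 * Send a signal to @p, either thread-group wide (@group true) or to this
 * specific thread.  lock_task_sighand() pins ->sighand so the target
 * cannot be released underneath us; -ESRCH is returned if it already has
 * been.
 */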
1032 int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1033 			bool group)
1034 {
1035 	unsigned long flags;
1036 	int ret = -ESRCH;
1037 
1038 	if (lock_task_sighand(p, &flags)) {
1039 		ret = send_signal(sig, info, p, group);
1040 		unlock_task_sighand(p, &flags);
1041 	}
1042 
1043 	return ret;
1044 }
1045 
1046 /*
1047  * Force a signal that the process can't ignore: if necessary
1048  * we unblock the signal and change any SIG_IGN to SIG_DFL.
1049  *
1050  * Note: If we unblock the signal, we always reset it to SIG_DFL,
1051  * since we do not want to have a signal handler that was blocked
1052  * be invoked when user space had explicitly blocked it.
1053  *
1054  * We don't want to have recursive SIGSEGV's etc, for example,
1055  * that is why we also clear SIGNAL_UNKILLABLE.
1056  */
1057 int
1058 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1059 {
1060 	unsigned long int flags;
1061 	int ret, blocked, ignored;
1062 	struct k_sigaction *action;
1063 
1064 	spin_lock_irqsave(&t->sighand->siglock, flags);
1065 	action = &t->sighand->action[sig-1];
1066 	ignored = action->sa.sa_handler == SIG_IGN;
1067 	blocked = sigismember(&t->blocked, sig);
1068 	if (blocked || ignored) {
1069 		action->sa.sa_handler = SIG_DFL;
1070 		if (blocked) {
1071 			sigdelset(&t->blocked, sig);
1072 			recalc_sigpending_and_wake(t);
1073 		}
1074 	}
1075 	if (action->sa.sa_handler == SIG_DFL)
1076 		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1077 	ret = specific_send_sig_info(sig, info, t);
1078 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1079 
1080 	return ret;
1081 }
1082 
1083 /*
1084  * Nuke all other threads in the group.
1085  */
1086 void zap_other_threads(struct task_struct *p)
1087 {
1088 	struct task_struct *t;
1089 
1090 	p->signal->group_stop_count = 0;
1091 
1092 	for (t = next_thread(p); t != p; t = next_thread(t)) {
1093 		/*
1094 		 * Don't bother with already dead threads
1095 		 */
1096 		if (t->exit_state)
1097 			continue;
1098 
1099 		/* SIGKILL will be handled before any pending SIGSTOP */
1100 		sigaddset(&t->pending.signal, SIGKILL);
1101 		signal_wake_up(t, 1);
1102 	}
1103 }
1104 
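/*
 * Take ->siglock while making sure tsk->sighand is still valid: ->sighand
 * is protected by RCU and may be changed (de_thread) or cleared
 * (__exit_signal) concurrently, so retry until the pointer is stable under
 * the lock.  Returns NULL if the task no longer has a sighand.
 */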
1105 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
1106 {
1107 	struct sighand_struct *sighand;
1108 
1109 	rcu_read_lock();
1110 	for (;;) {
1111 		sighand = rcu_dereference(tsk->sighand);
1112 		if (unlikely(sighand == NULL))
1113 			break;
1114 
1115 		spin_lock_irqsave(&sighand->siglock, *flags);
1116 		if (likely(sighand == tsk->sighand))
1117 			break;
1118 		spin_unlock_irqrestore(&sighand->siglock, *flags);
1119 	}
1120 	rcu_read_unlock();
1121 
1122 	return sighand;
1123 }
1124 
1125 /*
1126  * send signal info to all the members of a group
1127  * - the caller must hold the RCU read lock at least
1128  */
1129 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1130 {
1131 	int ret = check_kill_permission(sig, info, p);
1132 
1133 	if (!ret && sig)
1134 		ret = do_send_sig_info(sig, info, p, true);
1135 
1136 	return ret;
1137 }
1138 
1139 /*
1140  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1141  * control characters do (^C, ^Z etc)
1142  * - the caller must hold at least a readlock on tasklist_lock
1143  */
1144 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1145 {
1146 	struct task_struct *p = NULL;
1147 	int retval, success;
1148 
1149 	success = 0;
1150 	retval = -ESRCH;
1151 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1152 		int err = group_send_sig_info(sig, info, p);
1153 		success |= !err;
1154 		retval = err;
1155 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1156 	return success ? 0 : retval;
1157 }
1158 
1159 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1160 {
1161 	int error = -ESRCH;
1162 	struct task_struct *p;
1163 
1164 	rcu_read_lock();
1165 retry:
1166 	p = pid_task(pid, PIDTYPE_PID);
1167 	if (p) {
1168 		error = group_send_sig_info(sig, info, p);
1169 		if (unlikely(error == -ESRCH))
1170 			/*
1171 			 * The task was unhashed in between, try again.
1172 			 * If it is dead, pid_task() will return NULL,
1173 			 * if we race with de_thread() it will find the
1174 			 * new leader.
1175 			 */
1176 			goto retry;
1177 	}
1178 	rcu_read_unlock();
1179 
1180 	return error;
1181 }
1182 
1183 int
1184 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1185 {
1186 	int error;
1187 	rcu_read_lock();
1188 	error = kill_pid_info(sig, info, find_vpid(pid));
1189 	rcu_read_unlock();
1190 	return error;
1191 }
1192 
1193 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1194 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1195 		      uid_t uid, uid_t euid, u32 secid)
1196 {
1197 	int ret = -EINVAL;
1198 	struct task_struct *p;
1199 	const struct cred *pcred;
1200 	unsigned long flags;
1201 
1202 	if (!valid_signal(sig))
1203 		return ret;
1204 
1205 	rcu_read_lock();
1206 	p = pid_task(pid, PIDTYPE_PID);
1207 	if (!p) {
1208 		ret = -ESRCH;
1209 		goto out_unlock;
1210 	}
1211 	pcred = __task_cred(p);
1212 	if (si_fromuser(info) &&
1213 	    euid != pcred->suid && euid != pcred->uid &&
1214 	    uid  != pcred->suid && uid  != pcred->uid) {
1215 		ret = -EPERM;
1216 		goto out_unlock;
1217 	}
1218 	ret = security_task_kill(p, info, sig, secid);
1219 	if (ret)
1220 		goto out_unlock;
1221 
1222 	if (sig) {
1223 		if (lock_task_sighand(p, &flags)) {
1224 			ret = __send_signal(sig, info, p, 1, 0);
1225 			unlock_task_sighand(p, &flags);
1226 		} else
1227 			ret = -ESRCH;
1228 	}
1229 out_unlock:
1230 	rcu_read_unlock();
1231 	return ret;
1232 }
1233 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1234 
1235 /*
1236  * kill_something_info() interprets pid in interesting ways just like kill(2).
1237  *
1238  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1239  * is probably wrong.  Should make it like BSD or SYSV.
1240  */
1241 
1242 static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1243 {
1244 	int ret;
1245 
1246 	if (pid > 0) {
1247 		rcu_read_lock();
1248 		ret = kill_pid_info(sig, info, find_vpid(pid));
1249 		rcu_read_unlock();
1250 		return ret;
1251 	}
1252 
1253 	read_lock(&tasklist_lock);
1254 	if (pid != -1) {
1255 		ret = __kill_pgrp_info(sig, info,
1256 				pid ? find_vpid(-pid) : task_pgrp(current));
1257 	} else {
1258 		int retval = 0, count = 0;
1259 		struct task_struct * p;
1260 
1261 		for_each_process(p) {
1262 			if (task_pid_vnr(p) > 1 &&
1263 					!same_thread_group(p, current)) {
1264 				int err = group_send_sig_info(sig, info, p);
1265 				++count;
1266 				if (err != -EPERM)
1267 					retval = err;
1268 			}
1269 		}
1270 		ret = count ? retval : -ESRCH;
1271 	}
1272 	read_unlock(&tasklist_lock);
1273 
1274 	return ret;
1275 }
1276 
1277 /*
1278  * These are for backward compatibility with the rest of the kernel source.
1279  */
1280 
1281 int
1282 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1283 {
1284 	/*
1285 	 * Make sure legacy kernel users don't send in bad values
1286 	 * (normal paths check this in check_kill_permission).
1287 	 */
1288 	if (!valid_signal(sig))
1289 		return -EINVAL;
1290 
1291 	return do_send_sig_info(sig, info, p, false);
1292 }
1293 
1294 #define __si_special(priv) \
1295 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1296 
1297 int
1298 send_sig(int sig, struct task_struct *p, int priv)
1299 {
1300 	return send_sig_info(sig, __si_special(priv), p);
1301 }
1302 
1303 void
1304 force_sig(int sig, struct task_struct *p)
1305 {
1306 	force_sig_info(sig, SEND_SIG_PRIV, p);
1307 }
1308 
1309 /*
1310  * When things go south during signal handling, we
1311  * will force a SIGSEGV. And if the signal that caused
1312  * the problem was already a SIGSEGV, we'll want to
1313  * make sure we don't even try to deliver the signal..
1314  */
1315 int
1316 force_sigsegv(int sig, struct task_struct *p)
1317 {
1318 	if (sig == SIGSEGV) {
1319 		unsigned long flags;
1320 		spin_lock_irqsave(&p->sighand->siglock, flags);
1321 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1322 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1323 	}
1324 	force_sig(SIGSEGV, p);
1325 	return 0;
1326 }
1327 
1328 int kill_pgrp(struct pid *pid, int sig, int priv)
1329 {
1330 	int ret;
1331 
1332 	read_lock(&tasklist_lock);
1333 	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1334 	read_unlock(&tasklist_lock);
1335 
1336 	return ret;
1337 }
1338 EXPORT_SYMBOL(kill_pgrp);
1339 
1340 int kill_pid(struct pid *pid, int sig, int priv)
1341 {
1342 	return kill_pid_info(sig, __si_special(priv), pid);
1343 }
1344 EXPORT_SYMBOL(kill_pid);
1345 
1346 /*
1347  * These functions support sending signals using preallocated sigqueue
1348  * structures.  This is needed "because realtime applications cannot
1349  * afford to lose notifications of asynchronous events, like timer
1350  * expirations or I/O completions".  In the case of Posix Timers
1351  * we allocate the sigqueue structure from the timer_create.  If this
1352  * allocation fails we are able to report the failure to the application
1353  * with an EAGAIN error.
1354  */
1355 struct sigqueue *sigqueue_alloc(void)
1356 {
1357 	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1358 
1359 	if (q)
1360 		q->flags |= SIGQUEUE_PREALLOC;
1361 
1362 	return q;
1363 }
1364 
1365 void sigqueue_free(struct sigqueue *q)
1366 {
1367 	unsigned long flags;
1368 	spinlock_t *lock = &current->sighand->siglock;
1369 
1370 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1371 	/*
1372 	 * We must hold ->siglock while testing q->list
1373 	 * to serialize with collect_signal() or with
1374 	 * __exit_signal()->flush_sigqueue().
1375 	 */
1376 	spin_lock_irqsave(lock, flags);
1377 	q->flags &= ~SIGQUEUE_PREALLOC;
1378 	/*
1379 	 * If it is queued it will be freed when dequeued,
1380 	 * like the "regular" sigqueue.
1381 	 */
1382 	if (!list_empty(&q->list))
1383 		q = NULL;
1384 	spin_unlock_irqrestore(lock, flags);
1385 
1386 	if (q)
1387 		__sigqueue_free(q);
1388 }
1389 
1390 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1391 {
1392 	int sig = q->info.si_signo;
1393 	struct sigpending *pending;
1394 	unsigned long flags;
1395 	int ret;
1396 
1397 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1398 
1399 	ret = -1;
1400 	if (!likely(lock_task_sighand(t, &flags)))
1401 		goto ret;
1402 
1403 	ret = 1; /* the signal is ignored */
1404 	if (!prepare_signal(sig, t, 0))
1405 		goto out;
1406 
1407 	ret = 0;
1408 	if (unlikely(!list_empty(&q->list))) {
1409 		/*
1410 		 * If an SI_TIMER entry is already queued, just increment
1411 		 * the overrun count.
1412 		 */
1413 		BUG_ON(q->info.si_code != SI_TIMER);
1414 		q->info.si_overrun++;
1415 		goto out;
1416 	}
1417 	q->info.si_overrun = 0;
1418 
1419 	signalfd_notify(t, sig);
1420 	pending = group ? &t->signal->shared_pending : &t->pending;
1421 	list_add_tail(&q->list, &pending->list);
1422 	sigaddset(&pending->signal, sig);
1423 	complete_signal(sig, t, group);
1424 out:
1425 	unlock_task_sighand(t, &flags);
1426 ret:
1427 	return ret;
1428 }
1429 
1430 /*
1431  * Let a parent know about the death of a child.
1432  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1433  *
1434  * Returns -1 if our parent ignored us and so we've switched to
1435  * self-reaping, or else @sig.
1436  */
1437 int do_notify_parent(struct task_struct *tsk, int sig)
1438 {
1439 	struct siginfo info;
1440 	unsigned long flags;
1441 	struct sighand_struct *psig;
1442 	int ret = sig;
1443 
1444 	BUG_ON(sig == -1);
1445 
1446  	/* do_notify_parent_cldstop should have been called instead.  */
1447  	BUG_ON(task_is_stopped_or_traced(tsk));
1448 
1449 	BUG_ON(!task_ptrace(tsk) &&
1450 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1451 
1452 	info.si_signo = sig;
1453 	info.si_errno = 0;
1454 	/*
1455 	 * we are under tasklist_lock here so our parent is tied to
1456 	 * us and cannot exit and release its namespace.
1457 	 *
1458 	 * the only thing it can do is to switch its nsproxy with sys_unshare,
1459 	 * but unsharing pid namespaces is not allowed, so we'll always
1460 	 * see the relevant namespace
1461 	 *
1462 	 * write_lock() currently calls preempt_disable() which is the
1463 	 * same as rcu_read_lock(), but according to Oleg, it is not
1464 	 * correct to rely on this
1465 	 */
1466 	rcu_read_lock();
1467 	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1468 	info.si_uid = __task_cred(tsk)->uid;
1469 	rcu_read_unlock();
1470 
1471 	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
1472 				tsk->signal->utime));
1473 	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
1474 				tsk->signal->stime));
1475 
1476 	info.si_status = tsk->exit_code & 0x7f;
1477 	if (tsk->exit_code & 0x80)
1478 		info.si_code = CLD_DUMPED;
1479 	else if (tsk->exit_code & 0x7f)
1480 		info.si_code = CLD_KILLED;
1481 	else {
1482 		info.si_code = CLD_EXITED;
1483 		info.si_status = tsk->exit_code >> 8;
1484 	}
1485 
1486 	psig = tsk->parent->sighand;
1487 	spin_lock_irqsave(&psig->siglock, flags);
1488 	if (!task_ptrace(tsk) && sig == SIGCHLD &&
1489 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1490 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1491 		/*
1492 		 * We are exiting and our parent doesn't care.  POSIX.1
1493 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1494 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1495 		 * automatically and not left for our parent's wait4 call.
1496 		 * Rather than having the parent do it as a magic kind of
1497 		 * signal handler, we just set this to tell do_exit that we
1498 		 * can be cleaned up without becoming a zombie.  Note that
1499 		 * we still call __wake_up_parent in this case, because a
1500 		 * blocked sys_wait4 might now return -ECHILD.
1501 		 *
1502 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1503 		 * is implementation-defined: we do (if you don't want
1504 		 * it, just use SIG_IGN instead).
1505 		 */
1506 		ret = tsk->exit_signal = -1;
1507 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1508 			sig = -1;
1509 	}
1510 	if (valid_signal(sig) && sig > 0)
1511 		__group_send_sig_info(sig, &info, tsk->parent);
1512 	__wake_up_parent(tsk, tsk->parent);
1513 	spin_unlock_irqrestore(&psig->siglock, flags);
1514 
1515 	return ret;
1516 }
1517 
1518 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1519 {
1520 	struct siginfo info;
1521 	unsigned long flags;
1522 	struct task_struct *parent;
1523 	struct sighand_struct *sighand;
1524 
1525 	if (task_ptrace(tsk))
1526 		parent = tsk->parent;
1527 	else {
1528 		tsk = tsk->group_leader;
1529 		parent = tsk->real_parent;
1530 	}
1531 
1532 	info.si_signo = SIGCHLD;
1533 	info.si_errno = 0;
1534 	/*
1535 	 * see comment in do_notify_parent() about the following 3 lines
1536 	 */
1537 	rcu_read_lock();
1538 	info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
1539 	info.si_uid = __task_cred(tsk)->uid;
1540 	rcu_read_unlock();
1541 
1542 	info.si_utime = cputime_to_clock_t(tsk->utime);
1543 	info.si_stime = cputime_to_clock_t(tsk->stime);
1544 
1545  	info.si_code = why;
1546  	switch (why) {
1547  	case CLD_CONTINUED:
1548  		info.si_status = SIGCONT;
1549  		break;
1550  	case CLD_STOPPED:
1551  		info.si_status = tsk->signal->group_exit_code & 0x7f;
1552  		break;
1553  	case CLD_TRAPPED:
1554  		info.si_status = tsk->exit_code & 0x7f;
1555  		break;
1556  	default:
1557  		BUG();
1558  	}
1559 
1560 	sighand = parent->sighand;
1561 	spin_lock_irqsave(&sighand->siglock, flags);
1562 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1563 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1564 		__group_send_sig_info(SIGCHLD, &info, parent);
1565 	/*
1566 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1567 	 */
1568 	__wake_up_parent(tsk, parent);
1569 	spin_unlock_irqrestore(&sighand->siglock, flags);
1570 }
1571 
1572 static inline int may_ptrace_stop(void)
1573 {
1574 	if (!likely(task_ptrace(current)))
1575 		return 0;
1576 	/*
1577 	 * Are we in the middle of do_coredump?
1578 	 * If so, and our tracer is also part of the coredump, stopping
1579 	 * is a deadlock situation and pointless because our tracer
1580 	 * is dead, so don't allow us to stop.
1581 	 * If SIGKILL was already sent before the caller unlocked
1582 	 * ->siglock we must see ->core_state != NULL. Otherwise it
1583 	 * is safe to enter schedule().
1584 	 */
1585 	if (unlikely(current->mm->core_state) &&
1586 	    unlikely(current->mm == current->parent->mm))
1587 		return 0;
1588 
1589 	return 1;
1590 }
1591 
1592 /*
1593  * Return nonzero if there is a SIGKILL that should be waking us up.
1594  * Called with the siglock held.
1595  */
1596 static int sigkill_pending(struct task_struct *tsk)
1597 {
1598 	return	sigismember(&tsk->pending.signal, SIGKILL) ||
1599 		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1600 }
1601 
1602 /*
1603  * This must be called with current->sighand->siglock held.
1604  *
1605  * This should be the path for all ptrace stops.
1606  * We always set current->last_siginfo while stopped here.
1607  * That makes it a way to test a stopped process for
1608  * being ptrace-stopped vs being job-control-stopped.
1609  *
1610  * If we actually decide not to stop at all because the tracer
1611  * is gone, we keep current->exit_code unless clear_code.
1612  */
1613 static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1614 {
1615 	if (arch_ptrace_stop_needed(exit_code, info)) {
1616 		/*
1617 		 * The arch code has something special to do before a
1618 		 * ptrace stop.  This is allowed to block, e.g. for faults
1619 		 * on user stack pages.  We can't keep the siglock while
1620 		 * calling arch_ptrace_stop, so we must release it now.
1621 		 * To preserve proper semantics, we must do this before
1622 		 * any signal bookkeeping like checking group_stop_count.
1623 		 * Meanwhile, a SIGKILL could come in before we retake the
1624 		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
1625 		 * So after regaining the lock, we must check for SIGKILL.
1626 		 */
1627 		spin_unlock_irq(&current->sighand->siglock);
1628 		arch_ptrace_stop(exit_code, info);
1629 		spin_lock_irq(&current->sighand->siglock);
1630 		if (sigkill_pending(current))
1631 			return;
1632 	}
1633 
1634 	/*
1635 	 * If there is a group stop in progress,
1636 	 * we must participate in the bookkeeping.
1637 	 */
1638 	if (current->signal->group_stop_count > 0)
1639 		--current->signal->group_stop_count;
1640 
1641 	current->last_siginfo = info;
1642 	current->exit_code = exit_code;
1643 
1644 	/* Let the debugger run.  */
1645 	__set_current_state(TASK_TRACED);
1646 	spin_unlock_irq(&current->sighand->siglock);
1647 	read_lock(&tasklist_lock);
1648 	if (may_ptrace_stop()) {
1649 		do_notify_parent_cldstop(current, CLD_TRAPPED);
1650 		/*
1651 		 * Don't want to allow preemption here, because
1652 		 * sys_ptrace() needs this task to be inactive.
1653 		 *
1654 		 * XXX: implement read_unlock_no_resched().
1655 		 */
1656 		preempt_disable();
1657 		read_unlock(&tasklist_lock);
1658 		preempt_enable_no_resched();
1659 		schedule();
1660 	} else {
1661 		/*
1662 		 * By the time we got the lock, our tracer went away.
1663 		 * Don't drop the lock yet, another tracer may come.
1664 		 */
1665 		__set_current_state(TASK_RUNNING);
1666 		if (clear_code)
1667 			current->exit_code = 0;
1668 		read_unlock(&tasklist_lock);
1669 	}
1670 
1671 	/*
1672 	 * While in TASK_TRACED, we were considered "frozen enough".
1673 	 * Now that we woke up, it's crucial if we're supposed to be
1674 	 * frozen that we freeze now before running anything substantial.
1675 	 */
1676 	try_to_freeze();
1677 
1678 	/*
1679 	 * We are back.  Now reacquire the siglock before touching
1680 	 * last_siginfo, so that we are sure to have synchronized with
1681 	 * any signal-sending on another CPU that wants to examine it.
1682 	 */
1683 	spin_lock_irq(&current->sighand->siglock);
1684 	current->last_siginfo = NULL;
1685 
1686 	/*
1687 	 * Queued signals ignored us while we were stopped for tracing.
1688 	 * So check for any that we should take before resuming user mode.
1689 	 * This sets TIF_SIGPENDING, but never clears it.
1690 	 */
1691 	recalc_sigpending_tsk(current);
1692 }
1693 
1694 void ptrace_notify(int exit_code)
1695 {
1696 	siginfo_t info;
1697 
1698 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1699 
1700 	memset(&info, 0, sizeof info);
1701 	info.si_signo = SIGTRAP;
1702 	info.si_code = exit_code;
1703 	info.si_pid = task_pid_vnr(current);
1704 	info.si_uid = current_uid();
1705 
1706 	/* Let the debugger run.  */
1707 	spin_lock_irq(&current->sighand->siglock);
1708 	ptrace_stop(exit_code, 1, &info);
1709 	spin_unlock_irq(&current->sighand->siglock);
1710 }
1711 
1712 /*
1713  * This performs the stopping for SIGSTOP and other stop signals.
1714  * We have to stop all threads in the thread group.
1715  * Returns nonzero if we've actually stopped and released the siglock.
1716  * Returns zero if we didn't stop and still hold the siglock.
1717  */
1718 static int do_signal_stop(int signr)
1719 {
1720 	struct signal_struct *sig = current->signal;
1721 	int notify;
1722 
1723 	if (!sig->group_stop_count) {
1724 		struct task_struct *t;
1725 
1726 		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
1727 		    unlikely(signal_group_exit(sig)))
1728 			return 0;
1729 		/*
1730 		 * There is no group stop already in progress.
1731 		 * We must initiate one now.
1732 		 */
1733 		sig->group_exit_code = signr;
1734 
1735 		sig->group_stop_count = 1;
1736 		for (t = next_thread(current); t != current; t = next_thread(t))
1737 			/*
1738 			 * Setting state to TASK_STOPPED for a group
1739 			 * stop is always done with the siglock held,
1740 			 * so this check has no races.
1741 			 */
1742 			if (!(t->flags & PF_EXITING) &&
1743 			    !task_is_stopped_or_traced(t)) {
1744 				sig->group_stop_count++;
1745 				signal_wake_up(t, 0);
1746 			}
1747 	}
1748 	/*
1749 	 * If there are no other threads in the group, or if there is
1750 	 * a group stop in progress and we are the last to stop, report
1751 	 * to the parent.  When ptraced, every thread reports itself.
1752 	 */
1753 	notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0;
1754 	notify = tracehook_notify_jctl(notify, CLD_STOPPED);
1755 	/*
1756 	 * tracehook_notify_jctl() can drop and reacquire siglock, so
1757 	 * we keep ->group_stop_count != 0 before the call. If SIGCONT
1758 	 * or SIGKILL arrives in between, ->group_stop_count becomes 0.
1759 	 */
1760 	if (sig->group_stop_count) {
1761 		if (!--sig->group_stop_count)
1762 			sig->flags = SIGNAL_STOP_STOPPED;
1763 		current->exit_code = sig->group_exit_code;
1764 		__set_current_state(TASK_STOPPED);
1765 	}
1766 	spin_unlock_irq(&current->sighand->siglock);
1767 
1768 	if (notify) {
1769 		read_lock(&tasklist_lock);
1770 		do_notify_parent_cldstop(current, notify);
1771 		read_unlock(&tasklist_lock);
1772 	}
1773 
1774 	/* Now we don't run again until woken by SIGCONT or SIGKILL */
1775 	do {
1776 		schedule();
1777 	} while (try_to_freeze());
1778 
1779 	tracehook_finish_jctl();
1780 	current->exit_code = 0;
1781 
1782 	return 1;
1783 }
1784 
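/*
 * A traced task reports each dequeued signal to its tracer via a ptrace
 * stop.  The tracer may cancel the signal (we return 0), let it through
 * unchanged, or replace it with a different one, in which case the siginfo
 * is rebuilt here unless the tracer supplied its own via PTRACE_SETSIGINFO.
 */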
1785 static int ptrace_signal(int signr, siginfo_t *info,
1786 			 struct pt_regs *regs, void *cookie)
1787 {
1788 	if (!task_ptrace(current))
1789 		return signr;
1790 
1791 	ptrace_signal_deliver(regs, cookie);
1792 
1793 	/* Let the debugger run.  */
1794 	ptrace_stop(signr, 0, info);
1795 
1796 	/* We're back.  Did the debugger cancel the sig?  */
1797 	signr = current->exit_code;
1798 	if (signr == 0)
1799 		return signr;
1800 
1801 	current->exit_code = 0;
1802 
1803 	/* Update the siginfo structure if the signal has
1804 	   changed.  If the debugger wanted something
1805 	   specific in the siginfo structure then it should
1806 	   have updated *info via PTRACE_SETSIGINFO.  */
1807 	if (signr != info->si_signo) {
1808 		info->si_signo = signr;
1809 		info->si_errno = 0;
1810 		info->si_code = SI_USER;
1811 		info->si_pid = task_pid_vnr(current->parent);
1812 		info->si_uid = task_uid(current->parent);
1813 	}
1814 
1815 	/* If the (new) signal is now blocked, requeue it.  */
1816 	if (sigismember(&current->blocked, signr)) {
1817 		specific_send_sig_info(signr, info, current);
1818 		signr = 0;
1819 	}
1820 
1821 	return signr;
1822 }
1823 
1824 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1825 			  struct pt_regs *regs, void *cookie)
1826 {
1827 	struct sighand_struct *sighand = current->sighand;
1828 	struct signal_struct *signal = current->signal;
1829 	int signr;
1830 
1831 relock:
1832 	/*
1833 	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
1834 	 * While in TASK_STOPPED, we were considered "frozen enough".
1835 	 * Now that we woke up, it's crucial if we're supposed to be
1836 	 * frozen that we freeze now before running anything substantial.
1837 	 */
1838 	try_to_freeze();
1839 
1840 	spin_lock_irq(&sighand->siglock);
1841 	/*
1842 	 * Every stopped thread goes here after wakeup. Check to see if
1843 	 * we should notify the parent, prepare_signal(SIGCONT) encodes
1844 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
1845 	 */
1846 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
1847 		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
1848 				? CLD_CONTINUED : CLD_STOPPED;
1849 		signal->flags &= ~SIGNAL_CLD_MASK;
1850 
1851 		why = tracehook_notify_jctl(why, CLD_CONTINUED);
1852 		spin_unlock_irq(&sighand->siglock);
1853 
1854 		if (why) {
1855 			read_lock(&tasklist_lock);
1856 			do_notify_parent_cldstop(current->group_leader, why);
1857 			read_unlock(&tasklist_lock);
1858 		}
1859 		goto relock;
1860 	}
1861 
1862 	for (;;) {
1863 		struct k_sigaction *ka;
1864 		/*
1865 		 * Tracing can induce an artificial signal and choose sigaction.
1866 		 * The return value in @signr determines the default action,
1867 		 * but @info->si_signo is the signal number we will report.
1868 		 */
1869 		signr = tracehook_get_signal(current, regs, info, return_ka);
1870 		if (unlikely(signr < 0))
1871 			goto relock;
1872 		if (unlikely(signr != 0))
1873 			ka = return_ka;
1874 		else {
1875 			if (unlikely(signal->group_stop_count > 0) &&
1876 			    do_signal_stop(0))
1877 				goto relock;
1878 
1879 			signr = dequeue_signal(current, &current->blocked,
1880 					       info);
1881 
1882 			if (!signr)
1883 				break; /* will return 0 */
1884 
1885 			if (signr != SIGKILL) {
1886 				signr = ptrace_signal(signr, info,
1887 						      regs, cookie);
1888 				if (!signr)
1889 					continue;
1890 			}
1891 
1892 			ka = &sighand->action[signr-1];
1893 		}
1894 
1895 		/* Trace actually delivered signals. */
1896 		trace_signal_deliver(signr, info, ka);
1897 
1898 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1899 			continue;
1900 		if (ka->sa.sa_handler != SIG_DFL) {
1901 			/* Run the handler.  */
1902 			*return_ka = *ka;
1903 
1904 			if (ka->sa.sa_flags & SA_ONESHOT)
1905 				ka->sa.sa_handler = SIG_DFL;
1906 
1907 			break; /* will return non-zero "signr" value */
1908 		}
1909 
1910 		/*
1911 		 * Now we are doing the default action for this signal.
1912 		 */
1913 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
1914 			continue;
1915 
1916 		/*
1917 		 * Global init gets no signals it doesn't want.
1918 		 * Container-init gets no signals it doesn't want from same
1919 		 * container.
1920 		 *
1921 		 * Note that if global/container-init sees a sig_kernel_only()
1922 		 * signal here, the signal must have been generated internally
1923 		 * or must have come from an ancestor namespace. In either
1924 		 * case, the signal cannot be dropped.
1925 		 */
1926 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
1927 				!sig_kernel_only(signr))
1928 			continue;
1929 
1930 		if (sig_kernel_stop(signr)) {
1931 			/*
1932 			 * The default action is to stop all threads in
1933 			 * the thread group.  The job control signals
1934 			 * do nothing in an orphaned pgrp, but SIGSTOP
1935 			 * always works.  Note that siglock needs to be
1936 			 * dropped during the call to is_orphaned_pgrp()
1937 			 * because of lock ordering with tasklist_lock.
1938 			 * This allows an intervening SIGCONT to be posted.
1939 			 * We need to check for that and bail out if necessary.
1940 			 */
1941 			if (signr != SIGSTOP) {
1942 				spin_unlock_irq(&sighand->siglock);
1943 
1944 				/* signals can be posted during this window */
1945 
1946 				if (is_current_pgrp_orphaned())
1947 					goto relock;
1948 
1949 				spin_lock_irq(&sighand->siglock);
1950 			}
1951 
1952 			if (likely(do_signal_stop(info->si_signo))) {
1953 				/* It released the siglock.  */
1954 				goto relock;
1955 			}
1956 
1957 			/*
1958 			 * We didn't actually stop, due to a race
1959 			 * with SIGCONT or something like that.
1960 			 */
1961 			continue;
1962 		}
1963 
1964 		spin_unlock_irq(&sighand->siglock);
1965 
1966 		/*
1967 		 * Anything else is fatal, maybe with a core dump.
1968 		 */
1969 		current->flags |= PF_SIGNALED;
1970 
1971 		if (sig_kernel_coredump(signr)) {
1972 			if (print_fatal_signals)
1973 				print_fatal_signal(regs, info->si_signo);
1974 			/*
1975 			 * If it was able to dump core, this kills all
1976 			 * other threads in the group and synchronizes with
1977 			 * their demise.  If we lost the race with another
1978 			 * thread getting here, it set group_exit_code
1979 			 * first and our do_group_exit call below will use
1980 			 * that value and ignore the one we pass it.
1981 			 */
1982 			do_coredump(info->si_signo, info->si_signo, regs);
1983 		}
1984 
1985 		/*
1986 		 * Death signals, no core dump.
1987 		 */
1988 		do_group_exit(info->si_signo);
1989 		/* NOTREACHED */
1990 	}
1991 	spin_unlock_irq(&sighand->siglock);
1992 	return signr;
1993 }
1994 
1995 void exit_signals(struct task_struct *tsk)
1996 {
1997 	int group_stop = 0;
1998 	struct task_struct *t;
1999 
2000 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2001 		tsk->flags |= PF_EXITING;
2002 		return;
2003 	}
2004 
2005 	spin_lock_irq(&tsk->sighand->siglock);
2006 	/*
2007 	 * From now on, this task is not visible to group-wide signals;
2008 	 * see wants_signal(), do_signal_stop().
2009 	 */
2010 	tsk->flags |= PF_EXITING;
2011 	if (!signal_pending(tsk))
2012 		goto out;
2013 
2014 	/* It could be that __group_complete_signal() chose us to
2015 	 * notify about a group-wide signal. Another thread should be
2016 	 * woken now to take the signal since we will not.
2017 	 */
2018 	for (t = tsk; (t = next_thread(t)) != tsk; )
2019 		if (!signal_pending(t) && !(t->flags & PF_EXITING))
2020 			recalc_sigpending_and_wake(t);
2021 
2022 	if (unlikely(tsk->signal->group_stop_count) &&
2023 			!--tsk->signal->group_stop_count) {
2024 		tsk->signal->flags = SIGNAL_STOP_STOPPED;
2025 		group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED);
2026 	}
2027 out:
2028 	spin_unlock_irq(&tsk->sighand->siglock);
2029 
2030 	if (unlikely(group_stop)) {
2031 		read_lock(&tasklist_lock);
2032 		do_notify_parent_cldstop(tsk, group_stop);
2033 		read_unlock(&tasklist_lock);
2034 	}
2035 }
2036 
2037 EXPORT_SYMBOL(recalc_sigpending);
2038 EXPORT_SYMBOL_GPL(dequeue_signal);
2039 EXPORT_SYMBOL(flush_signals);
2040 EXPORT_SYMBOL(force_sig);
2041 EXPORT_SYMBOL(send_sig);
2042 EXPORT_SYMBOL(send_sig_info);
2043 EXPORT_SYMBOL(sigprocmask);
2044 EXPORT_SYMBOL(block_all_signals);
2045 EXPORT_SYMBOL(unblock_all_signals);
2046 
2047 
2048 /*
2049  * System call entry points.
2050  */
2051 
2052 SYSCALL_DEFINE0(restart_syscall)
2053 {
2054 	struct restart_block *restart = &current_thread_info()->restart_block;
2055 	return restart->fn(restart);
2056 }
2057 
2058 long do_no_restart_syscall(struct restart_block *param)
2059 {
2060 	return -EINTR;
2061 }
2062 
2063 /*
2064  * We don't need to get the kernel lock - this is all local to this
2065  * particular thread (and that's good, because this is _heavily_
2066  * used by various programs)
2067  */
2068 
2069 /*
2070  * This is also useful for kernel threads that want to temporarily
2071  * (or permanently) block certain signals.
2072  *
2073  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2074  * interface happily blocks "unblockable" signals like SIGKILL
2075  * and friends.
2076  */
2077 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2078 {
2079 	int error;
2080 
2081 	spin_lock_irq(&current->sighand->siglock);
2082 	if (oldset)
2083 		*oldset = current->blocked;
2084 
2085 	error = 0;
2086 	switch (how) {
2087 	case SIG_BLOCK:
2088 		sigorsets(&current->blocked, &current->blocked, set);
2089 		break;
2090 	case SIG_UNBLOCK:
2091 		signandsets(&current->blocked, &current->blocked, set);
2092 		break;
2093 	case SIG_SETMASK:
2094 		current->blocked = *set;
2095 		break;
2096 	default:
2097 		error = -EINVAL;
2098 	}
2099 	recalc_sigpending();
2100 	spin_unlock_irq(&current->sighand->siglock);
2101 
2102 	return error;
2103 }
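
/*
 * Minimal in-kernel sketch (illustrative only, not used by this file):
 * as the comment above notes, this interface is handy for kernel threads.
 * siginitsetinv() builds the complement of the given mask, so the blocked
 * set below ends up containing every signal except SIGKILL.
 */
static void __maybe_unused example_block_all_but_sigkill(void)
{
	sigset_t blocked;

	siginitsetinv(&blocked, sigmask(SIGKILL));
	sigprocmask(SIG_SETMASK, &blocked, NULL);
}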
2104 
2105 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
2106 		sigset_t __user *, oset, size_t, sigsetsize)
2107 {
2108 	int error = -EINVAL;
2109 	sigset_t old_set, new_set;
2110 
2111 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2112 	if (sigsetsize != sizeof(sigset_t))
2113 		goto out;
2114 
2115 	if (set) {
2116 		error = -EFAULT;
2117 		if (copy_from_user(&new_set, set, sizeof(*set)))
2118 			goto out;
2119 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2120 
2121 		error = sigprocmask(how, &new_set, &old_set);
2122 		if (error)
2123 			goto out;
2124 		if (oset)
2125 			goto set_old;
2126 	} else if (oset) {
2127 		spin_lock_irq(&current->sighand->siglock);
2128 		old_set = current->blocked;
2129 		spin_unlock_irq(&current->sighand->siglock);
2130 
2131 	set_old:
2132 		error = -EFAULT;
2133 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2134 			goto out;
2135 	}
2136 	error = 0;
2137 out:
2138 	return error;
2139 }
2140 
2141 long do_sigpending(void __user *set, unsigned long sigsetsize)
2142 {
2143 	long error = -EINVAL;
2144 	sigset_t pending;
2145 
2146 	if (sigsetsize > sizeof(sigset_t))
2147 		goto out;
2148 
2149 	spin_lock_irq(&current->sighand->siglock);
2150 	sigorsets(&pending, &current->pending.signal,
2151 		  &current->signal->shared_pending.signal);
2152 	spin_unlock_irq(&current->sighand->siglock);
2153 
2154 	/* Outside the lock because only this thread touches it.  */
2155 	sigandsets(&pending, &current->blocked, &pending);
2156 
2157 	error = -EFAULT;
2158 	if (!copy_to_user(set, &pending, sigsetsize))
2159 		error = 0;
2160 
2161 out:
2162 	return error;
2163 }
2164 
2165 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
2166 {
2167 	return do_sigpending(set, sigsetsize);
2168 }
2169 
2170 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2171 
2172 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2173 {
2174 	int err;
2175 
2176 	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2177 		return -EFAULT;
2178 	if (from->si_code < 0)
2179 		return __copy_to_user(to, from, sizeof(siginfo_t))
2180 			? -EFAULT : 0;
2181 	/*
2182 	 * If you change the siginfo_t structure, please be sure
2183 	 * this code is fixed accordingly.
2184 	 * Please remember to update the signalfd_copyinfo() function
2185 	 * inside fs/signalfd.c too, in case siginfo_t changes.
2186 	 * It should never copy any pad contained in the structure
2187 	 * to avoid security leaks, but must copy the generic
2188 	 * 3 ints plus the relevant union member.
2189 	 */
2190 	err = __put_user(from->si_signo, &to->si_signo);
2191 	err |= __put_user(from->si_errno, &to->si_errno);
2192 	err |= __put_user((short)from->si_code, &to->si_code);
2193 	switch (from->si_code & __SI_MASK) {
2194 	case __SI_KILL:
2195 		err |= __put_user(from->si_pid, &to->si_pid);
2196 		err |= __put_user(from->si_uid, &to->si_uid);
2197 		break;
2198 	case __SI_TIMER:
2199 		err |= __put_user(from->si_tid, &to->si_tid);
2200 		err |= __put_user(from->si_overrun, &to->si_overrun);
2201 		err |= __put_user(from->si_ptr, &to->si_ptr);
2202 		break;
2203 	case __SI_POLL:
2204 		err |= __put_user(from->si_band, &to->si_band);
2205 		err |= __put_user(from->si_fd, &to->si_fd);
2206 		break;
2207 	case __SI_FAULT:
2208 		err |= __put_user(from->si_addr, &to->si_addr);
2209 #ifdef __ARCH_SI_TRAPNO
2210 		err |= __put_user(from->si_trapno, &to->si_trapno);
2211 #endif
2212 		break;
2213 	case __SI_CHLD:
2214 		err |= __put_user(from->si_pid, &to->si_pid);
2215 		err |= __put_user(from->si_uid, &to->si_uid);
2216 		err |= __put_user(from->si_status, &to->si_status);
2217 		err |= __put_user(from->si_utime, &to->si_utime);
2218 		err |= __put_user(from->si_stime, &to->si_stime);
2219 		break;
2220 	case __SI_RT: /* This is not generated by the kernel as of now. */
2221 	case __SI_MESGQ: /* But this is */
2222 		err |= __put_user(from->si_pid, &to->si_pid);
2223 		err |= __put_user(from->si_uid, &to->si_uid);
2224 		err |= __put_user(from->si_ptr, &to->si_ptr);
2225 		break;
2226 	default: /* this is just in case for now ... */
2227 		err |= __put_user(from->si_pid, &to->si_pid);
2228 		err |= __put_user(from->si_uid, &to->si_uid);
2229 		break;
2230 	}
2231 	return err;
2232 }
2233 
2234 #endif
2235 
2236 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2237 		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2238 		size_t, sigsetsize)
2239 {
2240 	int ret, sig;
2241 	sigset_t these;
2242 	struct timespec ts;
2243 	siginfo_t info;
2244 	long timeout = 0;
2245 
2246 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2247 	if (sigsetsize != sizeof(sigset_t))
2248 		return -EINVAL;
2249 
2250 	if (copy_from_user(&these, uthese, sizeof(these)))
2251 		return -EFAULT;
2252 
2253 	/*
2254 	 * Invert the set of allowed signals to get those we
2255 	 * want to block.
2256 	 */
2257 	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2258 	signotset(&these);
2259 
2260 	if (uts) {
2261 		if (copy_from_user(&ts, uts, sizeof(ts)))
2262 			return -EFAULT;
2263 		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2264 		    || ts.tv_sec < 0)
2265 			return -EINVAL;
2266 	}
2267 
2268 	spin_lock_irq(&current->sighand->siglock);
2269 	sig = dequeue_signal(current, &these, &info);
2270 	if (!sig) {
2271 		timeout = MAX_SCHEDULE_TIMEOUT;
2272 		if (uts)
2273 			timeout = (timespec_to_jiffies(&ts)
2274 				   + (ts.tv_sec || ts.tv_nsec));
2275 
2276 		if (timeout) {
2277 			/* None ready -- temporarily unblock those we're
2278 			 * interested in while we are sleeping, so that we'll
2279 			 * be awakened when they arrive.  */
2280 			current->real_blocked = current->blocked;
2281 			sigandsets(&current->blocked, &current->blocked, &these);
2282 			recalc_sigpending();
2283 			spin_unlock_irq(&current->sighand->siglock);
2284 
2285 			timeout = schedule_timeout_interruptible(timeout);
2286 
2287 			spin_lock_irq(&current->sighand->siglock);
2288 			sig = dequeue_signal(current, &these, &info);
2289 			current->blocked = current->real_blocked;
2290 			siginitset(&current->real_blocked, 0);
2291 			recalc_sigpending();
2292 		}
2293 	}
2294 	spin_unlock_irq(&current->sighand->siglock);
2295 
2296 	if (sig) {
2297 		ret = sig;
2298 		if (uinfo) {
2299 			if (copy_siginfo_to_user(uinfo, &info))
2300 				ret = -EFAULT;
2301 		}
2302 	} else {
2303 		ret = -EAGAIN;
2304 		if (timeout)
2305 			ret = -EINTR;
2306 	}
2307 
2308 	return ret;
2309 }
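
/*
 * Illustrative userspace sketch (not part of this file): the syscall above
 * backs sigtimedwait(3).  The usual pattern is to block the signal first so
 * it stays pending instead of being delivered asynchronously, then dequeue
 * it synchronously with a timeout.  SIGUSR1 and the 5s timeout are arbitrary.
 */
#include <stdio.h>
#include <signal.h>
#include <time.h>

static int wait_for_usr1(void)
{
	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
	sigset_t set;
	siginfo_t info;
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* keep SIGUSR1 pending */

	sig = sigtimedwait(&set, &info, &ts);
	if (sig < 0)
		return -1;			/* EAGAIN on timeout, EINTR otherwise */
	printf("got signal %d from pid %d\n", sig, (int)info.si_pid);
	return 0;
}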
2310 
2311 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2312 {
2313 	struct siginfo info;
2314 
2315 	info.si_signo = sig;
2316 	info.si_errno = 0;
2317 	info.si_code = SI_USER;
2318 	info.si_pid = task_tgid_vnr(current);
2319 	info.si_uid = current_uid();
2320 
2321 	return kill_something_info(sig, &info, pid);
2322 }
2323 
2324 static int
2325 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2326 {
2327 	struct task_struct *p;
2328 	int error = -ESRCH;
2329 
2330 	rcu_read_lock();
2331 	p = find_task_by_vpid(pid);
2332 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2333 		error = check_kill_permission(sig, info, p);
2334 		/*
2335 		 * The null signal is a permissions and process existence
2336 		 * probe.  No signal is actually delivered.
2337 		 */
2338 		if (!error && sig) {
2339 			error = do_send_sig_info(sig, info, p, false);
2340 			/*
2341 			 * If lock_task_sighand() failed we pretend the task
2342 			 * dies after receiving the signal. The window is tiny,
2343 			 * and the signal is private anyway.
2344 			 */
2345 			if (unlikely(error == -ESRCH))
2346 				error = 0;
2347 		}
2348 	}
2349 	rcu_read_unlock();
2350 
2351 	return error;
2352 }
2353 
2354 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2355 {
2356 	struct siginfo info;
2357 
2358 	info.si_signo = sig;
2359 	info.si_errno = 0;
2360 	info.si_code = SI_TKILL;
2361 	info.si_pid = task_tgid_vnr(current);
2362 	info.si_uid = current_uid();
2363 
2364 	return do_send_specific(tgid, pid, sig, &info);
2365 }
2366 
2367 /**
2368  *  sys_tgkill - send signal to one specific thread
2369  *  @tgid: the thread group ID of the thread
2370  *  @pid: the PID of the thread
2371  *  @sig: signal to be sent
2372  *
2373  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2374  *  exists but no longer belongs to the target process. This
2375  *  method solves the problem of threads exiting and PIDs getting reused.
2376  */
2377 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2378 {
2379 	/* This is only valid for single tasks */
2380 	if (pid <= 0 || tgid <= 0)
2381 		return -EINVAL;
2382 
2383 	return do_tkill(tgid, pid, sig);
2384 }
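
/*
 * Illustrative userspace sketch (not part of this file): tgkill has no libc
 * wrapper on older systems, so it is typically reached via syscall(2).
 * Passing both the tgid and the tid is what guards against the tid having
 * been recycled by an unrelated process, as the comment above describes.
 */
#include <signal.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>

static int send_usr1_to_thread(pid_t tgid, pid_t tid)
{
	return syscall(SYS_tgkill, tgid, tid, SIGUSR1);
}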
2385 
2386 /*
2387  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2388  */
2389 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2390 {
2391 	/* This is only valid for single tasks */
2392 	if (pid <= 0)
2393 		return -EINVAL;
2394 
2395 	return do_tkill(0, pid, sig);
2396 }
2397 
2398 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2399 		siginfo_t __user *, uinfo)
2400 {
2401 	siginfo_t info;
2402 
2403 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2404 		return -EFAULT;
2405 
2406 	/* Not even root can pretend to send signals from the kernel.
2407 	   Nor can they impersonate a kill(), which adds source info.  */
2408 	if (info.si_code >= 0)
2409 		return -EPERM;
2410 	info.si_signo = sig;
2411 
2412 	/* POSIX.1b doesn't mention process groups.  */
2413 	return kill_proc_info(sig, &info, pid);
2414 }
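
/*
 * Illustrative userspace sketch (not part of this file): rt_sigqueueinfo is
 * normally reached through sigqueue(3), which fills in si_code = SI_QUEUE
 * (a negative value), so it passes the si_code >= 0 check above while still
 * carrying one word of payload to the receiver's SA_SIGINFO handler.
 */
#include <signal.h>
#include <sys/types.h>

static int queue_value(pid_t pid, int value)
{
	union sigval sv = { .sival_int = value };

	return sigqueue(pid, SIGRTMIN, sv);
}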
2415 
2416 long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2417 {
2418 	/* This is only valid for single tasks */
2419 	if (pid <= 0 || tgid <= 0)
2420 		return -EINVAL;
2421 
2422 	/* Not even root can pretend to send signals from the kernel.
2423 	   Nor can they impersonate a kill(), which adds source info.  */
2424 	if (info->si_code >= 0)
2425 		return -EPERM;
2426 	info->si_signo = sig;
2427 
2428 	return do_send_specific(tgid, pid, sig, info);
2429 }
2430 
2431 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
2432 		siginfo_t __user *, uinfo)
2433 {
2434 	siginfo_t info;
2435 
2436 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2437 		return -EFAULT;
2438 
2439 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
2440 }
2441 
2442 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2443 {
2444 	struct task_struct *t = current;
2445 	struct k_sigaction *k;
2446 	sigset_t mask;
2447 
2448 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2449 		return -EINVAL;
2450 
2451 	k = &t->sighand->action[sig-1];
2452 
2453 	spin_lock_irq(&current->sighand->siglock);
2454 	if (oact)
2455 		*oact = *k;
2456 
2457 	if (act) {
2458 		sigdelsetmask(&act->sa.sa_mask,
2459 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
2460 		*k = *act;
2461 		/*
2462 		 * POSIX 3.3.1.3:
2463 		 *  "Setting a signal action to SIG_IGN for a signal that is
2464 		 *   pending shall cause the pending signal to be discarded,
2465 		 *   whether or not it is blocked."
2466 		 *
2467 		 *  "Setting a signal action to SIG_DFL for a signal that is
2468 		 *   pending and whose default action is to ignore the signal
2469 		 *   (for example, SIGCHLD), shall cause the pending signal to
2470 		 *   be discarded, whether or not it is blocked"
2471 		 */
2472 		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2473 			sigemptyset(&mask);
2474 			sigaddset(&mask, sig);
2475 			rm_from_queue_full(&mask, &t->signal->shared_pending);
2476 			do {
2477 				rm_from_queue_full(&mask, &t->pending);
2478 				t = next_thread(t);
2479 			} while (t != current);
2480 		}
2481 	}
2482 
2483 	spin_unlock_irq(&current->sighand->siglock);
2484 	return 0;
2485 }
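
/*
 * Illustrative userspace sketch (not part of this file): the POSIX rule
 * quoted above means a pending, blocked signal is discarded once its action
 * becomes SIG_IGN.  After the signal() call below, sigpending() should no
 * longer report SIGUSR1 even though it was raised while blocked.
 */
#include <stdio.h>
#include <signal.h>

static void demo_discard_pending(void)
{
	sigset_t set, pending;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	raise(SIGUSR1);				/* pending and blocked */
	signal(SIGUSR1, SIG_IGN);		/* disposition becomes SIG_IGN */

	sigpending(&pending);
	printf("SIGUSR1 still pending: %d\n", sigismember(&pending, SIGUSR1));
}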
2486 
2487 int
2488 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2489 {
2490 	stack_t oss;
2491 	int error;
2492 
2493 	oss.ss_sp = (void __user *) current->sas_ss_sp;
2494 	oss.ss_size = current->sas_ss_size;
2495 	oss.ss_flags = sas_ss_flags(sp);
2496 
2497 	if (uss) {
2498 		void __user *ss_sp;
2499 		size_t ss_size;
2500 		int ss_flags;
2501 
2502 		error = -EFAULT;
2503 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
2504 			goto out;
2505 		error = __get_user(ss_sp, &uss->ss_sp) |
2506 			__get_user(ss_flags, &uss->ss_flags) |
2507 			__get_user(ss_size, &uss->ss_size);
2508 		if (error)
2509 			goto out;
2510 
2511 		error = -EPERM;
2512 		if (on_sig_stack(sp))
2513 			goto out;
2514 
2515 		error = -EINVAL;
2516 		/*
2517 		 *
2518 		 * Note - this code used to test ss_flags incorrectly;
2519 		 * old code may have been written using ss_flags==0
2520 		 * to mean ss_flags==SS_ONSTACK (as this was the only
2521 		 * way that worked) - this fix preserves that older
2522 		 * mechanism.
2523 		 */
2524 		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2525 			goto out;
2526 
2527 		if (ss_flags == SS_DISABLE) {
2528 			ss_size = 0;
2529 			ss_sp = NULL;
2530 		} else {
2531 			error = -ENOMEM;
2532 			if (ss_size < MINSIGSTKSZ)
2533 				goto out;
2534 		}
2535 
2536 		current->sas_ss_sp = (unsigned long) ss_sp;
2537 		current->sas_ss_size = ss_size;
2538 	}
2539 
2540 	error = 0;
2541 	if (uoss) {
2542 		error = -EFAULT;
2543 		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
2544 			goto out;
2545 		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
2546 			__put_user(oss.ss_size, &uoss->ss_size) |
2547 			__put_user(oss.ss_flags, &uoss->ss_flags);
2548 	}
2549 
2550 out:
2551 	return error;
2552 }
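
/*
 * Illustrative userspace sketch (not part of this file): the usual consumer
 * of do_sigaltstack() is sigaltstack(2) paired with an SA_ONSTACK handler,
 * so that e.g. a SIGSEGV caused by stack overflow can still run a handler
 * on a private stack.  SIGSTKSZ comes from <signal.h>.
 */
#include <signal.h>
#include <stdlib.h>

static void on_segv(int sig)
{
	(void)sig;
	_Exit(1);
}

static int install_alt_stack(void)
{
	stack_t ss = { .ss_sp = malloc(SIGSTKSZ), .ss_size = SIGSTKSZ, .ss_flags = 0 };
	struct sigaction sa = { .sa_handler = on_segv, .sa_flags = SA_ONSTACK };

	if (!ss.ss_sp || sigaltstack(&ss, NULL) < 0)
		return -1;
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGSEGV, &sa, NULL);
}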
2553 
2554 #ifdef __ARCH_WANT_SYS_SIGPENDING
2555 
2556 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
2557 {
2558 	return do_sigpending(set, sizeof(*set));
2559 }
2560 
2561 #endif
2562 
2563 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2564 /* Some platforms have their own version with special arguments;
2565    others support only sys_rt_sigprocmask.  */
2566 
2567 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
2568 		old_sigset_t __user *, oset)
2569 {
2570 	int error;
2571 	old_sigset_t old_set, new_set;
2572 
2573 	if (set) {
2574 		error = -EFAULT;
2575 		if (copy_from_user(&new_set, set, sizeof(*set)))
2576 			goto out;
2577 		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2578 
2579 		spin_lock_irq(&current->sighand->siglock);
2580 		old_set = current->blocked.sig[0];
2581 
2582 		error = 0;
2583 		switch (how) {
2584 		default:
2585 			error = -EINVAL;
2586 			break;
2587 		case SIG_BLOCK:
2588 			sigaddsetmask(&current->blocked, new_set);
2589 			break;
2590 		case SIG_UNBLOCK:
2591 			sigdelsetmask(&current->blocked, new_set);
2592 			break;
2593 		case SIG_SETMASK:
2594 			current->blocked.sig[0] = new_set;
2595 			break;
2596 		}
2597 
2598 		recalc_sigpending();
2599 		spin_unlock_irq(&current->sighand->siglock);
2600 		if (error)
2601 			goto out;
2602 		if (oset)
2603 			goto set_old;
2604 	} else if (oset) {
2605 		old_set = current->blocked.sig[0];
2606 	set_old:
2607 		error = -EFAULT;
2608 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2609 			goto out;
2610 	}
2611 	error = 0;
2612 out:
2613 	return error;
2614 }
2615 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2616 
2617 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2618 SYSCALL_DEFINE4(rt_sigaction, int, sig,
2619 		const struct sigaction __user *, act,
2620 		struct sigaction __user *, oact,
2621 		size_t, sigsetsize)
2622 {
2623 	struct k_sigaction new_sa, old_sa;
2624 	int ret = -EINVAL;
2625 
2626 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2627 	if (sigsetsize != sizeof(sigset_t))
2628 		goto out;
2629 
2630 	if (act) {
2631 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2632 			return -EFAULT;
2633 	}
2634 
2635 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2636 
2637 	if (!ret && oact) {
2638 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2639 			return -EFAULT;
2640 	}
2641 out:
2642 	return ret;
2643 }
2644 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2645 
2646 #ifdef __ARCH_WANT_SYS_SGETMASK
2647 
2648 /*
2649  * For backwards compatibility.  Functionality superseded by sigprocmask.
2650  */
2651 SYSCALL_DEFINE0(sgetmask)
2652 {
2653 	/* SMP safe */
2654 	return current->blocked.sig[0];
2655 }
2656 
2657 SYSCALL_DEFINE1(ssetmask, int, newmask)
2658 {
2659 	int old;
2660 
2661 	spin_lock_irq(&current->sighand->siglock);
2662 	old = current->blocked.sig[0];
2663 
2664 	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2665 						  sigmask(SIGSTOP)));
2666 	recalc_sigpending();
2667 	spin_unlock_irq(&current->sighand->siglock);
2668 
2669 	return old;
2670 }
2671 #endif /* __ARCH_WANT_SYS_SGETMASK */
2672 
2673 #ifdef __ARCH_WANT_SYS_SIGNAL
2674 /*
2675  * For backwards compatibility.  Functionality superseded by sigaction.
2676  */
2677 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
2678 {
2679 	struct k_sigaction new_sa, old_sa;
2680 	int ret;
2681 
2682 	new_sa.sa.sa_handler = handler;
2683 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2684 	sigemptyset(&new_sa.sa.sa_mask);
2685 
2686 	ret = do_sigaction(sig, &new_sa, &old_sa);
2687 
2688 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2689 }
2690 #endif /* __ARCH_WANT_SYS_SIGNAL */
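
/*
 * Illustrative userspace sketch (not part of this file): the compatibility
 * syscall above installs handlers with SA_ONESHOT|SA_NOMASK, i.e. classic
 * SysV semantics where the disposition resets to SIG_DFL after one delivery.
 * Old-style code therefore reinstalls the handler from inside itself; modern
 * libcs map signal() onto sigaction() with BSD semantics, and new code
 * should use sigaction() directly.
 */
#include <signal.h>

static void on_int(int sig)
{
	signal(sig, on_int);	/* reinstall under one-shot (SysV) semantics */
	/* ... handle the interrupt ... */
}

static void install_sysv_style_handler(void)
{
	signal(SIGINT, on_int);
}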
2691 
2692 #ifdef __ARCH_WANT_SYS_PAUSE
2693 
2694 SYSCALL_DEFINE0(pause)
2695 {
2696 	current->state = TASK_INTERRUPTIBLE;
2697 	schedule();
2698 	return -ERESTARTNOHAND;
2699 }
2700 
2701 #endif
2702 
2703 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2704 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
2705 {
2706 	sigset_t newset;
2707 
2708 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2709 	if (sigsetsize != sizeof(sigset_t))
2710 		return -EINVAL;
2711 
2712 	if (copy_from_user(&newset, unewset, sizeof(newset)))
2713 		return -EFAULT;
2714 	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2715 
2716 	spin_lock_irq(&current->sighand->siglock);
2717 	current->saved_sigmask = current->blocked;
2718 	current->blocked = newset;
2719 	recalc_sigpending();
2720 	spin_unlock_irq(&current->sighand->siglock);
2721 
2722 	current->state = TASK_INTERRUPTIBLE;
2723 	schedule();
2724 	set_restore_sigmask();
2725 	return -ERESTARTNOHAND;
2726 }
2727 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
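
/*
 * Illustrative userspace sketch (not part of this file): the saved_sigmask
 * dance above is what makes sigsuspend(3) an atomic "swap mask and sleep".
 * The classic pattern blocks the signal, tests a flag set by the handler,
 * and only then sleeps with the original mask, so a signal arriving between
 * the test and the sleep cannot be lost.
 */
#include <signal.h>

static volatile sig_atomic_t got_usr1;

static void on_usr1(int sig)
{
	(void)sig;
	got_usr1 = 1;
}

static void wait_for_flag(void)
{
	sigset_t block, orig;

	signal(SIGUSR1, on_usr1);
	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &orig);

	while (!got_usr1)
		sigsuspend(&orig);	/* atomically unblock and sleep */

	sigprocmask(SIG_SETMASK, &orig, NULL);
}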
2728 
2729 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2730 {
2731 	return NULL;
2732 }
2733 
2734 void __init signals_init(void)
2735 {
2736 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2737 }
2738