xref: /linux/kernel/signal.c (revision f7af616c632ee2ac3af0876fe33bf9e0232e665a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/kernel/signal.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  *
7  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
8  *
9  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
10  *		Changes to use preallocated sigqueue structures
11  *		to allow signals to be sent reliably.
12  */
13 
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/init.h>
17 #include <linux/sched/mm.h>
18 #include <linux/sched/user.h>
19 #include <linux/sched/debug.h>
20 #include <linux/sched/task.h>
21 #include <linux/sched/task_stack.h>
22 #include <linux/sched/cputime.h>
23 #include <linux/file.h>
24 #include <linux/fs.h>
25 #include <linux/proc_fs.h>
26 #include <linux/tty.h>
27 #include <linux/binfmts.h>
28 #include <linux/coredump.h>
29 #include <linux/security.h>
30 #include <linux/syscalls.h>
31 #include <linux/ptrace.h>
32 #include <linux/signal.h>
33 #include <linux/signalfd.h>
34 #include <linux/ratelimit.h>
35 #include <linux/tracehook.h>
36 #include <linux/capability.h>
37 #include <linux/freezer.h>
38 #include <linux/pid_namespace.h>
39 #include <linux/nsproxy.h>
40 #include <linux/user_namespace.h>
41 #include <linux/uprobes.h>
42 #include <linux/compat.h>
43 #include <linux/cn_proc.h>
44 #include <linux/compiler.h>
45 #include <linux/posix-timers.h>
46 #include <linux/cgroup.h>
47 #include <linux/audit.h>
48 
49 #define CREATE_TRACE_POINTS
50 #include <trace/events/signal.h>
51 
52 #include <asm/param.h>
53 #include <linux/uaccess.h>
54 #include <asm/unistd.h>
55 #include <asm/siginfo.h>
56 #include <asm/cacheflush.h>
57 
58 /*
59  * SLAB caches for signal bits.
60  */
61 
62 static struct kmem_cache *sigqueue_cachep;
63 
64 int print_fatal_signals __read_mostly;
65 
66 static void __user *sig_handler(struct task_struct *t, int sig)
67 {
68 	return t->sighand->action[sig - 1].sa.sa_handler;
69 }
70 
71 static inline bool sig_handler_ignored(void __user *handler, int sig)
72 {
73 	/* Is it explicitly or implicitly ignored? */
74 	return handler == SIG_IGN ||
75 	       (handler == SIG_DFL && sig_kernel_ignore(sig));
76 }
77 
78 static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
79 {
80 	void __user *handler;
81 
82 	handler = sig_handler(t, sig);
83 
84 	/* SIGKILL and SIGSTOP may not be sent to the global init */
85 	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
86 		return true;
87 
88 	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
89 	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
90 		return true;
91 
92 	/* Only allow kernel generated signals to this kthread */
93 	if (unlikely((t->flags & PF_KTHREAD) &&
94 		     (handler == SIG_KTHREAD_KERNEL) && !force))
95 		return true;
96 
97 	return sig_handler_ignored(handler, sig);
98 }
99 
100 static bool sig_ignored(struct task_struct *t, int sig, bool force)
101 {
102 	/*
103 	 * Blocked signals are never ignored, since the
104 	 * signal handler may change by the time it is
105 	 * unblocked.
106 	 */
107 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
108 		return false;
109 
110 	/*
111 	 * Tracers may want to know about even ignored signals, unless it
112 	 * is SIGKILL, which can't be reported anyway but can be ignored
113 	 * by a SIGNAL_UNKILLABLE task.
114 	 */
115 	if (t->ptrace && sig != SIGKILL)
116 		return false;
117 
118 	return sig_task_ignored(t, sig, force);
119 }
120 
121 /*
122  * Re-calculate pending state from the set of locally pending
123  * signals, globally pending signals, and blocked signals.
124  */
125 static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
126 {
127 	unsigned long ready;
128 	long i;
129 
130 	switch (_NSIG_WORDS) {
131 	default:
132 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
133 			ready |= signal->sig[i] &~ blocked->sig[i];
134 		break;
135 
136 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
137 		ready |= signal->sig[2] &~ blocked->sig[2];
138 		ready |= signal->sig[1] &~ blocked->sig[1];
139 		ready |= signal->sig[0] &~ blocked->sig[0];
140 		break;
141 
142 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
143 		ready |= signal->sig[0] &~ blocked->sig[0];
144 		break;
145 
146 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
147 	}
148 	return ready != 0;
149 }
150 
151 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
152 
153 static bool recalc_sigpending_tsk(struct task_struct *t)
154 {
155 	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
156 	    PENDING(&t->pending, &t->blocked) ||
157 	    PENDING(&t->signal->shared_pending, &t->blocked) ||
158 	    cgroup_task_frozen(t)) {
159 		set_tsk_thread_flag(t, TIF_SIGPENDING);
160 		return true;
161 	}
162 
163 	/*
164 	 * We must never clear the flag in another thread, or in current
165 	 * when it's possible the current syscall is returning -ERESTART*.
166 	 * So we don't clear it here; only callers who know it is safe clear it.
167 	 */
168 	return false;
169 }
170 
171 /*
172  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
173  * This is superfluous when called on current; the wakeup is a harmless no-op.
174  */
175 void recalc_sigpending_and_wake(struct task_struct *t)
176 {
177 	if (recalc_sigpending_tsk(t))
178 		signal_wake_up(t, 0);
179 }
180 
181 void recalc_sigpending(void)
182 {
183 	if (!recalc_sigpending_tsk(current) && !freezing(current))
184 		clear_thread_flag(TIF_SIGPENDING);
185 
186 }
187 EXPORT_SYMBOL(recalc_sigpending);
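
/*
 * Illustrative sketch, not part of this file: the usual pairing of a
 * current->blocked update with recalc_sigpending(), loosely modelled on
 * what the sigprocmask() path does via __set_task_blocked().  The real
 * path additionally retargets shared pending signals to other threads;
 * this simplified example omits that step.
 */
static void __maybe_unused example_update_blocked(const sigset_t *newset)
{
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = *newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}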
188 
189 void calculate_sigpending(void)
190 {
191 	/* Have any signals or users of TIF_SIGPENDING been delayed
192 	 * until after fork?
193 	 */
194 	spin_lock_irq(&current->sighand->siglock);
195 	set_tsk_thread_flag(current, TIF_SIGPENDING);
196 	recalc_sigpending();
197 	spin_unlock_irq(&current->sighand->siglock);
198 }
199 
200 /* Given the mask, find the first available signal that should be serviced. */
201 
202 #define SYNCHRONOUS_MASK \
203 	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
204 	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
205 
206 int next_signal(struct sigpending *pending, sigset_t *mask)
207 {
208 	unsigned long i, *s, *m, x;
209 	int sig = 0;
210 
211 	s = pending->signal.sig;
212 	m = mask->sig;
213 
214 	/*
215 	 * Handle the first word specially: it contains the
216 	 * synchronous signals that need to be dequeued first.
217 	 */
218 	x = *s &~ *m;
219 	if (x) {
220 		if (x & SYNCHRONOUS_MASK)
221 			x &= SYNCHRONOUS_MASK;
222 		sig = ffz(~x) + 1;
223 		return sig;
224 	}
225 
226 	switch (_NSIG_WORDS) {
227 	default:
228 		for (i = 1; i < _NSIG_WORDS; ++i) {
229 			x = *++s &~ *++m;
230 			if (!x)
231 				continue;
232 			sig = ffz(~x) + i*_NSIG_BPW + 1;
233 			break;
234 		}
235 		break;
236 
237 	case 2:
238 		x = s[1] &~ m[1];
239 		if (!x)
240 			break;
241 		sig = ffz(~x) + _NSIG_BPW + 1;
242 		break;
243 
244 	case 1:
245 		/* Nothing to do */
246 		break;
247 	}
248 
249 	return sig;
250 }
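
/*
 * Illustrative sketch, not part of this file: the effect of the
 * SYNCHRONOUS_MASK special-casing above.  With both SIGUSR1 and SIGSEGV
 * pending and nothing blocked, next_signal() reports the synchronous
 * SIGSEGV first even though SIGUSR1 has the lower signal number.
 */
static void __maybe_unused next_signal_example(void)
{
	struct sigpending pending;
	sigset_t blocked;

	INIT_LIST_HEAD(&pending.list);
	sigemptyset(&pending.signal);
	sigemptyset(&blocked);

	sigaddset(&pending.signal, SIGUSR1);
	sigaddset(&pending.signal, SIGSEGV);

	/* SIGSEGV is in SYNCHRONOUS_MASK, so it wins over SIGUSR1. */
	WARN_ON(next_signal(&pending, &blocked) != SIGSEGV);
}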
251 
252 static inline void print_dropped_signal(int sig)
253 {
254 	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
255 
256 	if (!print_fatal_signals)
257 		return;
258 
259 	if (!__ratelimit(&ratelimit_state))
260 		return;
261 
262 	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
263 				current->comm, current->pid, sig);
264 }
265 
266 /**
267  * task_set_jobctl_pending - set jobctl pending bits
268  * @task: target task
269  * @mask: pending bits to set
270  *
271  * Set @mask on @task->jobctl.  @mask must be a subset of
272  * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
273  * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
274  * cleared.  If @task is already being killed or exiting, this function
275  * becomes a no-op.
276  *
277  * CONTEXT:
278  * Must be called with @task->sighand->siglock held.
279  *
280  * RETURNS:
281  * %true if @mask is set, %false if it became a no-op because @task was dying.
282  */
283 bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
284 {
285 	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
286 			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
287 	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
288 
289 	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
290 		return false;
291 
292 	if (mask & JOBCTL_STOP_SIGMASK)
293 		task->jobctl &= ~JOBCTL_STOP_SIGMASK;
294 
295 	task->jobctl |= mask;
296 	return true;
297 }
298 
299 /**
300  * task_clear_jobctl_trapping - clear jobctl trapping bit
301  * @task: target task
302  *
303  * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
304  * Clear it and wake up the ptracer.  Note that we don't need any further
305  * locking.  @task->siglock guarantees that @task->parent points to the
306  * ptracer.
307  *
308  * CONTEXT:
309  * Must be called with @task->sighand->siglock held.
310  */
311 void task_clear_jobctl_trapping(struct task_struct *task)
312 {
313 	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
314 		task->jobctl &= ~JOBCTL_TRAPPING;
315 		smp_mb();	/* advised by wake_up_bit() */
316 		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
317 	}
318 }
319 
320 /**
321  * task_clear_jobctl_pending - clear jobctl pending bits
322  * @task: target task
323  * @mask: pending bits to clear
324  *
325  * Clear @mask from @task->jobctl.  @mask must be subset of
326  * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
327  * STOP bits are cleared together.
328  *
329  * If clearing of @mask leaves no stop or trap pending, this function calls
330  * task_clear_jobctl_trapping().
331  *
332  * CONTEXT:
333  * Must be called with @task->sighand->siglock held.
334  */
335 void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
336 {
337 	BUG_ON(mask & ~JOBCTL_PENDING_MASK);
338 
339 	if (mask & JOBCTL_STOP_PENDING)
340 		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
341 
342 	task->jobctl &= ~mask;
343 
344 	if (!(task->jobctl & JOBCTL_PENDING_MASK))
345 		task_clear_jobctl_trapping(task);
346 }
347 
348 /**
349  * task_participate_group_stop - participate in a group stop
350  * @task: task participating in a group stop
351  *
352  * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
353  * Group stop states are cleared and the group stop count is consumed if
354  * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
355  * stop, the appropriate `SIGNAL_*` flags are set.
356  *
357  * CONTEXT:
358  * Must be called with @task->sighand->siglock held.
359  *
360  * RETURNS:
361  * %true if group stop completion should be notified to the parent, %false
362  * otherwise.
363  */
364 static bool task_participate_group_stop(struct task_struct *task)
365 {
366 	struct signal_struct *sig = task->signal;
367 	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
368 
369 	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
370 
371 	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
372 
373 	if (!consume)
374 		return false;
375 
376 	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
377 		sig->group_stop_count--;
378 
379 	/*
380 	 * Tell the caller to notify completion iff we are entering into a
381 	 * fresh group stop.  Read comment in do_signal_stop() for details.
382 	 */
383 	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
384 		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
385 		return true;
386 	}
387 	return false;
388 }
389 
390 void task_join_group_stop(struct task_struct *task)
391 {
392 	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
393 	struct signal_struct *sig = current->signal;
394 
395 	if (sig->group_stop_count) {
396 		sig->group_stop_count++;
397 		mask |= JOBCTL_STOP_CONSUME;
398 	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
399 		return;
400 
401 	/* Have the new thread join an on-going signal group stop */
402 	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
403 }
404 
405 /*
406  * allocate a new signal queue record
407  * - this may be called without locks if and only if t == current, otherwise an
408  *   appropriate lock must be held to stop the target task from exiting
409  */
410 static struct sigqueue *
411 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
412 		 int override_rlimit, const unsigned int sigqueue_flags)
413 {
414 	struct sigqueue *q = NULL;
415 	struct user_struct *user;
416 	int sigpending;
417 
418 	/*
419 	 * Protect access to @t credentials. This can go away when all
420 	 * callers hold rcu read lock.
421 	 *
422 	 * NOTE! A pending signal will hold on to the user refcount,
423 	 * and we get/put the refcount only when the sigpending count
424 	 * changes from/to zero.
425 	 */
426 	rcu_read_lock();
427 	user = __task_cred(t)->user;
428 	sigpending = atomic_inc_return(&user->sigpending);
429 	if (sigpending == 1)
430 		get_uid(user);
431 	rcu_read_unlock();
432 
433 	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
434 		/*
435 		 * Preallocation does not hold sighand::siglock so it can't
436 		 * use the cache. The lockless caching requires that only
437 		 * one consumer and only one producer run at a time.
438 		 */
439 		q = READ_ONCE(t->sigqueue_cache);
440 		if (!q || sigqueue_flags)
441 			q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
442 		else
443 			WRITE_ONCE(t->sigqueue_cache, NULL);
444 	} else {
445 		print_dropped_signal(sig);
446 	}
447 
448 	if (unlikely(q == NULL)) {
449 		if (atomic_dec_and_test(&user->sigpending))
450 			free_uid(user);
451 	} else {
452 		INIT_LIST_HEAD(&q->list);
453 		q->flags = sigqueue_flags;
454 		q->user = user;
455 	}
456 
457 	return q;
458 }
459 
460 void exit_task_sigqueue_cache(struct task_struct *tsk)
461 {
462 	/* Race free because @tsk is mopped up */
463 	struct sigqueue *q = tsk->sigqueue_cache;
464 
465 	if (q) {
466 		tsk->sigqueue_cache = NULL;
467 		/*
468 		 * Hand it back to the cache as the task might
469 		 * be self reaping which would leak the object.
470 		 */
471 		kmem_cache_free(sigqueue_cachep, q);
472 	}
473 }
474 
475 static void sigqueue_cache_or_free(struct sigqueue *q)
476 {
477 	/*
478 	 * Cache one sigqueue per task. This pairs with the consumer side
479 	 * in __sigqueue_alloc() and needs READ/WRITE_ONCE() to prevent the
480 	 * compiler from store tearing and to tell KCSAN that the data race
481 	 * is intentional when run without holding current->sighand->siglock,
482 	 * which is fine as current obviously cannot run __sigqueue_free()
483 	 * concurrently.
484 	 */
485 	if (!READ_ONCE(current->sigqueue_cache))
486 		WRITE_ONCE(current->sigqueue_cache, q);
487 	else
488 		kmem_cache_free(sigqueue_cachep, q);
489 }
490 
491 static void __sigqueue_free(struct sigqueue *q)
492 {
493 	if (q->flags & SIGQUEUE_PREALLOC)
494 		return;
495 	if (atomic_dec_and_test(&q->user->sigpending))
496 		free_uid(q->user);
497 	sigqueue_cache_or_free(q);
498 }
499 
500 void flush_sigqueue(struct sigpending *queue)
501 {
502 	struct sigqueue *q;
503 
504 	sigemptyset(&queue->signal);
505 	while (!list_empty(&queue->list)) {
506 		q = list_entry(queue->list.next, struct sigqueue , list);
507 		list_del_init(&q->list);
508 		__sigqueue_free(q);
509 	}
510 }
511 
512 /*
513  * Flush all pending signals for the given task.
514  */
515 void flush_signals(struct task_struct *t)
516 {
517 	unsigned long flags;
518 
519 	spin_lock_irqsave(&t->sighand->siglock, flags);
520 	clear_tsk_thread_flag(t, TIF_SIGPENDING);
521 	flush_sigqueue(&t->pending);
522 	flush_sigqueue(&t->signal->shared_pending);
523 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
524 }
525 EXPORT_SYMBOL(flush_signals);
526 
527 #ifdef CONFIG_POSIX_TIMERS
528 static void __flush_itimer_signals(struct sigpending *pending)
529 {
530 	sigset_t signal, retain;
531 	struct sigqueue *q, *n;
532 
533 	signal = pending->signal;
534 	sigemptyset(&retain);
535 
536 	list_for_each_entry_safe(q, n, &pending->list, list) {
537 		int sig = q->info.si_signo;
538 
539 		if (likely(q->info.si_code != SI_TIMER)) {
540 			sigaddset(&retain, sig);
541 		} else {
542 			sigdelset(&signal, sig);
543 			list_del_init(&q->list);
544 			__sigqueue_free(q);
545 		}
546 	}
547 
548 	sigorsets(&pending->signal, &signal, &retain);
549 }
550 
551 void flush_itimer_signals(void)
552 {
553 	struct task_struct *tsk = current;
554 	unsigned long flags;
555 
556 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
557 	__flush_itimer_signals(&tsk->pending);
558 	__flush_itimer_signals(&tsk->signal->shared_pending);
559 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
560 }
561 #endif
562 
563 void ignore_signals(struct task_struct *t)
564 {
565 	int i;
566 
567 	for (i = 0; i < _NSIG; ++i)
568 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
569 
570 	flush_signals(t);
571 }
572 
573 /*
574  * Flush all handlers for a task.
575  */
576 
577 void
578 flush_signal_handlers(struct task_struct *t, int force_default)
579 {
580 	int i;
581 	struct k_sigaction *ka = &t->sighand->action[0];
582 	for (i = _NSIG ; i != 0 ; i--) {
583 		if (force_default || ka->sa.sa_handler != SIG_IGN)
584 			ka->sa.sa_handler = SIG_DFL;
585 		ka->sa.sa_flags = 0;
586 #ifdef __ARCH_HAS_SA_RESTORER
587 		ka->sa.sa_restorer = NULL;
588 #endif
589 		sigemptyset(&ka->sa.sa_mask);
590 		ka++;
591 	}
592 }
593 
594 bool unhandled_signal(struct task_struct *tsk, int sig)
595 {
596 	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
597 	if (is_global_init(tsk))
598 		return true;
599 
600 	if (handler != SIG_IGN && handler != SIG_DFL)
601 		return false;
602 
603 	/* if ptraced, let the tracer determine */
604 	return !tsk->ptrace;
605 }
606 
607 static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
608 			   bool *resched_timer)
609 {
610 	struct sigqueue *q, *first = NULL;
611 
612 	/*
613 	 * Collect the siginfo appropriate to this signal.  Check if
614 	 * there is another siginfo for the same signal.
615 	 */
616 	list_for_each_entry(q, &list->list, list) {
617 		if (q->info.si_signo == sig) {
618 			if (first)
619 				goto still_pending;
620 			first = q;
621 		}
622 	}
623 
624 	sigdelset(&list->signal, sig);
625 
626 	if (first) {
627 still_pending:
628 		list_del_init(&first->list);
629 		copy_siginfo(info, &first->info);
630 
631 		*resched_timer =
632 			(first->flags & SIGQUEUE_PREALLOC) &&
633 			(info->si_code == SI_TIMER) &&
634 			(info->si_sys_private);
635 
636 		__sigqueue_free(first);
637 	} else {
638 		/*
639 		 * Ok, it wasn't in the queue.  This must be
640 		 * a fast-pathed signal or we must have been
641 		 * out of queue space.  So zero out the info.
642 		 */
643 		clear_siginfo(info);
644 		info->si_signo = sig;
645 		info->si_errno = 0;
646 		info->si_code = SI_USER;
647 		info->si_pid = 0;
648 		info->si_uid = 0;
649 	}
650 }
651 
652 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
653 			kernel_siginfo_t *info, bool *resched_timer)
654 {
655 	int sig = next_signal(pending, mask);
656 
657 	if (sig)
658 		collect_signal(sig, pending, info, resched_timer);
659 	return sig;
660 }
661 
662 /*
663  * Dequeue a signal and return the element to the caller, which is
664  * expected to free it.
665  *
666  * All callers have to hold the siglock.
667  */
668 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
669 {
670 	bool resched_timer = false;
671 	int signr;
672 
673 	/* We only dequeue private signals from ourselves, we don't let
674 	 * signalfd steal them
675 	 */
676 	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
677 	if (!signr) {
678 		signr = __dequeue_signal(&tsk->signal->shared_pending,
679 					 mask, info, &resched_timer);
680 #ifdef CONFIG_POSIX_TIMERS
681 		/*
682 		 * itimer signal ?
683 		 *
684 		 * itimers are process shared and we restart periodic
685 		 * itimers in the signal delivery path to prevent DoS
686 		 * attacks in the high resolution timer case. This is
687 		 * compliant with the old way of self-restarting
688 		 * itimers, as the SIGALRM is a legacy signal and only
689 		 * queued once. Changing the restart behaviour to
690 	 * restart the timer in the signal dequeue path also
691 	 * reduces the timer noise on heavily loaded !highres
692 	 * systems.
693 		 */
694 		if (unlikely(signr == SIGALRM)) {
695 			struct hrtimer *tmr = &tsk->signal->real_timer;
696 
697 			if (!hrtimer_is_queued(tmr) &&
698 			    tsk->signal->it_real_incr != 0) {
699 				hrtimer_forward(tmr, tmr->base->get_time(),
700 						tsk->signal->it_real_incr);
701 				hrtimer_restart(tmr);
702 			}
703 		}
704 #endif
705 	}
706 
707 	recalc_sigpending();
708 	if (!signr)
709 		return 0;
710 
711 	if (unlikely(sig_kernel_stop(signr))) {
712 		/*
713 		 * Set a marker that we have dequeued a stop signal.  Our
714 		 * caller might release the siglock and then the pending
715 		 * stop signal it is about to process is no longer in the
716 		 * pending bitmasks, but must still be cleared by a SIGCONT
717 		 * (and overruled by a SIGKILL).  So those cases clear this
718 		 * shared flag after we've set it.  Note that this flag may
719 		 * remain set after the signal we return is ignored or
720 		 * handled.  That doesn't matter because its only purpose
721 		 * is to alert stop-signal processing code when another
722 		 * processor has come along and cleared the flag.
723 		 */
724 		current->jobctl |= JOBCTL_STOP_DEQUEUED;
725 	}
726 #ifdef CONFIG_POSIX_TIMERS
727 	if (resched_timer) {
728 		/*
729 		 * Release the siglock to ensure proper locking order
730 		 * of timer locks outside of siglocks.  Note, we leave
731 		 * irqs disabled here, since the posix-timers code is
732 		 * about to disable them again anyway.
733 		 */
734 		spin_unlock(&tsk->sighand->siglock);
735 		posixtimer_rearm(info);
736 		spin_lock(&tsk->sighand->siglock);
737 
738 		/* Don't expose the si_sys_private value to userspace */
739 		info->si_sys_private = 0;
740 	}
741 #endif
742 	return signr;
743 }
744 EXPORT_SYMBOL_GPL(dequeue_signal);
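
/*
 * Illustrative sketch, not part of this file: how a kernel thread that has
 * opted in to a signal via allow_signal() pulls one pending signal for
 * itself, honouring the "all callers hold the siglock" rule above.  This
 * mirrors the kernel_dequeue_signal() helper in <linux/sched/signal.h>.
 */
static int __maybe_unused example_dequeue_own_signal(void)
{
	kernel_siginfo_t info;
	int signr;

	spin_lock_irq(&current->sighand->siglock);
	signr = dequeue_signal(current, &current->blocked, &info);
	spin_unlock_irq(&current->sighand->siglock);

	return signr;	/* 0 if nothing was pending and unblocked */
}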
745 
746 static int dequeue_synchronous_signal(kernel_siginfo_t *info)
747 {
748 	struct task_struct *tsk = current;
749 	struct sigpending *pending = &tsk->pending;
750 	struct sigqueue *q, *sync = NULL;
751 
752 	/*
753 	 * Might a synchronous signal be in the queue?
754 	 */
755 	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
756 		return 0;
757 
758 	/*
759 	 * Return the first synchronous signal in the queue.
760 	 */
761 	list_for_each_entry(q, &pending->list, list) {
762 		/* Synchronous signals have a positive si_code */
763 		if ((q->info.si_code > SI_USER) &&
764 		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
765 			sync = q;
766 			goto next;
767 		}
768 	}
769 	return 0;
770 next:
771 	/*
772 	 * Check if there is another siginfo for the same signal.
773 	 */
774 	list_for_each_entry_continue(q, &pending->list, list) {
775 		if (q->info.si_signo == sync->info.si_signo)
776 			goto still_pending;
777 	}
778 
779 	sigdelset(&pending->signal, sync->info.si_signo);
780 	recalc_sigpending();
781 still_pending:
782 	list_del_init(&sync->list);
783 	copy_siginfo(info, &sync->info);
784 	__sigqueue_free(sync);
785 	return info->si_signo;
786 }
787 
788 /*
789  * Tell a process that it has a new active signal.
790  *
791  * NOTE! we rely on the previous spin_lock to
792  * lock interrupts for us! We can only be called with
793  * "siglock" held, and the local interrupt must
794  * have been disabled when that got acquired!
795  *
796  * No need to set need_resched since signal event passing
797  * goes through ->blocked
798  */
799 void signal_wake_up_state(struct task_struct *t, unsigned int state)
800 {
801 	set_tsk_thread_flag(t, TIF_SIGPENDING);
802 	/*
803 	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
804 	 * case. We don't check t->state here because there is a race with it
805 	 * executing on another processor and just now entering stopped state.
806 	 * By using wake_up_state, we ensure the process will wake up and
807 	 * handle its death signal.
808 	 */
809 	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
810 		kick_process(t);
811 }
812 
813 /*
814  * Remove signals in mask from the pending set and queue, freeing any
815  * queued sigqueue entries that carried them.
816  *
817  * All callers must be holding the siglock.
818  */
819 static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
820 {
821 	struct sigqueue *q, *n;
822 	sigset_t m;
823 
824 	sigandsets(&m, mask, &s->signal);
825 	if (sigisemptyset(&m))
826 		return;
827 
828 	sigandnsets(&s->signal, &s->signal, mask);
829 	list_for_each_entry_safe(q, n, &s->list, list) {
830 		if (sigismember(mask, q->info.si_signo)) {
831 			list_del_init(&q->list);
832 			__sigqueue_free(q);
833 		}
834 	}
835 }
836 
837 static inline int is_si_special(const struct kernel_siginfo *info)
838 {
839 	return info <= SEND_SIG_PRIV;
840 }
841 
842 static inline bool si_fromuser(const struct kernel_siginfo *info)
843 {
844 	return info == SEND_SIG_NOINFO ||
845 		(!is_si_special(info) && SI_FROMUSER(info));
846 }
847 
848 /*
849  * called with RCU read lock from check_kill_permission()
850  */
851 static bool kill_ok_by_cred(struct task_struct *t)
852 {
853 	const struct cred *cred = current_cred();
854 	const struct cred *tcred = __task_cred(t);
855 
856 	return uid_eq(cred->euid, tcred->suid) ||
857 	       uid_eq(cred->euid, tcred->uid) ||
858 	       uid_eq(cred->uid, tcred->suid) ||
859 	       uid_eq(cred->uid, tcred->uid) ||
860 	       ns_capable(tcred->user_ns, CAP_KILL);
861 }
862 
863 /*
864  * Bad permissions for sending the signal
865  * - the caller must hold the RCU read lock
866  */
867 static int check_kill_permission(int sig, struct kernel_siginfo *info,
868 				 struct task_struct *t)
869 {
870 	struct pid *sid;
871 	int error;
872 
873 	if (!valid_signal(sig))
874 		return -EINVAL;
875 
876 	if (!si_fromuser(info))
877 		return 0;
878 
879 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
880 	if (error)
881 		return error;
882 
883 	if (!same_thread_group(current, t) &&
884 	    !kill_ok_by_cred(t)) {
885 		switch (sig) {
886 		case SIGCONT:
887 			sid = task_session(t);
888 			/*
889 			 * We don't return the error if sid == NULL. The
890 			 * task was unhashed, the caller must notice this.
891 			 */
892 			if (!sid || sid == task_session(current))
893 				break;
894 			fallthrough;
895 		default:
896 			return -EPERM;
897 		}
898 	}
899 
900 	return security_task_kill(t, info, sig, NULL);
901 }
902 
903 /**
904  * ptrace_trap_notify - schedule trap to notify ptracer
905  * @t: tracee wanting to notify tracer
906  *
907  * This function schedules sticky ptrace trap which is cleared on the next
908  * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
909  * ptracer.
910  *
911  * If @t is running, STOP trap will be taken.  If trapped for STOP and
912  * ptracer is listening for events, tracee is woken up so that it can
913  * re-trap for the new event.  If trapped otherwise, STOP trap will be
914  * eventually taken without returning to userland after the existing traps
915  * are finished by PTRACE_CONT.
916  *
917  * CONTEXT:
918  * Must be called with @task->sighand->siglock held.
919  */
920 static void ptrace_trap_notify(struct task_struct *t)
921 {
922 	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
923 	assert_spin_locked(&t->sighand->siglock);
924 
925 	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
926 	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
927 }
928 
929 /*
930  * Handle magic process-wide effects of stop/continue signals. Unlike
931  * the signal actions, these happen immediately at signal-generation
932  * time regardless of blocking, ignoring, or handling.  This does the
933  * actual continuing for SIGCONT, but not the actual stopping for stop
934  * signals. The process stop is done as a signal action for SIG_DFL.
935  *
936  * Returns true if the signal should be actually delivered, otherwise
937  * it should be dropped.
938  */
939 static bool prepare_signal(int sig, struct task_struct *p, bool force)
940 {
941 	struct signal_struct *signal = p->signal;
942 	struct task_struct *t;
943 	sigset_t flush;
944 
945 	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
946 		if (!(signal->flags & SIGNAL_GROUP_EXIT))
947 			return sig == SIGKILL;
948 		/*
949 		 * The process is in the middle of dying, nothing to do.
950 		 */
951 	} else if (sig_kernel_stop(sig)) {
952 		/*
953 		 * This is a stop signal.  Remove SIGCONT from all queues.
954 		 */
955 		siginitset(&flush, sigmask(SIGCONT));
956 		flush_sigqueue_mask(&flush, &signal->shared_pending);
957 		for_each_thread(p, t)
958 			flush_sigqueue_mask(&flush, &t->pending);
959 	} else if (sig == SIGCONT) {
960 		unsigned int why;
961 		/*
962 		 * Remove all stop signals from all queues, wake all threads.
963 		 */
964 		siginitset(&flush, SIG_KERNEL_STOP_MASK);
965 		flush_sigqueue_mask(&flush, &signal->shared_pending);
966 		for_each_thread(p, t) {
967 			flush_sigqueue_mask(&flush, &t->pending);
968 			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
969 			if (likely(!(t->ptrace & PT_SEIZED)))
970 				wake_up_state(t, __TASK_STOPPED);
971 			else
972 				ptrace_trap_notify(t);
973 		}
974 
975 		/*
976 		 * Notify the parent with CLD_CONTINUED if we were stopped.
977 		 *
978 		 * If we were in the middle of a group stop, we pretend it
979 		 * was already finished, and then continued. Since SIGCHLD
980 		 * doesn't queue we report only CLD_STOPPED, as if the next
981 		 * CLD_CONTINUED was dropped.
982 		 */
983 		why = 0;
984 		if (signal->flags & SIGNAL_STOP_STOPPED)
985 			why |= SIGNAL_CLD_CONTINUED;
986 		else if (signal->group_stop_count)
987 			why |= SIGNAL_CLD_STOPPED;
988 
989 		if (why) {
990 			/*
991 			 * The first thread which returns from do_signal_stop()
992 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
993 			 * notify its parent. See get_signal().
994 			 */
995 			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
996 			signal->group_stop_count = 0;
997 			signal->group_exit_code = 0;
998 		}
999 	}
1000 
1001 	return !sig_ignored(p, sig, force);
1002 }
1003 
1004 /*
1005  * Test if P wants to take SIG.  After we've checked all threads with this,
1006  * it's equivalent to finding no threads not blocking SIG.  Any threads not
1007  * blocking SIG were ruled out because they are not running and already
1008  * have pending signals.  Such threads will dequeue from the shared queue
1009  * as soon as they're available, so putting the signal on the shared queue
1010  * will be equivalent to sending it to one such thread.
1011  */
1012 static inline bool wants_signal(int sig, struct task_struct *p)
1013 {
1014 	if (sigismember(&p->blocked, sig))
1015 		return false;
1016 
1017 	if (p->flags & PF_EXITING)
1018 		return false;
1019 
1020 	if (sig == SIGKILL)
1021 		return true;
1022 
1023 	if (task_is_stopped_or_traced(p))
1024 		return false;
1025 
1026 	return task_curr(p) || !task_sigpending(p);
1027 }
1028 
1029 static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
1030 {
1031 	struct signal_struct *signal = p->signal;
1032 	struct task_struct *t;
1033 
1034 	/*
1035 	 * Now find a thread we can wake up to take the signal off the queue.
1036 	 *
1037 	 * If the main thread wants the signal, it gets first crack.
1038 	 * Probably the least surprising to the average bear.
1039 	 */
1040 	if (wants_signal(sig, p))
1041 		t = p;
1042 	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
1043 		/*
1044 		 * There is just one thread and it does not need to be woken.
1045 		 * It will dequeue unblocked signals before it runs again.
1046 		 */
1047 		return;
1048 	else {
1049 		/*
1050 		 * Otherwise try to find a suitable thread.
1051 		 */
1052 		t = signal->curr_target;
1053 		while (!wants_signal(sig, t)) {
1054 			t = next_thread(t);
1055 			if (t == signal->curr_target)
1056 				/*
1057 				 * No thread needs to be woken.
1058 				 * Any eligible threads will see
1059 				 * the signal in the queue soon.
1060 				 */
1061 				return;
1062 		}
1063 		signal->curr_target = t;
1064 	}
1065 
1066 	/*
1067 	 * Found a killable thread.  If the signal will be fatal,
1068 	 * then start taking the whole group down immediately.
1069 	 */
1070 	if (sig_fatal(p, sig) &&
1071 	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
1072 	    !sigismember(&t->real_blocked, sig) &&
1073 	    (sig == SIGKILL || !p->ptrace)) {
1074 		/*
1075 		 * This signal will be fatal to the whole group.
1076 		 */
1077 		if (!sig_kernel_coredump(sig)) {
1078 			/*
1079 			 * Start a group exit and wake everybody up.
1080 			 * This way we don't have other threads
1081 			 * running and doing things after a slower
1082 			 * thread has the fatal signal pending.
1083 			 */
1084 			signal->flags = SIGNAL_GROUP_EXIT;
1085 			signal->group_exit_code = sig;
1086 			signal->group_stop_count = 0;
1087 			t = p;
1088 			do {
1089 				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1090 				sigaddset(&t->pending.signal, SIGKILL);
1091 				signal_wake_up(t, 1);
1092 			} while_each_thread(p, t);
1093 			return;
1094 		}
1095 	}
1096 
1097 	/*
1098 	 * The signal is already in the shared-pending queue.
1099 	 * Tell the chosen thread to wake up and dequeue it.
1100 	 */
1101 	signal_wake_up(t, sig == SIGKILL);
1102 	return;
1103 }
1104 
1105 static inline bool legacy_queue(struct sigpending *signals, int sig)
1106 {
1107 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1108 }
1109 
1110 static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1111 			enum pid_type type, bool force)
1112 {
1113 	struct sigpending *pending;
1114 	struct sigqueue *q;
1115 	int override_rlimit;
1116 	int ret = 0, result;
1117 
1118 	assert_spin_locked(&t->sighand->siglock);
1119 
1120 	result = TRACE_SIGNAL_IGNORED;
1121 	if (!prepare_signal(sig, t, force))
1122 		goto ret;
1123 
1124 	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1125 	/*
1126 	 * Short-circuit ignored signals and support queuing
1127 	 * exactly one non-rt signal, so that we can get more
1128 	 * detailed information about the cause of the signal.
1129 	 */
1130 	result = TRACE_SIGNAL_ALREADY_PENDING;
1131 	if (legacy_queue(pending, sig))
1132 		goto ret;
1133 
1134 	result = TRACE_SIGNAL_DELIVERED;
1135 	/*
1136 	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
1137 	 */
1138 	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1139 		goto out_set;
1140 
1141 	/*
1142 	 * Real-time signals must be queued if sent by sigqueue, or
1143 	 * some other real-time mechanism.  It is implementation
1144 	 * defined whether kill() does so.  We attempt to do so, on
1145 	 * the principle of least surprise, but since kill is not
1146 	 * allowed to fail with EAGAIN when low on memory we just
1147 	 * make sure at least one signal gets delivered and don't
1148 	 * pass on the info struct.
1149 	 */
1150 	if (sig < SIGRTMIN)
1151 		override_rlimit = (is_si_special(info) || info->si_code >= 0);
1152 	else
1153 		override_rlimit = 0;
1154 
1155 	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);
1156 
1157 	if (q) {
1158 		list_add_tail(&q->list, &pending->list);
1159 		switch ((unsigned long) info) {
1160 		case (unsigned long) SEND_SIG_NOINFO:
1161 			clear_siginfo(&q->info);
1162 			q->info.si_signo = sig;
1163 			q->info.si_errno = 0;
1164 			q->info.si_code = SI_USER;
1165 			q->info.si_pid = task_tgid_nr_ns(current,
1166 							task_active_pid_ns(t));
1167 			rcu_read_lock();
1168 			q->info.si_uid =
1169 				from_kuid_munged(task_cred_xxx(t, user_ns),
1170 						 current_uid());
1171 			rcu_read_unlock();
1172 			break;
1173 		case (unsigned long) SEND_SIG_PRIV:
1174 			clear_siginfo(&q->info);
1175 			q->info.si_signo = sig;
1176 			q->info.si_errno = 0;
1177 			q->info.si_code = SI_KERNEL;
1178 			q->info.si_pid = 0;
1179 			q->info.si_uid = 0;
1180 			break;
1181 		default:
1182 			copy_siginfo(&q->info, info);
1183 			break;
1184 		}
1185 	} else if (!is_si_special(info) &&
1186 		   sig >= SIGRTMIN && info->si_code != SI_USER) {
1187 		/*
1188 		 * Queue overflow, abort.  We may abort if the
1189 		 * signal was rt and sent by user using something
1190 		 * other than kill().
1191 		 */
1192 		result = TRACE_SIGNAL_OVERFLOW_FAIL;
1193 		ret = -EAGAIN;
1194 		goto ret;
1195 	} else {
1196 		/*
1197 		 * This is a silent loss of information.  We still
1198 		 * send the signal, but the *info bits are lost.
1199 		 */
1200 		result = TRACE_SIGNAL_LOSE_INFO;
1201 	}
1202 
1203 out_set:
1204 	signalfd_notify(t, sig);
1205 	sigaddset(&pending->signal, sig);
1206 
1207 	/* Let multiprocess signals appear after on-going forks */
1208 	if (type > PIDTYPE_TGID) {
1209 		struct multiprocess_signals *delayed;
1210 		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1211 			sigset_t *signal = &delayed->signal;
1212 			/* Can't queue both a stop and a continue signal */
1213 			if (sig == SIGCONT)
1214 				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1215 			else if (sig_kernel_stop(sig))
1216 				sigdelset(signal, SIGCONT);
1217 			sigaddset(signal, sig);
1218 		}
1219 	}
1220 
1221 	complete_signal(sig, t, type);
1222 ret:
1223 	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1224 	return ret;
1225 }
1226 
1227 static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1228 {
1229 	bool ret = false;
1230 	switch (siginfo_layout(info->si_signo, info->si_code)) {
1231 	case SIL_KILL:
1232 	case SIL_CHLD:
1233 	case SIL_RT:
1234 		ret = true;
1235 		break;
1236 	case SIL_TIMER:
1237 	case SIL_POLL:
1238 	case SIL_FAULT:
1239 	case SIL_FAULT_TRAPNO:
1240 	case SIL_FAULT_MCEERR:
1241 	case SIL_FAULT_BNDERR:
1242 	case SIL_FAULT_PKUERR:
1243 	case SIL_PERF_EVENT:
1244 	case SIL_SYS:
1245 		ret = false;
1246 		break;
1247 	}
1248 	return ret;
1249 }
1250 
1251 static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1252 			enum pid_type type)
1253 {
1254 	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1255 	bool force = false;
1256 
1257 	if (info == SEND_SIG_NOINFO) {
1258 		/* Force if sent from an ancestor pid namespace */
1259 		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1260 	} else if (info == SEND_SIG_PRIV) {
1261 		/* Don't ignore kernel generated signals */
1262 		force = true;
1263 	} else if (has_si_pid_and_uid(info)) {
1264 		/* SIGKILL and SIGSTOP are special or have ids */
1265 		struct user_namespace *t_user_ns;
1266 
1267 		rcu_read_lock();
1268 		t_user_ns = task_cred_xxx(t, user_ns);
1269 		if (current_user_ns() != t_user_ns) {
1270 			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1271 			info->si_uid = from_kuid_munged(t_user_ns, uid);
1272 		}
1273 		rcu_read_unlock();
1274 
1275 		/* A kernel generated signal? */
1276 		force = (info->si_code == SI_KERNEL);
1277 
1278 		/* From an ancestor pid namespace? */
1279 		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1280 			info->si_pid = 0;
1281 			force = true;
1282 		}
1283 	}
1284 	return __send_signal(sig, info, t, type, force);
1285 }
1286 
1287 static void print_fatal_signal(int signr)
1288 {
1289 	struct pt_regs *regs = signal_pt_regs();
1290 	pr_info("potentially unexpected fatal signal %d.\n", signr);
1291 
1292 #if defined(__i386__) && !defined(__arch_um__)
1293 	pr_info("code at %08lx: ", regs->ip);
1294 	{
1295 		int i;
1296 		for (i = 0; i < 16; i++) {
1297 			unsigned char insn;
1298 
1299 			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1300 				break;
1301 			pr_cont("%02x ", insn);
1302 		}
1303 	}
1304 	pr_cont("\n");
1305 #endif
1306 	preempt_disable();
1307 	show_regs(regs);
1308 	preempt_enable();
1309 }
1310 
1311 static int __init setup_print_fatal_signals(char *str)
1312 {
1313 	get_option (&str, &print_fatal_signals);
1314 
1315 	return 1;
1316 }
1317 
1318 __setup("print-fatal-signals=", setup_print_fatal_signals);
1319 
1320 int
1321 __group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1322 {
1323 	return send_signal(sig, info, p, PIDTYPE_TGID);
1324 }
1325 
1326 int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1327 			enum pid_type type)
1328 {
1329 	unsigned long flags;
1330 	int ret = -ESRCH;
1331 
1332 	if (lock_task_sighand(p, &flags)) {
1333 		ret = send_signal(sig, info, p, type);
1334 		unlock_task_sighand(p, &flags);
1335 	}
1336 
1337 	return ret;
1338 }
1339 
1340 /*
1341  * Force a signal that the process can't ignore: if necessary
1342  * we unblock the signal and change any SIG_IGN to SIG_DFL.
1343  *
1344  * Note: If we unblock the signal, we always reset it to SIG_DFL,
1345  * since we do not want to have a signal handler that was blocked
1346  * be invoked when user space had explicitly blocked it.
1347  *
1348  * We don't want to have recursive SIGSEGV's etc, for example,
1349  * that is why we also clear SIGNAL_UNKILLABLE.
1350  */
1351 static int
1352 force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
1353 {
1354 	unsigned long int flags;
1355 	int ret, blocked, ignored;
1356 	struct k_sigaction *action;
1357 	int sig = info->si_signo;
1358 
1359 	spin_lock_irqsave(&t->sighand->siglock, flags);
1360 	action = &t->sighand->action[sig-1];
1361 	ignored = action->sa.sa_handler == SIG_IGN;
1362 	blocked = sigismember(&t->blocked, sig);
1363 	if (blocked || ignored) {
1364 		action->sa.sa_handler = SIG_DFL;
1365 		if (blocked) {
1366 			sigdelset(&t->blocked, sig);
1367 			recalc_sigpending_and_wake(t);
1368 		}
1369 	}
1370 	/*
1371 	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1372 	 * debugging to leave init killable.
1373 	 */
1374 	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
1375 		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1376 	ret = send_signal(sig, info, t, PIDTYPE_PID);
1377 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1378 
1379 	return ret;
1380 }
1381 
1382 int force_sig_info(struct kernel_siginfo *info)
1383 {
1384 	return force_sig_info_to_task(info, current);
1385 }
1386 
1387 /*
1388  * Nuke all other threads in the group.
1389  */
1390 int zap_other_threads(struct task_struct *p)
1391 {
1392 	struct task_struct *t = p;
1393 	int count = 0;
1394 
1395 	p->signal->group_stop_count = 0;
1396 
1397 	while_each_thread(p, t) {
1398 		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1399 		count++;
1400 
1401 		/* Don't bother with already dead threads */
1402 		if (t->exit_state)
1403 			continue;
1404 		sigaddset(&t->pending.signal, SIGKILL);
1405 		signal_wake_up(t, 1);
1406 	}
1407 
1408 	return count;
1409 }
1410 
1411 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1412 					   unsigned long *flags)
1413 {
1414 	struct sighand_struct *sighand;
1415 
1416 	rcu_read_lock();
1417 	for (;;) {
1418 		sighand = rcu_dereference(tsk->sighand);
1419 		if (unlikely(sighand == NULL))
1420 			break;
1421 
1422 		/*
1423 		 * This sighand can be already freed and even reused, but
1424 		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1425 		 * initializes ->siglock: this slab can't go away, it has
1426 		 * the same object type, ->siglock can't be reinitialized.
1427 		 *
1428 		 * We need to ensure that tsk->sighand is still the same
1429 		 * after we take the lock, we can race with de_thread() or
1430 		 * __exit_signal(). In the latter case the next iteration
1431 		 * must see ->sighand == NULL.
1432 		 */
1433 		spin_lock_irqsave(&sighand->siglock, *flags);
1434 		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
1435 			break;
1436 		spin_unlock_irqrestore(&sighand->siglock, *flags);
1437 	}
1438 	rcu_read_unlock();
1439 
1440 	return sighand;
1441 }
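
/*
 * Illustrative sketch, not part of this file: the usual calling pattern for
 * the lock_task_sighand()/unlock_task_sighand() wrappers built on top of
 * __lock_task_sighand().  The caller is assumed to hold a reference on
 * @tsk or be inside an RCU read-side critical section.
 */
static bool __maybe_unused example_lock_sighand(struct task_struct *tsk)
{
	unsigned long flags;
	bool locked = false;

	if (lock_task_sighand(tsk, &flags)) {
		/* ->sighand is stable here; signal state may be inspected. */
		locked = true;
		unlock_task_sighand(tsk, &flags);
	}
	return locked;	/* false means the task was already dead */
}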
1442 
1443 /*
1444  * send signal info to all the members of a group
1445  */
1446 int group_send_sig_info(int sig, struct kernel_siginfo *info,
1447 			struct task_struct *p, enum pid_type type)
1448 {
1449 	int ret;
1450 
1451 	rcu_read_lock();
1452 	ret = check_kill_permission(sig, info, p);
1453 	rcu_read_unlock();
1454 
1455 	if (!ret && sig)
1456 		ret = do_send_sig_info(sig, info, p, type);
1457 
1458 	return ret;
1459 }
1460 
1461 /*
1462  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1463  * control characters do (^C, ^Z etc)
1464  * - the caller must hold at least a readlock on tasklist_lock
1465  */
1466 int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1467 {
1468 	struct task_struct *p = NULL;
1469 	int retval, success;
1470 
1471 	success = 0;
1472 	retval = -ESRCH;
1473 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1474 		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1475 		success |= !err;
1476 		retval = err;
1477 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1478 	return success ? 0 : retval;
1479 }
1480 
1481 int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1482 {
1483 	int error = -ESRCH;
1484 	struct task_struct *p;
1485 
1486 	for (;;) {
1487 		rcu_read_lock();
1488 		p = pid_task(pid, PIDTYPE_PID);
1489 		if (p)
1490 			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
1491 		rcu_read_unlock();
1492 		if (likely(!p || error != -ESRCH))
1493 			return error;
1494 
1495 		/*
1496 		 * The task was unhashed in between, try again.  If it
1497 		 * is dead, pid_task() will return NULL, if we race with
1498 		 * de_thread() it will find the new leader.
1499 		 */
1500 	}
1501 }
1502 
1503 static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1504 {
1505 	int error;
1506 	rcu_read_lock();
1507 	error = kill_pid_info(sig, info, find_vpid(pid));
1508 	rcu_read_unlock();
1509 	return error;
1510 }
1511 
1512 static inline bool kill_as_cred_perm(const struct cred *cred,
1513 				     struct task_struct *target)
1514 {
1515 	const struct cred *pcred = __task_cred(target);
1516 
1517 	return uid_eq(cred->euid, pcred->suid) ||
1518 	       uid_eq(cred->euid, pcred->uid) ||
1519 	       uid_eq(cred->uid, pcred->suid) ||
1520 	       uid_eq(cred->uid, pcred->uid);
1521 }
1522 
1523 /*
1524  * The usb asyncio usage of siginfo is wrong.  The glibc support
1525  * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1526  * AKA after the generic fields:
1527  *	kernel_pid_t	si_pid;
1528  *	kernel_uid32_t	si_uid;
1529  *	sigval_t	si_value;
1530  *
1531  * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1532  * after the generic fields is:
1533  *	void __user 	*si_addr;
1534  *
1535  * This is a practical problem when there is a 64bit big endian kernel
1536  * and a 32bit userspace.  The 32bit address will be encoded in the low
1537  * 32bits of the pointer, and those low 32bits will be stored at a higher
1538  * address than they would appear in a 32 bit pointer.  So userspace will
1539  * not see the address it was expecting for its completions.
1540  *
1541  * There is nothing in the encoding that can allow
1542  * copy_siginfo_to_user32 to detect this confusion of formats, so
1543  * handle this by requiring the caller of kill_pid_usb_asyncio to
1544  * notice when this situation takes place and to store the 32bit
1545  * pointer in sival_int, instead of sival_ptr, of the sigval_t addr
1546  * parameter.
1547  */
1548 int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1549 			 struct pid *pid, const struct cred *cred)
1550 {
1551 	struct kernel_siginfo info;
1552 	struct task_struct *p;
1553 	unsigned long flags;
1554 	int ret = -EINVAL;
1555 
1556 	if (!valid_signal(sig))
1557 		return ret;
1558 
1559 	clear_siginfo(&info);
1560 	info.si_signo = sig;
1561 	info.si_errno = errno;
1562 	info.si_code = SI_ASYNCIO;
1563 	*((sigval_t *)&info.si_pid) = addr;
1564 
1565 	rcu_read_lock();
1566 	p = pid_task(pid, PIDTYPE_PID);
1567 	if (!p) {
1568 		ret = -ESRCH;
1569 		goto out_unlock;
1570 	}
1571 	if (!kill_as_cred_perm(cred, p)) {
1572 		ret = -EPERM;
1573 		goto out_unlock;
1574 	}
1575 	ret = security_task_kill(p, &info, sig, cred);
1576 	if (ret)
1577 		goto out_unlock;
1578 
1579 	if (sig) {
1580 		if (lock_task_sighand(p, &flags)) {
1581 			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
1582 			unlock_task_sighand(p, &flags);
1583 		} else
1584 			ret = -ESRCH;
1585 	}
1586 out_unlock:
1587 	rcu_read_unlock();
1588 	return ret;
1589 }
1590 EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
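
/*
 * Illustrative sketch, not part of this file (caller, signal number and
 * helper name are hypothetical): how a driver would follow the convention
 * described above when completing I/O for a compat (32-bit) task on a
 * 64-bit kernel: the user pointer goes in sival_int rather than sival_ptr
 * so copy_siginfo_to_user32() lays it out where userspace expects it.
 */
static int __maybe_unused example_usb_asyncio_notify(struct pid *pid,
						     const struct cred *cred,
						     void __user *user_ctx,
						     bool compat_task)
{
	sigval_t addr;

	if (compat_task)
		addr.sival_int = lower_32_bits((unsigned long)user_ctx);
	else
		addr.sival_ptr = user_ctx;

	return kill_pid_usb_asyncio(SIGIO, 0, addr, pid, cred);
}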
1591 
1592 /*
1593  * kill_something_info() interprets pid in interesting ways just like kill(2).
1594  *
1595  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1596  * is probably wrong.  Should make it like BSD or SYSV.
1597  */
1598 
1599 static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1600 {
1601 	int ret;
1602 
1603 	if (pid > 0)
1604 		return kill_proc_info(sig, info, pid);
1605 
1606 	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
1607 	if (pid == INT_MIN)
1608 		return -ESRCH;
1609 
1610 	read_lock(&tasklist_lock);
1611 	if (pid != -1) {
1612 		ret = __kill_pgrp_info(sig, info,
1613 				pid ? find_vpid(-pid) : task_pgrp(current));
1614 	} else {
1615 		int retval = 0, count = 0;
1616 		struct task_struct * p;
1617 
1618 		for_each_process(p) {
1619 			if (task_pid_vnr(p) > 1 &&
1620 					!same_thread_group(p, current)) {
1621 				int err = group_send_sig_info(sig, info, p,
1622 							      PIDTYPE_MAX);
1623 				++count;
1624 				if (err != -EPERM)
1625 					retval = err;
1626 			}
1627 		}
1628 		ret = count ? retval : -ESRCH;
1629 	}
1630 	read_unlock(&tasklist_lock);
1631 
1632 	return ret;
1633 }
1634 
1635 /*
1636  * These are for backward compatibility with the rest of the kernel source.
1637  */
1638 
1639 int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1640 {
1641 	/*
1642 	 * Make sure legacy kernel users don't send in bad values
1643 	 * (normal paths check this in check_kill_permission).
1644 	 */
1645 	if (!valid_signal(sig))
1646 		return -EINVAL;
1647 
1648 	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1649 }
1650 EXPORT_SYMBOL(send_sig_info);
1651 
1652 #define __si_special(priv) \
1653 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1654 
1655 int
1656 send_sig(int sig, struct task_struct *p, int priv)
1657 {
1658 	return send_sig_info(sig, __si_special(priv), p);
1659 }
1660 EXPORT_SYMBOL(send_sig);
1661 
1662 void force_sig(int sig)
1663 {
1664 	struct kernel_siginfo info;
1665 
1666 	clear_siginfo(&info);
1667 	info.si_signo = sig;
1668 	info.si_errno = 0;
1669 	info.si_code = SI_KERNEL;
1670 	info.si_pid = 0;
1671 	info.si_uid = 0;
1672 	force_sig_info(&info);
1673 }
1674 EXPORT_SYMBOL(force_sig);
1675 
1676 /*
1677  * When things go south during signal handling, we
1678  * will force a SIGSEGV. And if the signal that caused
1679  * the problem was already a SIGSEGV, we'll want to
1680  * make sure we don't even try to deliver the signal.
1681  */
1682 void force_sigsegv(int sig)
1683 {
1684 	struct task_struct *p = current;
1685 
1686 	if (sig == SIGSEGV) {
1687 		unsigned long flags;
1688 		spin_lock_irqsave(&p->sighand->siglock, flags);
1689 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1690 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1691 	}
1692 	force_sig(SIGSEGV);
1693 }
1694 
1695 int force_sig_fault_to_task(int sig, int code, void __user *addr
1696 	___ARCH_SI_TRAPNO(int trapno)
1697 	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1698 	, struct task_struct *t)
1699 {
1700 	struct kernel_siginfo info;
1701 
1702 	clear_siginfo(&info);
1703 	info.si_signo = sig;
1704 	info.si_errno = 0;
1705 	info.si_code  = code;
1706 	info.si_addr  = addr;
1707 #ifdef __ARCH_SI_TRAPNO
1708 	info.si_trapno = trapno;
1709 #endif
1710 #ifdef __ia64__
1711 	info.si_imm = imm;
1712 	info.si_flags = flags;
1713 	info.si_isr = isr;
1714 #endif
1715 	return force_sig_info_to_task(&info, t);
1716 }
1717 
1718 int force_sig_fault(int sig, int code, void __user *addr
1719 	___ARCH_SI_TRAPNO(int trapno)
1720 	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
1721 {
1722 	return force_sig_fault_to_task(sig, code, addr
1723 				       ___ARCH_SI_TRAPNO(trapno)
1724 				       ___ARCH_SI_IA64(imm, flags, isr), current);
1725 }
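
/*
 * Illustrative sketch, not part of this file (function name and fault code
 * are assumptions): how an architecture's page-fault handler, on an arch
 * without the __ARCH_SI_TRAPNO/__ia64__ extras, typically reports a bad
 * user access to the current task with force_sig_fault().
 */
static void __maybe_unused example_report_user_fault(void __user *fault_addr)
{
	force_sig_fault(SIGSEGV, SEGV_MAPERR, fault_addr);
}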
1726 
1727 int send_sig_fault(int sig, int code, void __user *addr
1728 	___ARCH_SI_TRAPNO(int trapno)
1729 	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1730 	, struct task_struct *t)
1731 {
1732 	struct kernel_siginfo info;
1733 
1734 	clear_siginfo(&info);
1735 	info.si_signo = sig;
1736 	info.si_errno = 0;
1737 	info.si_code  = code;
1738 	info.si_addr  = addr;
1739 #ifdef __ARCH_SI_TRAPNO
1740 	info.si_trapno = trapno;
1741 #endif
1742 #ifdef __ia64__
1743 	info.si_imm = imm;
1744 	info.si_flags = flags;
1745 	info.si_isr = isr;
1746 #endif
1747 	return send_sig_info(info.si_signo, &info, t);
1748 }
1749 
1750 int force_sig_mceerr(int code, void __user *addr, short lsb)
1751 {
1752 	struct kernel_siginfo info;
1753 
1754 	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1755 	clear_siginfo(&info);
1756 	info.si_signo = SIGBUS;
1757 	info.si_errno = 0;
1758 	info.si_code = code;
1759 	info.si_addr = addr;
1760 	info.si_addr_lsb = lsb;
1761 	return force_sig_info(&info);
1762 }
1763 
1764 int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1765 {
1766 	struct kernel_siginfo info;
1767 
1768 	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1769 	clear_siginfo(&info);
1770 	info.si_signo = SIGBUS;
1771 	info.si_errno = 0;
1772 	info.si_code = code;
1773 	info.si_addr = addr;
1774 	info.si_addr_lsb = lsb;
1775 	return send_sig_info(info.si_signo, &info, t);
1776 }
1777 EXPORT_SYMBOL(send_sig_mceerr);
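
/*
 * Illustrative sketch, not part of this file and loosely modelled on the
 * memory-failure path: an "action optional" machine-check notification to
 * a task that had a now-poisoned page mapped.  Using PAGE_SHIFT as the
 * address granularity is an assumption for the example.
 */
static int __maybe_unused example_notify_poison(struct task_struct *t,
						void __user *addr)
{
	return send_sig_mceerr(BUS_MCEERR_AO, addr, PAGE_SHIFT, t);
}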
1778 
1779 int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1780 {
1781 	struct kernel_siginfo info;
1782 
1783 	clear_siginfo(&info);
1784 	info.si_signo = SIGSEGV;
1785 	info.si_errno = 0;
1786 	info.si_code  = SEGV_BNDERR;
1787 	info.si_addr  = addr;
1788 	info.si_lower = lower;
1789 	info.si_upper = upper;
1790 	return force_sig_info(&info);
1791 }
1792 
1793 #ifdef SEGV_PKUERR
1794 int force_sig_pkuerr(void __user *addr, u32 pkey)
1795 {
1796 	struct kernel_siginfo info;
1797 
1798 	clear_siginfo(&info);
1799 	info.si_signo = SIGSEGV;
1800 	info.si_errno = 0;
1801 	info.si_code  = SEGV_PKUERR;
1802 	info.si_addr  = addr;
1803 	info.si_pkey  = pkey;
1804 	return force_sig_info(&info);
1805 }
1806 #endif
1807 
1808 int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
1809 {
1810 	struct kernel_siginfo info;
1811 
1812 	clear_siginfo(&info);
1813 	info.si_signo     = SIGTRAP;
1814 	info.si_errno     = 0;
1815 	info.si_code      = TRAP_PERF;
1816 	info.si_addr      = addr;
1817 	info.si_perf_data = sig_data;
1818 	info.si_perf_type = type;
1819 
1820 	return force_sig_info(&info);
1821 }
1822 
1823 /* For the crazy architectures that include trap information in
1824  * the errno field, instead of an actual errno value.
1825  */
1826 int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1827 {
1828 	struct kernel_siginfo info;
1829 
1830 	clear_siginfo(&info);
1831 	info.si_signo = SIGTRAP;
1832 	info.si_errno = errno;
1833 	info.si_code  = TRAP_HWBKPT;
1834 	info.si_addr  = addr;
1835 	return force_sig_info(&info);
1836 }
1837 
1838 int kill_pgrp(struct pid *pid, int sig, int priv)
1839 {
1840 	int ret;
1841 
1842 	read_lock(&tasklist_lock);
1843 	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1844 	read_unlock(&tasklist_lock);
1845 
1846 	return ret;
1847 }
1848 EXPORT_SYMBOL(kill_pgrp);
1849 
1850 int kill_pid(struct pid *pid, int sig, int priv)
1851 {
1852 	return kill_pid_info(sig, __si_special(priv), pid);
1853 }
1854 EXPORT_SYMBOL(kill_pid);
1855 
1856 /*
1857  * These functions support sending signals using preallocated sigqueue
1858  * structures.  This is needed "because realtime applications cannot
1859  * afford to lose notifications of asynchronous events, like timer
1860  * expirations or I/O completions".  In the case of POSIX Timers
1861  * we allocate the sigqueue structure from the timer_create.  If this
1862  * allocation fails we are able to report the failure to the application
1863  * with an EAGAIN error.
1864  */
1865 struct sigqueue *sigqueue_alloc(void)
1866 {
1867 	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
1868 }
1869 
1870 void sigqueue_free(struct sigqueue *q)
1871 {
1872 	unsigned long flags;
1873 	spinlock_t *lock = &current->sighand->siglock;
1874 
1875 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1876 	/*
1877 	 * We must hold ->siglock while testing q->list
1878 	 * to serialize with collect_signal() or with
1879 	 * __exit_signal()->flush_sigqueue().
1880 	 */
1881 	spin_lock_irqsave(lock, flags);
1882 	q->flags &= ~SIGQUEUE_PREALLOC;
1883 	/*
1884 	 * If it is queued it will be freed when dequeued,
1885 	 * like the "regular" sigqueue.
1886 	 */
1887 	if (!list_empty(&q->list))
1888 		q = NULL;
1889 	spin_unlock_irqrestore(lock, flags);
1890 
1891 	if (q)
1892 		__sigqueue_free(q);
1893 }
1894 
1895 int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1896 {
1897 	int sig = q->info.si_signo;
1898 	struct sigpending *pending;
1899 	struct task_struct *t;
1900 	unsigned long flags;
1901 	int ret, result;
1902 
1903 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1904 
1905 	ret = -1;
1906 	rcu_read_lock();
1907 	t = pid_task(pid, type);
1908 	if (!t || !likely(lock_task_sighand(t, &flags)))
1909 		goto ret;
1910 
1911 	ret = 1; /* the signal is ignored */
1912 	result = TRACE_SIGNAL_IGNORED;
1913 	if (!prepare_signal(sig, t, false))
1914 		goto out;
1915 
1916 	ret = 0;
1917 	if (unlikely(!list_empty(&q->list))) {
1918 		/*
1919 		 * If an SI_TIMER entry is already queued, just increment
1920 		 * the overrun count.
1921 		 */
1922 		BUG_ON(q->info.si_code != SI_TIMER);
1923 		q->info.si_overrun++;
1924 		result = TRACE_SIGNAL_ALREADY_PENDING;
1925 		goto out;
1926 	}
1927 	q->info.si_overrun = 0;
1928 
1929 	signalfd_notify(t, sig);
1930 	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1931 	list_add_tail(&q->list, &pending->list);
1932 	sigaddset(&pending->signal, sig);
1933 	complete_signal(sig, t, type);
1934 	result = TRACE_SIGNAL_DELIVERED;
1935 out:
1936 	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
1937 	unlock_task_sighand(t, &flags);
1938 ret:
1939 	rcu_read_unlock();
1940 	return ret;
1941 }
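
/*
 * Illustrative sketch of how the preallocated sigqueue is used over a
 * POSIX timer's lifetime (heavily simplified; the real code lives in
 * kernel/time/posix-timers.c and the field names are assumptions here):
 *
 *	timer_create():	timr->sigq = sigqueue_alloc();	// NULL -> -EAGAIN
 *	timer expiry:	send_sigqueue(timr->sigq, pid, type);
 *			// if still queued, only si_overrun is bumped
 *	timer_delete():	sigqueue_free(timr->sigq);
 */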
1942 
1943 static void do_notify_pidfd(struct task_struct *task)
1944 {
1945 	struct pid *pid;
1946 
1947 	WARN_ON(task->exit_state == 0);
1948 	pid = task_pid(task);
1949 	wake_up_all(&pid->wait_pidfd);
1950 }
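
/*
 * Illustrative userspace sketch of what this wakeup enables: a process
 * holding a pidfd (from pidfd_open(2) or clone(2) with CLONE_PIDFD) can
 * poll it and is woken once the task has exited:
 *
 *	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *	siginfo_t info;
 *
 *	poll(&pfd, 1, -1);			// returns once the task has exited
 *	waitid(P_PIDFD, pidfd, &info, WEXITED);	// reap it, if it is our child
 */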
1951 
1952 /*
1953  * Let a parent know about the death of a child.
1954  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1955  *
1956  * Returns true if our parent ignored us and so we've switched to
1957  * self-reaping.
1958  */
1959 bool do_notify_parent(struct task_struct *tsk, int sig)
1960 {
1961 	struct kernel_siginfo info;
1962 	unsigned long flags;
1963 	struct sighand_struct *psig;
1964 	bool autoreap = false;
1965 	u64 utime, stime;
1966 
1967 	BUG_ON(sig == -1);
1968 
1969  	/* do_notify_parent_cldstop should have been called instead.  */
1970  	BUG_ON(task_is_stopped_or_traced(tsk));
1971 
1972 	BUG_ON(!tsk->ptrace &&
1973 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1974 
1975 	/* Wake up all pidfd waiters */
1976 	do_notify_pidfd(tsk);
1977 
1978 	if (sig != SIGCHLD) {
1979 		/*
1980 		 * This is only possible if parent == real_parent.
1981 		 * Check if it has changed security domain.
1982 		 */
1983 		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
1984 			sig = SIGCHLD;
1985 	}
1986 
1987 	clear_siginfo(&info);
1988 	info.si_signo = sig;
1989 	info.si_errno = 0;
1990 	/*
1991 	 * We are under tasklist_lock here so our parent is tied to
1992 	 * us and cannot change.
1993 	 *
1994 	 * task_active_pid_ns will always return the same pid namespace
1995 	 * until a task passes through release_task.
1996 	 *
1997 	 * write_lock() currently calls preempt_disable() which is the
1998 	 * same as rcu_read_lock(), but according to Oleg it is not
1999 	 * correct to rely on this.
2000 	 */
2001 	rcu_read_lock();
2002 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2003 	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2004 				       task_uid(tsk));
2005 	rcu_read_unlock();
2006 
2007 	task_cputime(tsk, &utime, &stime);
2008 	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2009 	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2010 
2011 	info.si_status = tsk->exit_code & 0x7f;
2012 	if (tsk->exit_code & 0x80)
2013 		info.si_code = CLD_DUMPED;
2014 	else if (tsk->exit_code & 0x7f)
2015 		info.si_code = CLD_KILLED;
2016 	else {
2017 		info.si_code = CLD_EXITED;
2018 		info.si_status = tsk->exit_code >> 8;
2019 	}
2020 
2021 	psig = tsk->parent->sighand;
2022 	spin_lock_irqsave(&psig->siglock, flags);
2023 	if (!tsk->ptrace && sig == SIGCHLD &&
2024 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2025 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2026 		/*
2027 		 * We are exiting and our parent doesn't care.  POSIX.1
2028 		 * defines special semantics for setting SIGCHLD to SIG_IGN
2029 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
2030 		 * automatically and not left for our parent's wait4 call.
2031 		 * Rather than having the parent do it as a magic kind of
2032 		 * signal handler, we just set this to tell do_exit that we
2033 		 * can be cleaned up without becoming a zombie.  Note that
2034 		 * we still call __wake_up_parent in this case, because a
2035 		 * blocked sys_wait4 might now return -ECHILD.
2036 		 *
2037 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2038 		 * is implementation-defined: we do (if you don't want
2039 		 * it, just use SIG_IGN instead).
2040 		 */
2041 		autoreap = true;
2042 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2043 			sig = 0;
2044 	}
2045 	/*
2046 	 * Send with __send_signal as si_pid and si_uid are in the
2047 	 * parent's namespaces.
2048 	 */
2049 	if (valid_signal(sig) && sig)
2050 		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2051 	__wake_up_parent(tsk, tsk->parent);
2052 	spin_unlock_irqrestore(&psig->siglock, flags);
2053 
2054 	return autoreap;
2055 }
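
/*
 * Worked example of the exit_code decoding above (illustrative only):
 * a child that called exit(3) has exit_code == 0x300, so its parent sees
 * si_code == CLD_EXITED and si_status == 3; a child killed by SIGSEGV
 * with a core dump has exit_code == (SIGSEGV | 0x80), so its parent sees
 * si_code == CLD_DUMPED and si_status == SIGSEGV.
 */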
2056 
2057 /**
2058  * do_notify_parent_cldstop - notify parent of stopped/continued state change
2059  * @tsk: task reporting the state change
2060  * @for_ptracer: the notification is for ptracer
2061  * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2062  *
2063  * Notify @tsk's parent that the stopped/continued state has changed.  If
2064  * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
2065  * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2066  *
2067  * CONTEXT:
2068  * Must be called with tasklist_lock at least read locked.
2069  */
2070 static void do_notify_parent_cldstop(struct task_struct *tsk,
2071 				     bool for_ptracer, int why)
2072 {
2073 	struct kernel_siginfo info;
2074 	unsigned long flags;
2075 	struct task_struct *parent;
2076 	struct sighand_struct *sighand;
2077 	u64 utime, stime;
2078 
2079 	if (for_ptracer) {
2080 		parent = tsk->parent;
2081 	} else {
2082 		tsk = tsk->group_leader;
2083 		parent = tsk->real_parent;
2084 	}
2085 
2086 	clear_siginfo(&info);
2087 	info.si_signo = SIGCHLD;
2088 	info.si_errno = 0;
2089 	/*
2090 	 * see comment in do_notify_parent() about the following 4 lines
2091 	 */
2092 	rcu_read_lock();
2093 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2094 	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2095 	rcu_read_unlock();
2096 
2097 	task_cputime(tsk, &utime, &stime);
2098 	info.si_utime = nsec_to_clock_t(utime);
2099 	info.si_stime = nsec_to_clock_t(stime);
2100 
2101  	info.si_code = why;
2102  	switch (why) {
2103  	case CLD_CONTINUED:
2104  		info.si_status = SIGCONT;
2105  		break;
2106  	case CLD_STOPPED:
2107  		info.si_status = tsk->signal->group_exit_code & 0x7f;
2108  		break;
2109  	case CLD_TRAPPED:
2110  		info.si_status = tsk->exit_code & 0x7f;
2111  		break;
2112  	default:
2113  		BUG();
2114  	}
2115 
2116 	sighand = parent->sighand;
2117 	spin_lock_irqsave(&sighand->siglock, flags);
2118 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2119 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2120 		__group_send_sig_info(SIGCHLD, &info, parent);
2121 	/*
2122 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2123 	 */
2124 	__wake_up_parent(tsk, parent);
2125 	spin_unlock_irqrestore(&sighand->siglock, flags);
2126 }
2127 
2128 static inline bool may_ptrace_stop(void)
2129 {
2130 	if (!likely(current->ptrace))
2131 		return false;
2132 	/*
2133 	 * Are we in the middle of do_coredump?
2134 	 * If so, and our tracer is also part of the coredump, stopping
2135 	 * is a deadlock situation and pointless because our tracer
2136 	 * is dead, so don't allow us to stop.
2137 	 * If SIGKILL was already sent before the caller unlocked
2138 	 * ->siglock we must see ->core_state != NULL. Otherwise it
2139 	 * is safe to enter schedule().
2140 	 *
2141 	 * This is almost outdated: a task with a pending SIGKILL can't
2142 	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
2143 	 * after SIGKILL was already dequeued.
2144 	 */
2145 	if (unlikely(current->mm->core_state) &&
2146 	    unlikely(current->mm == current->parent->mm))
2147 		return false;
2148 
2149 	return true;
2150 }
2151 
2152 /*
2153  * Return non-zero if there is a SIGKILL that should be waking us up.
2154  * Called with the siglock held.
2155  */
2156 static bool sigkill_pending(struct task_struct *tsk)
2157 {
2158 	return sigismember(&tsk->pending.signal, SIGKILL) ||
2159 	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
2160 }
2161 
2162 /*
2163  * This must be called with current->sighand->siglock held.
2164  *
2165  * This should be the path for all ptrace stops.
2166  * We always set current->last_siginfo while stopped here.
2167  * That makes it a way to test a stopped process for
2168  * being ptrace-stopped vs being job-control-stopped.
2169  *
2170  * If we actually decide not to stop at all because the tracer
2171  * is gone, we keep current->exit_code unless clear_code.
2172  */
2173 static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2174 	__releases(&current->sighand->siglock)
2175 	__acquires(&current->sighand->siglock)
2176 {
2177 	bool gstop_done = false;
2178 
2179 	if (arch_ptrace_stop_needed(exit_code, info)) {
2180 		/*
2181 		 * The arch code has something special to do before a
2182 		 * ptrace stop.  This is allowed to block, e.g. for faults
2183 		 * on user stack pages.  We can't keep the siglock while
2184 		 * calling arch_ptrace_stop, so we must release it now.
2185 		 * To preserve proper semantics, we must do this before
2186 		 * any signal bookkeeping like checking group_stop_count.
2187 		 * Meanwhile, a SIGKILL could come in before we retake the
2188 		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
2189 		 * So after regaining the lock, we must check for SIGKILL.
2190 		 */
2191 		spin_unlock_irq(&current->sighand->siglock);
2192 		arch_ptrace_stop(exit_code, info);
2193 		spin_lock_irq(&current->sighand->siglock);
2194 		if (sigkill_pending(current))
2195 			return;
2196 	}
2197 
2198 	set_special_state(TASK_TRACED);
2199 
2200 	/*
2201 	 * We're committing to trapping.  TRACED should be visible before
2202 	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2203 	 * Also, transition to TRACED and updates to ->jobctl should be
2204 	 * atomic with respect to siglock and should be done after the arch
2205 	 * hook as siglock is released and regrabbed across it.
2206 	 *
2207 	 *     TRACER				    TRACEE
2208 	 *
2209 	 *     ptrace_attach()
2210 	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
2211 	 *     do_wait()
2212 	 *       set_current_state()                smp_wmb();
2213 	 *       ptrace_do_wait()
2214 	 *         wait_task_stopped()
2215 	 *           task_stopped_code()
2216 	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
2217 	 */
2218 	smp_wmb();
2219 
2220 	current->last_siginfo = info;
2221 	current->exit_code = exit_code;
2222 
2223 	/*
2224 	 * If @why is CLD_STOPPED, we're trapping to participate in a group
2225 	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
2226 	 * across siglock relocks since INTERRUPT was scheduled, PENDING
2227 	 * could be clear now.  We act as if SIGCONT is received after
2228 	 * TASK_TRACED is entered - ignore it.
2229 	 */
2230 	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2231 		gstop_done = task_participate_group_stop(current);
2232 
2233 	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2234 	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2235 	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2236 		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2237 
2238 	/* entering a trap, clear TRAPPING */
2239 	task_clear_jobctl_trapping(current);
2240 
2241 	spin_unlock_irq(&current->sighand->siglock);
2242 	read_lock(&tasklist_lock);
2243 	if (may_ptrace_stop()) {
2244 		/*
2245 		 * Notify parents of the stop.
2246 		 *
2247 		 * While ptraced, there are two parents - the ptracer and
2248 		 * the real_parent of the group_leader.  The ptracer should
2249 		 * know about every stop while the real parent is only
2250 		 * interested in the completion of group stop.  The states
2251 		 * for the two don't interact with each other.  Notify
2252 		 * separately unless they're gonna be duplicates.
2253 		 */
2254 		do_notify_parent_cldstop(current, true, why);
2255 		if (gstop_done && ptrace_reparented(current))
2256 			do_notify_parent_cldstop(current, false, why);
2257 
2258 		/*
2259 		 * Don't want to allow preemption here, because
2260 		 * sys_ptrace() needs this task to be inactive.
2261 		 *
2262 		 * XXX: implement read_unlock_no_resched().
2263 		 */
2264 		preempt_disable();
2265 		read_unlock(&tasklist_lock);
2266 		cgroup_enter_frozen();
2267 		preempt_enable_no_resched();
2268 		freezable_schedule();
2269 		cgroup_leave_frozen(true);
2270 	} else {
2271 		/*
2272 		 * By the time we got the lock, our tracer went away.
2273 		 * Don't drop the lock yet, another tracer may come.
2274 		 *
2275 		 * If @gstop_done, the ptracer went away between group stop
2276 		 * completion and here.  During detach, it would have set
2277 		 * JOBCTL_STOP_PENDING on us and we'll re-enter
2278 		 * TASK_STOPPED in do_signal_stop() on return, so notifying
2279 		 * the real parent of the group stop completion is enough.
2280 		 */
2281 		if (gstop_done)
2282 			do_notify_parent_cldstop(current, false, why);
2283 
2284 		/* tasklist protects us from ptrace_freeze_traced() */
2285 		__set_current_state(TASK_RUNNING);
2286 		if (clear_code)
2287 			current->exit_code = 0;
2288 		read_unlock(&tasklist_lock);
2289 	}
2290 
2291 	/*
2292 	 * We are back.  Now reacquire the siglock before touching
2293 	 * last_siginfo, so that we are sure to have synchronized with
2294 	 * any signal-sending on another CPU that wants to examine it.
2295 	 */
2296 	spin_lock_irq(&current->sighand->siglock);
2297 	current->last_siginfo = NULL;
2298 
2299 	/* LISTENING can be set only during STOP traps, clear it */
2300 	current->jobctl &= ~JOBCTL_LISTENING;
2301 
2302 	/*
2303 	 * Queued signals ignored us while we were stopped for tracing.
2304 	 * So check for any that we should take before resuming user mode.
2305 	 * This sets TIF_SIGPENDING, but never clears it.
2306 	 */
2307 	recalc_sigpending_tsk(current);
2308 }
2309 
2310 static void ptrace_do_notify(int signr, int exit_code, int why)
2311 {
2312 	kernel_siginfo_t info;
2313 
2314 	clear_siginfo(&info);
2315 	info.si_signo = signr;
2316 	info.si_code = exit_code;
2317 	info.si_pid = task_pid_vnr(current);
2318 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2319 
2320 	/* Let the debugger run.  */
2321 	ptrace_stop(exit_code, why, 1, &info);
2322 }
2323 
2324 void ptrace_notify(int exit_code)
2325 {
2326 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2327 	if (unlikely(current->task_works))
2328 		task_work_run();
2329 
2330 	spin_lock_irq(&current->sighand->siglock);
2331 	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2332 	spin_unlock_irq(&current->sighand->siglock);
2333 }
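
/*
 * Illustrative sketch of how callers encode a ptrace event in exit_code,
 * e.g. the exec path effectively does
 *
 *	ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
 *
 * which passes the BUG_ON above (the low seven bits are SIGTRAP) and is
 * observed by the tracer as a SIGTRAP stop with the event in status >> 16.
 */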
2334 
2335 /**
2336  * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2337  * @signr: signr causing group stop if initiating
2338  *
2339  * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2340  * and participate in it.  If already set, participate in the existing
2341  * group stop.  If participated in a group stop (and thus slept), %true is
2342  * returned with siglock released.
2343  *
2344  * If ptraced, this function doesn't handle stop itself.  Instead,
2345  * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2346  * untouched.  The caller must ensure that INTERRUPT trap handling takes
2347  * place afterwards.
2348  *
2349  * CONTEXT:
2350  * Must be called with @current->sighand->siglock held, which is released
2351  * on %true return.
2352  *
2353  * RETURNS:
2354  * %false if group stop is already cancelled or ptrace trap is scheduled.
2355  * %true if participated in group stop.
2356  */
2357 static bool do_signal_stop(int signr)
2358 	__releases(&current->sighand->siglock)
2359 {
2360 	struct signal_struct *sig = current->signal;
2361 
2362 	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2363 		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2364 		struct task_struct *t;
2365 
2366 		/* signr will be recorded in task->jobctl for retries */
2367 		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2368 
2369 		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2370 		    unlikely(signal_group_exit(sig)))
2371 			return false;
2372 		/*
2373 		 * There is no group stop already in progress.  We must
2374 		 * initiate one now.
2375 		 *
2376 		 * While ptraced, a task may be resumed while group stop is
2377 		 * still in effect and then receive a stop signal and
2378 		 * initiate another group stop.  This deviates from the
2379 		 * usual behavior as two consecutive stop signals can't
2380 		 * cause two group stops when !ptraced.  That is why we
2381 		 * also check !task_is_stopped(t) below.
2382 		 *
2383 		 * The condition can be distinguished by testing whether
2384 		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
2385 		 * group_exit_code in such case.
2386 		 *
2387 		 * This is not necessary for SIGNAL_STOP_CONTINUED because
2388 		 * an intervening stop signal is required to cause two
2389 		 * continued events regardless of ptrace.
2390 		 */
2391 		if (!(sig->flags & SIGNAL_STOP_STOPPED))
2392 			sig->group_exit_code = signr;
2393 
2394 		sig->group_stop_count = 0;
2395 
2396 		if (task_set_jobctl_pending(current, signr | gstop))
2397 			sig->group_stop_count++;
2398 
2399 		t = current;
2400 		while_each_thread(current, t) {
2401 			/*
2402 			 * Setting state to TASK_STOPPED for a group
2403 			 * stop is always done with the siglock held,
2404 			 * so this check has no races.
2405 			 */
2406 			if (!task_is_stopped(t) &&
2407 			    task_set_jobctl_pending(t, signr | gstop)) {
2408 				sig->group_stop_count++;
2409 				if (likely(!(t->ptrace & PT_SEIZED)))
2410 					signal_wake_up(t, 0);
2411 				else
2412 					ptrace_trap_notify(t);
2413 			}
2414 		}
2415 	}
2416 
2417 	if (likely(!current->ptrace)) {
2418 		int notify = 0;
2419 
2420 		/*
2421 		 * If there are no other threads in the group, or if there
2422 		 * is a group stop in progress and we are the last to stop,
2423 		 * report to the parent.
2424 		 */
2425 		if (task_participate_group_stop(current))
2426 			notify = CLD_STOPPED;
2427 
2428 		set_special_state(TASK_STOPPED);
2429 		spin_unlock_irq(&current->sighand->siglock);
2430 
2431 		/*
2432 		 * Notify the parent of the group stop completion.  Because
2433 		 * we're not holding either the siglock or tasklist_lock
2434 	 * here, a ptracer may attach in between; however, this is for
2435 		 * group stop and should always be delivered to the real
2436 		 * parent of the group leader.  The new ptracer will get
2437 		 * its notification when this task transitions into
2438 		 * TASK_TRACED.
2439 		 */
2440 		if (notify) {
2441 			read_lock(&tasklist_lock);
2442 			do_notify_parent_cldstop(current, false, notify);
2443 			read_unlock(&tasklist_lock);
2444 		}
2445 
2446 		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2447 		cgroup_enter_frozen();
2448 		freezable_schedule();
2449 		return true;
2450 	} else {
2451 		/*
2452 		 * While ptraced, group stop is handled by STOP trap.
2453 		 * Schedule it and let the caller deal with it.
2454 		 */
2455 		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2456 		return false;
2457 	}
2458 }
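
/*
 * Illustrative scenario for the above: when a multi-threaded foreground
 * job receives ^Z the terminal sends SIGTSTP to the whole process group.
 * The first thread to dequeue it initiates the group stop, sets
 * JOBCTL_STOP_PENDING on its siblings, and each participant decrements
 * group_stop_count as it stops; only when the last one has stopped is
 * the real parent notified via do_notify_parent_cldstop().
 */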
2459 
2460 /**
2461  * do_jobctl_trap - take care of ptrace jobctl traps
2462  *
2463  * When PT_SEIZED, it's used for both group stop and explicit
2464  * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2465  * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2466  * the stop signal; otherwise, %SIGTRAP.
2467  *
2468  * When !PT_SEIZED, it's used only for group stop trap with stop signal
2469  * number as exit_code and no siginfo.
2470  *
2471  * CONTEXT:
2472  * Must be called with @current->sighand->siglock held, which may be
2473  * released and re-acquired before returning with intervening sleep.
2474  */
2475 static void do_jobctl_trap(void)
2476 {
2477 	struct signal_struct *signal = current->signal;
2478 	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2479 
2480 	if (current->ptrace & PT_SEIZED) {
2481 		if (!signal->group_stop_count &&
2482 		    !(signal->flags & SIGNAL_STOP_STOPPED))
2483 			signr = SIGTRAP;
2484 		WARN_ON_ONCE(!signr);
2485 		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2486 				 CLD_STOPPED);
2487 	} else {
2488 		WARN_ON_ONCE(!signr);
2489 		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2490 		current->exit_code = 0;
2491 	}
2492 }
2493 
2494 /**
2495  * do_freezer_trap - handle the freezer jobctl trap
2496  *
2497  * Puts the task into the frozen state, but only if the task is not about
2498  * to quit; in that case it drops JOBCTL_TRAP_FREEZE.
2499  *
2500  * CONTEXT:
2501  * Must be called with @current->sighand->siglock held,
2502  * which is always released before returning.
2503  */
2504 static void do_freezer_trap(void)
2505 	__releases(&current->sighand->siglock)
2506 {
2507 	/*
2508 	 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2509 	 * let's make another loop to give them a chance to be handled.
2510 	 * In any case, we'll come back here.
2511 	 */
2512 	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2513 	     JOBCTL_TRAP_FREEZE) {
2514 		spin_unlock_irq(&current->sighand->siglock);
2515 		return;
2516 	}
2517 
2518 	/*
2519 	 * Now we're sure that there is no pending fatal signal and no
2520 	 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2521 	 * immediately (if there is a non-fatal signal pending), and
2522 	 * put the task into sleep.
2523 	 */
2524 	__set_current_state(TASK_INTERRUPTIBLE);
2525 	clear_thread_flag(TIF_SIGPENDING);
2526 	spin_unlock_irq(&current->sighand->siglock);
2527 	cgroup_enter_frozen();
2528 	freezable_schedule();
2529 }
2530 
2531 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2532 {
2533 	/*
2534 	 * We do not check sig_kernel_stop(signr) but set this marker
2535 	 * unconditionally because we do not know whether debugger will
2536 	 * change signr. This flag has no meaning unless we are going
2537 	 * to stop after return from ptrace_stop(). In this case it will
2538 	 * be checked in do_signal_stop(), we should only stop if it was
2539 	 * not cleared by SIGCONT while we were sleeping. See also the
2540 	 * comment in dequeue_signal().
2541 	 */
2542 	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2543 	ptrace_stop(signr, CLD_TRAPPED, 0, info);
2544 
2545 	/* We're back.  Did the debugger cancel the sig?  */
2546 	signr = current->exit_code;
2547 	if (signr == 0)
2548 		return signr;
2549 
2550 	current->exit_code = 0;
2551 
2552 	/*
2553 	 * Update the siginfo structure if the signal has
2554 	 * changed.  If the debugger wanted something
2555 	 * specific in the siginfo structure then it should
2556 	 * have updated *info via PTRACE_SETSIGINFO.
2557 	 */
2558 	if (signr != info->si_signo) {
2559 		clear_siginfo(info);
2560 		info->si_signo = signr;
2561 		info->si_errno = 0;
2562 		info->si_code = SI_USER;
2563 		rcu_read_lock();
2564 		info->si_pid = task_pid_vnr(current->parent);
2565 		info->si_uid = from_kuid_munged(current_user_ns(),
2566 						task_uid(current->parent));
2567 		rcu_read_unlock();
2568 	}
2569 
2570 	/* If the (new) signal is now blocked, requeue it.  */
2571 	if (sigismember(&current->blocked, signr)) {
2572 		send_signal(signr, info, current, PIDTYPE_PID);
2573 		signr = 0;
2574 	}
2575 
2576 	return signr;
2577 }
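
/*
 * Illustrative tracer-side sketch of the dance above: after waitpid()
 * reports a signal-delivery stop, the debugger decides what the tracee
 * actually sees by the signal argument it resumes with, e.g.
 *
 *	ptrace(PTRACE_GETSIGINFO, pid, 0, &si);
 *	ptrace(PTRACE_CONT, pid, 0, 0);		// cancel the signal
 *	ptrace(PTRACE_CONT, pid, 0, SIGTERM);	// or inject a (new) signal
 */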
2578 
2579 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2580 {
2581 	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2582 	case SIL_FAULT:
2583 	case SIL_FAULT_TRAPNO:
2584 	case SIL_FAULT_MCEERR:
2585 	case SIL_FAULT_BNDERR:
2586 	case SIL_FAULT_PKUERR:
2587 	case SIL_PERF_EVENT:
2588 		ksig->info.si_addr = arch_untagged_si_addr(
2589 			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2590 		break;
2591 	case SIL_KILL:
2592 	case SIL_TIMER:
2593 	case SIL_POLL:
2594 	case SIL_CHLD:
2595 	case SIL_RT:
2596 	case SIL_SYS:
2597 		break;
2598 	}
2599 }
2600 
2601 bool get_signal(struct ksignal *ksig)
2602 {
2603 	struct sighand_struct *sighand = current->sighand;
2604 	struct signal_struct *signal = current->signal;
2605 	int signr;
2606 
2607 	if (unlikely(current->task_works))
2608 		task_work_run();
2609 
2610 	/*
2611 	 * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2612 	 * that the arch handlers don't all have to do it. If we get here
2613 	 * without TIF_SIGPENDING, just exit after running signal work.
2614 	 */
2615 	if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2616 		if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2617 			tracehook_notify_signal();
2618 		if (!task_sigpending(current))
2619 			return false;
2620 	}
2621 
2622 	if (unlikely(uprobe_deny_signal()))
2623 		return false;
2624 
2625 	/*
2626 	 * Do this once, we can't return to user-mode if freezing() == T.
2627 	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2628 	 * thus do not need another check after return.
2629 	 */
2630 	try_to_freeze();
2631 
2632 relock:
2633 	spin_lock_irq(&sighand->siglock);
2634 
2635 	/*
2636 	 * Every stopped thread goes here after wakeup. Check to see if
2637 	 * we should notify the parent, prepare_signal(SIGCONT) encodes
2638 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2639 	 */
2640 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2641 		int why;
2642 
2643 		if (signal->flags & SIGNAL_CLD_CONTINUED)
2644 			why = CLD_CONTINUED;
2645 		else
2646 			why = CLD_STOPPED;
2647 
2648 		signal->flags &= ~SIGNAL_CLD_MASK;
2649 
2650 		spin_unlock_irq(&sighand->siglock);
2651 
2652 		/*
2653 		 * Notify the parent that we're continuing.  This event is
2654 		 * always per-process and doesn't make a whole lot of sense
2655 		 * for ptracers, who shouldn't consume the state via
2656 		 * wait(2) either, but, for backward compatibility, notify
2657 		 * the ptracer of the group leader too unless it's gonna be
2658 		 * a duplicate.
2659 		 */
2660 		read_lock(&tasklist_lock);
2661 		do_notify_parent_cldstop(current, false, why);
2662 
2663 		if (ptrace_reparented(current->group_leader))
2664 			do_notify_parent_cldstop(current->group_leader,
2665 						true, why);
2666 		read_unlock(&tasklist_lock);
2667 
2668 		goto relock;
2669 	}
2670 
2671 	/* Has this task already been marked for death? */
2672 	if (signal_group_exit(signal)) {
2673 		ksig->info.si_signo = signr = SIGKILL;
2674 		sigdelset(&current->pending.signal, SIGKILL);
2675 		trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2676 				&sighand->action[SIGKILL - 1]);
2677 		recalc_sigpending();
2678 		goto fatal;
2679 	}
2680 
2681 	for (;;) {
2682 		struct k_sigaction *ka;
2683 
2684 		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2685 		    do_signal_stop(0))
2686 			goto relock;
2687 
2688 		if (unlikely(current->jobctl &
2689 			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2690 			if (current->jobctl & JOBCTL_TRAP_MASK) {
2691 				do_jobctl_trap();
2692 				spin_unlock_irq(&sighand->siglock);
2693 			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2694 				do_freezer_trap();
2695 
2696 			goto relock;
2697 		}
2698 
2699 		/*
2700 		 * If the task is leaving the frozen state, let's update
2701 		 * cgroup counters and reset the frozen bit.
2702 		 */
2703 		if (unlikely(cgroup_task_frozen(current))) {
2704 			spin_unlock_irq(&sighand->siglock);
2705 			cgroup_leave_frozen(false);
2706 			goto relock;
2707 		}
2708 
2709 		/*
2710 		 * Signals generated by the execution of an instruction
2711 		 * need to be delivered before any other pending signals
2712 		 * so that the instruction pointer in the signal stack
2713 		 * frame points to the faulting instruction.
2714 		 */
2715 		signr = dequeue_synchronous_signal(&ksig->info);
2716 		if (!signr)
2717 			signr = dequeue_signal(current, &current->blocked, &ksig->info);
2718 
2719 		if (!signr)
2720 			break; /* will return 0 */
2721 
2722 		if (unlikely(current->ptrace) && signr != SIGKILL) {
2723 			signr = ptrace_signal(signr, &ksig->info);
2724 			if (!signr)
2725 				continue;
2726 		}
2727 
2728 		ka = &sighand->action[signr-1];
2729 
2730 		/* Trace actually delivered signals. */
2731 		trace_signal_deliver(signr, &ksig->info, ka);
2732 
2733 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2734 			continue;
2735 		if (ka->sa.sa_handler != SIG_DFL) {
2736 			/* Run the handler.  */
2737 			ksig->ka = *ka;
2738 
2739 			if (ka->sa.sa_flags & SA_ONESHOT)
2740 				ka->sa.sa_handler = SIG_DFL;
2741 
2742 			break; /* will return non-zero "signr" value */
2743 		}
2744 
2745 		/*
2746 		 * Now we are doing the default action for this signal.
2747 		 */
2748 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2749 			continue;
2750 
2751 		/*
2752 		 * Global init gets no signals it doesn't want.
2753 		 * Container-init gets no signals it doesn't want from the same
2754 		 * container.
2755 		 *
2756 		 * Note that if global/container-init sees a sig_kernel_only()
2757 		 * signal here, the signal must have been generated internally
2758 		 * or must have come from an ancestor namespace. In either
2759 		 * case, the signal cannot be dropped.
2760 		 */
2761 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2762 				!sig_kernel_only(signr))
2763 			continue;
2764 
2765 		if (sig_kernel_stop(signr)) {
2766 			/*
2767 			 * The default action is to stop all threads in
2768 			 * the thread group.  The job control signals
2769 			 * do nothing in an orphaned pgrp, but SIGSTOP
2770 			 * always works.  Note that siglock needs to be
2771 			 * dropped during the call to is_current_pgrp_orphaned()
2772 			 * because of lock ordering with tasklist_lock.
2773 			 * This allows an intervening SIGCONT to be posted.
2774 			 * We need to check for that and bail out if necessary.
2775 			 */
2776 			if (signr != SIGSTOP) {
2777 				spin_unlock_irq(&sighand->siglock);
2778 
2779 				/* signals can be posted during this window */
2780 
2781 				if (is_current_pgrp_orphaned())
2782 					goto relock;
2783 
2784 				spin_lock_irq(&sighand->siglock);
2785 			}
2786 
2787 			if (likely(do_signal_stop(ksig->info.si_signo))) {
2788 				/* It released the siglock.  */
2789 				goto relock;
2790 			}
2791 
2792 			/*
2793 			 * We didn't actually stop, due to a race
2794 			 * with SIGCONT or something like that.
2795 			 */
2796 			continue;
2797 		}
2798 
2799 	fatal:
2800 		spin_unlock_irq(&sighand->siglock);
2801 		if (unlikely(cgroup_task_frozen(current)))
2802 			cgroup_leave_frozen(true);
2803 
2804 		/*
2805 		 * Anything else is fatal, maybe with a core dump.
2806 		 */
2807 		current->flags |= PF_SIGNALED;
2808 
2809 		if (sig_kernel_coredump(signr)) {
2810 			if (print_fatal_signals)
2811 				print_fatal_signal(ksig->info.si_signo);
2812 			proc_coredump_connector(current);
2813 			/*
2814 			 * If it was able to dump core, this kills all
2815 			 * other threads in the group and synchronizes with
2816 			 * their demise.  If we lost the race with another
2817 			 * thread getting here, it set group_exit_code
2818 			 * first and our do_group_exit call below will use
2819 			 * that value and ignore the one we pass it.
2820 			 */
2821 			do_coredump(&ksig->info);
2822 		}
2823 
2824 		/*
2825 		 * PF_IO_WORKER threads will catch and exit on fatal signals
2826 		 * themselves. They have cleanup that must be performed, so
2827 		 * we cannot call do_exit() on their behalf.
2828 		 */
2829 		if (current->flags & PF_IO_WORKER)
2830 			goto out;
2831 
2832 		/*
2833 		 * Death signals, no core dump.
2834 		 */
2835 		do_group_exit(ksig->info.si_signo);
2836 		/* NOTREACHED */
2837 	}
2838 	spin_unlock_irq(&sighand->siglock);
2839 out:
2840 	ksig->sig = signr;
2841 
2842 	if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2843 		hide_si_addr_tag_bits(ksig);
2844 
2845 	return ksig->sig > 0;
2846 }
2847 
2848 /**
2849  * signal_delivered -
2850  * @ksig:		kernel signal struct
2851  * @stepping:		nonzero if debugger single-step or block-step in use
2852  *
2853  * This function should be called when a signal has successfully been
2854  * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2855  * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2856  * is set in @ksig->ka.sa.sa_flags).  Tracing is notified.
2857  */
2858 static void signal_delivered(struct ksignal *ksig, int stepping)
2859 {
2860 	sigset_t blocked;
2861 
2862 	/* A signal was successfully delivered, and the
2863 	   saved sigmask was stored on the signal frame,
2864 	   and will be restored by sigreturn.  So we can
2865 	   simply clear the restore sigmask flag.  */
2866 	clear_restore_sigmask();
2867 
2868 	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2869 	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2870 		sigaddset(&blocked, ksig->sig);
2871 	set_current_blocked(&blocked);
2872 	tracehook_signal_handler(stepping);
2873 }
2874 
2875 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2876 {
2877 	if (failed)
2878 		force_sigsegv(ksig->sig);
2879 	else
2880 		signal_delivered(ksig, stepping);
2881 }
2882 
2883 /*
2884  * It could be that complete_signal() picked us to notify about the
2885  * group-wide signal. Other threads should be notified now to take
2886  * the shared signals in @which since we will not.
2887  */
2888 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2889 {
2890 	sigset_t retarget;
2891 	struct task_struct *t;
2892 
2893 	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2894 	if (sigisemptyset(&retarget))
2895 		return;
2896 
2897 	t = tsk;
2898 	while_each_thread(tsk, t) {
2899 		if (t->flags & PF_EXITING)
2900 			continue;
2901 
2902 		if (!has_pending_signals(&retarget, &t->blocked))
2903 			continue;
2904 		/* Remove the signals this thread can handle. */
2905 		sigandsets(&retarget, &retarget, &t->blocked);
2906 
2907 		if (!task_sigpending(t))
2908 			signal_wake_up(t, 0);
2909 
2910 		if (sigisemptyset(&retarget))
2911 			break;
2912 	}
2913 }
2914 
2915 void exit_signals(struct task_struct *tsk)
2916 {
2917 	int group_stop = 0;
2918 	sigset_t unblocked;
2919 
2920 	/*
2921 	 * @tsk is about to have PF_EXITING set - lock out users which
2922 	 * expect stable threadgroup.
2923 	 */
2924 	cgroup_threadgroup_change_begin(tsk);
2925 
2926 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2927 		tsk->flags |= PF_EXITING;
2928 		cgroup_threadgroup_change_end(tsk);
2929 		return;
2930 	}
2931 
2932 	spin_lock_irq(&tsk->sighand->siglock);
2933 	/*
2934 	 * From now this task is not visible for group-wide signals,
2935 	 * see wants_signal(), do_signal_stop().
2936 	 */
2937 	tsk->flags |= PF_EXITING;
2938 
2939 	cgroup_threadgroup_change_end(tsk);
2940 
2941 	if (!task_sigpending(tsk))
2942 		goto out;
2943 
2944 	unblocked = tsk->blocked;
2945 	signotset(&unblocked);
2946 	retarget_shared_pending(tsk, &unblocked);
2947 
2948 	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2949 	    task_participate_group_stop(tsk))
2950 		group_stop = CLD_STOPPED;
2951 out:
2952 	spin_unlock_irq(&tsk->sighand->siglock);
2953 
2954 	/*
2955 	 * If group stop has completed, deliver the notification.  This
2956 	 * should always go to the real parent of the group leader.
2957 	 */
2958 	if (unlikely(group_stop)) {
2959 		read_lock(&tasklist_lock);
2960 		do_notify_parent_cldstop(tsk, false, group_stop);
2961 		read_unlock(&tasklist_lock);
2962 	}
2963 }
2964 
2965 /*
2966  * System call entry points.
2967  */
2968 
2969 /**
2970  *  sys_restart_syscall - restart a system call
2971  */
2972 SYSCALL_DEFINE0(restart_syscall)
2973 {
2974 	struct restart_block *restart = &current->restart_block;
2975 	return restart->fn(restart);
2976 }
2977 
2978 long do_no_restart_syscall(struct restart_block *param)
2979 {
2980 	return -EINTR;
2981 }
2982 
2983 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2984 {
2985 	if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
2986 		sigset_t newblocked;
2987 		/* A set of now blocked but previously unblocked signals. */
2988 		sigandnsets(&newblocked, newset, &current->blocked);
2989 		retarget_shared_pending(tsk, &newblocked);
2990 	}
2991 	tsk->blocked = *newset;
2992 	recalc_sigpending();
2993 }
2994 
2995 /**
2996  * set_current_blocked - change current->blocked mask
2997  * @newset: new mask
2998  *
2999  * It is wrong to change ->blocked directly; this helper should be used
3000  * to ensure the process can't miss a shared signal we are going to block.
3001  */
3002 void set_current_blocked(sigset_t *newset)
3003 {
3004 	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3005 	__set_current_blocked(newset);
3006 }
3007 
3008 void __set_current_blocked(const sigset_t *newset)
3009 {
3010 	struct task_struct *tsk = current;
3011 
3012 	/*
3013 	 * In case the signal mask hasn't changed, there is nothing we need
3014 	 * to do. The current->blocked shouldn't be modified by another task.
3015 	 */
3016 	if (sigequalsets(&tsk->blocked, newset))
3017 		return;
3018 
3019 	spin_lock_irq(&tsk->sighand->siglock);
3020 	__set_task_blocked(tsk, newset);
3021 	spin_unlock_irq(&tsk->sighand->siglock);
3022 }
3023 
3024 /*
3025  * This is also useful for kernel threads that want to temporarily
3026  * (or permanently) block certain signals.
3027  *
3028  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3029  * interface happily blocks "unblockable" signals like SIGKILL
3030  * and friends.
3031  */
3032 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3033 {
3034 	struct task_struct *tsk = current;
3035 	sigset_t newset;
3036 
3037 	/* Lockless, only current can change ->blocked, never from irq */
3038 	if (oldset)
3039 		*oldset = tsk->blocked;
3040 
3041 	switch (how) {
3042 	case SIG_BLOCK:
3043 		sigorsets(&newset, &tsk->blocked, set);
3044 		break;
3045 	case SIG_UNBLOCK:
3046 		sigandnsets(&newset, &tsk->blocked, set);
3047 		break;
3048 	case SIG_SETMASK:
3049 		newset = *set;
3050 		break;
3051 	default:
3052 		return -EINVAL;
3053 	}
3054 
3055 	__set_current_blocked(&newset);
3056 	return 0;
3057 }
3058 EXPORT_SYMBOL(sigprocmask);
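
/*
 * Illustrative sketch of in-kernel usage, e.g. a kernel thread that wants
 * to keep only SIGKILL deliverable ('blocked' and 'oldset' are local
 * variables for the example):
 *
 *	sigset_t blocked, oldset;
 *
 *	sigfillset(&blocked);
 *	sigdelset(&blocked, SIGKILL);
 *	sigprocmask(SIG_SETMASK, &blocked, &oldset);
 *	...
 *	sigprocmask(SIG_SETMASK, &oldset, NULL);
 */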
3059 
3060 /*
3061  * This API helps set app-provided sigmasks.
3062  *
3063  * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3064  * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3065  *
3066  * Note that it does set_restore_sigmask() in advance, so it must always be
3067  * paired with restore_saved_sigmask_unless() before return from syscall.
3068  */
3069 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3070 {
3071 	sigset_t kmask;
3072 
3073 	if (!umask)
3074 		return 0;
3075 	if (sigsetsize != sizeof(sigset_t))
3076 		return -EINVAL;
3077 	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3078 		return -EFAULT;
3079 
3080 	set_restore_sigmask();
3081 	current->saved_sigmask = current->blocked;
3082 	set_current_blocked(&kmask);
3083 
3084 	return 0;
3085 }
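
/*
 * Illustrative sketch of the required pairing in a syscall such as
 * ppoll() (simplified; error handling, the actual poll work and the
 * parameter names are assumptions for the example):
 *
 *	ret = set_user_sigmask(sigmask, sigsetsize);
 *	if (ret)
 *		return ret;
 *	ret = do_sys_poll(ufds, nfds, to);
 *	restore_saved_sigmask_unless(ret == -EINTR);
 *	return ret;
 */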
3086 
3087 #ifdef CONFIG_COMPAT
3088 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3089 			    size_t sigsetsize)
3090 {
3091 	sigset_t kmask;
3092 
3093 	if (!umask)
3094 		return 0;
3095 	if (sigsetsize != sizeof(compat_sigset_t))
3096 		return -EINVAL;
3097 	if (get_compat_sigset(&kmask, umask))
3098 		return -EFAULT;
3099 
3100 	set_restore_sigmask();
3101 	current->saved_sigmask = current->blocked;
3102 	set_current_blocked(&kmask);
3103 
3104 	return 0;
3105 }
3106 #endif
3107 
3108 /**
3109  *  sys_rt_sigprocmask - change the list of currently blocked signals
3110  *  @how: whether to add, remove, or set signals
3111  *  @nset: new value of signal mask if non-null
3112  *  @oset: previous value of signal mask if non-null
3113  *  @sigsetsize: size of sigset_t type
3114  */
3115 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3116 		sigset_t __user *, oset, size_t, sigsetsize)
3117 {
3118 	sigset_t old_set, new_set;
3119 	int error;
3120 
3121 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3122 	if (sigsetsize != sizeof(sigset_t))
3123 		return -EINVAL;
3124 
3125 	old_set = current->blocked;
3126 
3127 	if (nset) {
3128 		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3129 			return -EFAULT;
3130 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3131 
3132 		error = sigprocmask(how, &new_set, NULL);
3133 		if (error)
3134 			return error;
3135 	}
3136 
3137 	if (oset) {
3138 		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3139 			return -EFAULT;
3140 	}
3141 
3142 	return 0;
3143 }
3144 
3145 #ifdef CONFIG_COMPAT
3146 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3147 		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3148 {
3149 	sigset_t old_set = current->blocked;
3150 
3151 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3152 	if (sigsetsize != sizeof(sigset_t))
3153 		return -EINVAL;
3154 
3155 	if (nset) {
3156 		sigset_t new_set;
3157 		int error;
3158 		if (get_compat_sigset(&new_set, nset))
3159 			return -EFAULT;
3160 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3161 
3162 		error = sigprocmask(how, &new_set, NULL);
3163 		if (error)
3164 			return error;
3165 	}
3166 	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3167 }
3168 #endif
3169 
3170 static void do_sigpending(sigset_t *set)
3171 {
3172 	spin_lock_irq(&current->sighand->siglock);
3173 	sigorsets(set, &current->pending.signal,
3174 		  &current->signal->shared_pending.signal);
3175 	spin_unlock_irq(&current->sighand->siglock);
3176 
3177 	/* Outside the lock because only this thread touches it.  */
3178 	sigandsets(set, &current->blocked, set);
3179 }
3180 
3181 /**
3182  *  sys_rt_sigpending - examine a pending signal that has been raised
3183  *			while blocked
3184  *  @uset: stores pending signals
3185  *  @sigsetsize: size of sigset_t type, or smaller
3186  */
3187 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3188 {
3189 	sigset_t set;
3190 
3191 	if (sigsetsize > sizeof(*uset))
3192 		return -EINVAL;
3193 
3194 	do_sigpending(&set);
3195 
3196 	if (copy_to_user(uset, &set, sigsetsize))
3197 		return -EFAULT;
3198 
3199 	return 0;
3200 }
3201 
3202 #ifdef CONFIG_COMPAT
3203 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3204 		compat_size_t, sigsetsize)
3205 {
3206 	sigset_t set;
3207 
3208 	if (sigsetsize > sizeof(*uset))
3209 		return -EINVAL;
3210 
3211 	do_sigpending(&set);
3212 
3213 	return put_compat_sigset(uset, &set, sigsetsize);
3214 }
3215 #endif
3216 
3217 static const struct {
3218 	unsigned char limit, layout;
3219 } sig_sicodes[] = {
3220 	[SIGILL]  = { NSIGILL,  SIL_FAULT },
3221 	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
3222 	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3223 	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
3224 	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3225 #if defined(SIGEMT)
3226 	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
3227 #endif
3228 	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3229 	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
3230 	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
3231 };
3232 
3233 static bool known_siginfo_layout(unsigned sig, int si_code)
3234 {
3235 	if (si_code == SI_KERNEL)
3236 		return true;
3237 	else if ((si_code > SI_USER)) {
3238 		if (sig_specific_sicodes(sig)) {
3239 			if (si_code <= sig_sicodes[sig].limit)
3240 				return true;
3241 		}
3242 		else if (si_code <= NSIGPOLL)
3243 			return true;
3244 	}
3245 	else if (si_code >= SI_DETHREAD)
3246 		return true;
3247 	else if (si_code == SI_ASYNCNL)
3248 		return true;
3249 	return false;
3250 }
3251 
3252 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3253 {
3254 	enum siginfo_layout layout = SIL_KILL;
3255 	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3256 		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3257 		    (si_code <= sig_sicodes[sig].limit)) {
3258 			layout = sig_sicodes[sig].layout;
3259 			/* Handle the exceptions */
3260 			if ((sig == SIGBUS) &&
3261 			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3262 				layout = SIL_FAULT_MCEERR;
3263 			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3264 				layout = SIL_FAULT_BNDERR;
3265 #ifdef SEGV_PKUERR
3266 			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3267 				layout = SIL_FAULT_PKUERR;
3268 #endif
3269 			else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3270 				layout = SIL_PERF_EVENT;
3271 #ifdef __ARCH_SI_TRAPNO
3272 			else if (layout == SIL_FAULT)
3273 				layout = SIL_FAULT_TRAPNO;
3274 #endif
3275 		}
3276 		else if (si_code <= NSIGPOLL)
3277 			layout = SIL_POLL;
3278 	} else {
3279 		if (si_code == SI_TIMER)
3280 			layout = SIL_TIMER;
3281 		else if (si_code == SI_SIGIO)
3282 			layout = SIL_POLL;
3283 		else if (si_code < 0)
3284 			layout = SIL_RT;
3285 	}
3286 	return layout;
3287 }
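
/*
 * Illustrative examples of the mapping above: (SIGSEGV, SEGV_MAPERR)
 * resolves to SIL_FAULT (or SIL_FAULT_TRAPNO on architectures defining
 * __ARCH_SI_TRAPNO), (SIGSEGV, SEGV_BNDERR) to SIL_FAULT_BNDERR,
 * (SIGCHLD, CLD_EXITED) to SIL_CHLD, a queued realtime signal with the
 * negative si_code SI_QUEUE to SIL_RT, and (SIGTERM, SI_USER) stays
 * SIL_KILL.
 */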
3288 
3289 static inline char __user *si_expansion(const siginfo_t __user *info)
3290 {
3291 	return ((char __user *)info) + sizeof(struct kernel_siginfo);
3292 }
3293 
3294 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3295 {
3296 	char __user *expansion = si_expansion(to);
3297 	if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
3298 		return -EFAULT;
3299 	if (clear_user(expansion, SI_EXPANSION_SIZE))
3300 		return -EFAULT;
3301 	return 0;
3302 }
3303 
3304 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3305 				       const siginfo_t __user *from)
3306 {
3307 	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3308 		char __user *expansion = si_expansion(from);
3309 		char buf[SI_EXPANSION_SIZE];
3310 		int i;
3311 		/*
3312 		 * An unknown si_code might need more than
3313 		 * sizeof(struct kernel_siginfo) bytes.  Verify all of the
3314 		 * extra bytes are 0.  This guarantees copy_siginfo_to_user
3315 		 * will return this data to userspace exactly.
3316 		 */
3317 		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3318 			return -EFAULT;
3319 		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3320 			if (buf[i] != 0)
3321 				return -E2BIG;
3322 		}
3323 	}
3324 	return 0;
3325 }
3326 
3327 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3328 				    const siginfo_t __user *from)
3329 {
3330 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3331 		return -EFAULT;
3332 	to->si_signo = signo;
3333 	return post_copy_siginfo_from_user(to, from);
3334 }
3335 
3336 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3337 {
3338 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3339 		return -EFAULT;
3340 	return post_copy_siginfo_from_user(to, from);
3341 }
3342 
3343 #ifdef CONFIG_COMPAT
3344 /**
3345  * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3346  * @to: compat siginfo destination
3347  * @from: kernel siginfo source
3348  *
3349  * Note: This function does not work properly for SIGCHLD on x32, but
3350  * fortunately it doesn't have to.  The only valid callers for this function are
3351  * copy_siginfo_to_user32, which is overridden for x32, and the coredump code.
3352  * The latter does not care because SIGCHLD will never cause a coredump.
3353  */
3354 void copy_siginfo_to_external32(struct compat_siginfo *to,
3355 		const struct kernel_siginfo *from)
3356 {
3357 	memset(to, 0, sizeof(*to));
3358 
3359 	to->si_signo = from->si_signo;
3360 	to->si_errno = from->si_errno;
3361 	to->si_code  = from->si_code;
3362 	switch(siginfo_layout(from->si_signo, from->si_code)) {
3363 	case SIL_KILL:
3364 		to->si_pid = from->si_pid;
3365 		to->si_uid = from->si_uid;
3366 		break;
3367 	case SIL_TIMER:
3368 		to->si_tid     = from->si_tid;
3369 		to->si_overrun = from->si_overrun;
3370 		to->si_int     = from->si_int;
3371 		break;
3372 	case SIL_POLL:
3373 		to->si_band = from->si_band;
3374 		to->si_fd   = from->si_fd;
3375 		break;
3376 	case SIL_FAULT:
3377 		to->si_addr = ptr_to_compat(from->si_addr);
3378 		break;
3379 	case SIL_FAULT_TRAPNO:
3380 		to->si_addr = ptr_to_compat(from->si_addr);
3381 		to->si_trapno = from->si_trapno;
3382 		break;
3383 	case SIL_FAULT_MCEERR:
3384 		to->si_addr = ptr_to_compat(from->si_addr);
3385 		to->si_addr_lsb = from->si_addr_lsb;
3386 		break;
3387 	case SIL_FAULT_BNDERR:
3388 		to->si_addr = ptr_to_compat(from->si_addr);
3389 		to->si_lower = ptr_to_compat(from->si_lower);
3390 		to->si_upper = ptr_to_compat(from->si_upper);
3391 		break;
3392 	case SIL_FAULT_PKUERR:
3393 		to->si_addr = ptr_to_compat(from->si_addr);
3394 		to->si_pkey = from->si_pkey;
3395 		break;
3396 	case SIL_PERF_EVENT:
3397 		to->si_addr = ptr_to_compat(from->si_addr);
3398 		to->si_perf_data = from->si_perf_data;
3399 		to->si_perf_type = from->si_perf_type;
3400 		break;
3401 	case SIL_CHLD:
3402 		to->si_pid = from->si_pid;
3403 		to->si_uid = from->si_uid;
3404 		to->si_status = from->si_status;
3405 		to->si_utime = from->si_utime;
3406 		to->si_stime = from->si_stime;
3407 		break;
3408 	case SIL_RT:
3409 		to->si_pid = from->si_pid;
3410 		to->si_uid = from->si_uid;
3411 		to->si_int = from->si_int;
3412 		break;
3413 	case SIL_SYS:
3414 		to->si_call_addr = ptr_to_compat(from->si_call_addr);
3415 		to->si_syscall   = from->si_syscall;
3416 		to->si_arch      = from->si_arch;
3417 		break;
3418 	}
3419 }
3420 
3421 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3422 			   const struct kernel_siginfo *from)
3423 {
3424 	struct compat_siginfo new;
3425 
3426 	copy_siginfo_to_external32(&new, from);
3427 	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3428 		return -EFAULT;
3429 	return 0;
3430 }
3431 
3432 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3433 					 const struct compat_siginfo *from)
3434 {
3435 	clear_siginfo(to);
3436 	to->si_signo = from->si_signo;
3437 	to->si_errno = from->si_errno;
3438 	to->si_code  = from->si_code;
3439 	switch(siginfo_layout(from->si_signo, from->si_code)) {
3440 	case SIL_KILL:
3441 		to->si_pid = from->si_pid;
3442 		to->si_uid = from->si_uid;
3443 		break;
3444 	case SIL_TIMER:
3445 		to->si_tid     = from->si_tid;
3446 		to->si_overrun = from->si_overrun;
3447 		to->si_int     = from->si_int;
3448 		break;
3449 	case SIL_POLL:
3450 		to->si_band = from->si_band;
3451 		to->si_fd   = from->si_fd;
3452 		break;
3453 	case SIL_FAULT:
3454 		to->si_addr = compat_ptr(from->si_addr);
3455 		break;
3456 	case SIL_FAULT_TRAPNO:
3457 		to->si_addr = compat_ptr(from->si_addr);
3458 		to->si_trapno = from->si_trapno;
3459 		break;
3460 	case SIL_FAULT_MCEERR:
3461 		to->si_addr = compat_ptr(from->si_addr);
3462 		to->si_addr_lsb = from->si_addr_lsb;
3463 		break;
3464 	case SIL_FAULT_BNDERR:
3465 		to->si_addr = compat_ptr(from->si_addr);
3466 		to->si_lower = compat_ptr(from->si_lower);
3467 		to->si_upper = compat_ptr(from->si_upper);
3468 		break;
3469 	case SIL_FAULT_PKUERR:
3470 		to->si_addr = compat_ptr(from->si_addr);
3471 		to->si_pkey = from->si_pkey;
3472 		break;
3473 	case SIL_PERF_EVENT:
3474 		to->si_addr = compat_ptr(from->si_addr);
3475 		to->si_perf_data = from->si_perf_data;
3476 		to->si_perf_type = from->si_perf_type;
3477 		break;
3478 	case SIL_CHLD:
3479 		to->si_pid    = from->si_pid;
3480 		to->si_uid    = from->si_uid;
3481 		to->si_status = from->si_status;
3482 #ifdef CONFIG_X86_X32_ABI
3483 		if (in_x32_syscall()) {
3484 			to->si_utime = from->_sifields._sigchld_x32._utime;
3485 			to->si_stime = from->_sifields._sigchld_x32._stime;
3486 		} else
3487 #endif
3488 		{
3489 			to->si_utime = from->si_utime;
3490 			to->si_stime = from->si_stime;
3491 		}
3492 		break;
3493 	case SIL_RT:
3494 		to->si_pid = from->si_pid;
3495 		to->si_uid = from->si_uid;
3496 		to->si_int = from->si_int;
3497 		break;
3498 	case SIL_SYS:
3499 		to->si_call_addr = compat_ptr(from->si_call_addr);
3500 		to->si_syscall   = from->si_syscall;
3501 		to->si_arch      = from->si_arch;
3502 		break;
3503 	}
3504 	return 0;
3505 }
3506 
3507 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3508 				      const struct compat_siginfo __user *ufrom)
3509 {
3510 	struct compat_siginfo from;
3511 
3512 	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3513 		return -EFAULT;
3514 
3515 	from.si_signo = signo;
3516 	return post_copy_siginfo_from_user32(to, &from);
3517 }
3518 
3519 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3520 			     const struct compat_siginfo __user *ufrom)
3521 {
3522 	struct compat_siginfo from;
3523 
3524 	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3525 		return -EFAULT;
3526 
3527 	return post_copy_siginfo_from_user32(to, &from);
3528 }
3529 #endif /* CONFIG_COMPAT */
3530 
3531 /**
3532  *  do_sigtimedwait - wait for queued signals specified in @which
3533  *  @which: queued signals to wait for
3534  *  @info: if non-null, the signal's siginfo is returned here
3535  *  @ts: upper bound on process time suspension
3536  */
3537 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3538 		    const struct timespec64 *ts)
3539 {
3540 	ktime_t *to = NULL, timeout = KTIME_MAX;
3541 	struct task_struct *tsk = current;
3542 	sigset_t mask = *which;
3543 	int sig, ret = 0;
3544 
3545 	if (ts) {
3546 		if (!timespec64_valid(ts))
3547 			return -EINVAL;
3548 		timeout = timespec64_to_ktime(*ts);
3549 		to = &timeout;
3550 	}
3551 
3552 	/*
3553 	 * Invert the set of allowed signals to get those we want to block.
3554 	 */
3555 	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3556 	signotset(&mask);
3557 
3558 	spin_lock_irq(&tsk->sighand->siglock);
3559 	sig = dequeue_signal(tsk, &mask, info);
3560 	if (!sig && timeout) {
3561 		/*
3562 		 * None ready, temporarily unblock those we're interested in
3563 		 * while we are sleeping so that we'll be awakened when
3564 		 * they arrive. Unblocking is always fine, we can avoid
3565 		 * set_current_blocked().
3566 		 */
3567 		tsk->real_blocked = tsk->blocked;
3568 		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3569 		recalc_sigpending();
3570 		spin_unlock_irq(&tsk->sighand->siglock);
3571 
3572 		__set_current_state(TASK_INTERRUPTIBLE);
3573 		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3574 							 HRTIMER_MODE_REL);
3575 		spin_lock_irq(&tsk->sighand->siglock);
3576 		__set_task_blocked(tsk, &tsk->real_blocked);
3577 		sigemptyset(&tsk->real_blocked);
3578 		sig = dequeue_signal(tsk, &mask, info);
3579 	}
3580 	spin_unlock_irq(&tsk->sighand->siglock);
3581 
3582 	if (sig)
3583 		return sig;
3584 	return ret ? -EINTR : -EAGAIN;
3585 }
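
/*
 * Illustrative userspace sketch of the syscalls this helper backs: block
 * SIGUSR1 and wait for it synchronously with a five second timeout:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) < 0 && errno == EAGAIN)
 *		;	// timed out
 */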
3586 
3587 /**
3588  *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
3589  *			in @uthese
3590  *  @uthese: queued signals to wait for
3591  *  @uinfo: if non-null, the signal's siginfo is returned here
3592  *  @uts: upper bound on process time suspension
3593  *  @sigsetsize: size of sigset_t type
3594  */
3595 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3596 		siginfo_t __user *, uinfo,
3597 		const struct __kernel_timespec __user *, uts,
3598 		size_t, sigsetsize)
3599 {
3600 	sigset_t these;
3601 	struct timespec64 ts;
3602 	kernel_siginfo_t info;
3603 	int ret;
3604 
3605 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3606 	if (sigsetsize != sizeof(sigset_t))
3607 		return -EINVAL;
3608 
3609 	if (copy_from_user(&these, uthese, sizeof(these)))
3610 		return -EFAULT;
3611 
3612 	if (uts) {
3613 		if (get_timespec64(&ts, uts))
3614 			return -EFAULT;
3615 	}
3616 
3617 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3618 
3619 	if (ret > 0 && uinfo) {
3620 		if (copy_siginfo_to_user(uinfo, &info))
3621 			ret = -EFAULT;
3622 	}
3623 
3624 	return ret;
3625 }
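
/*
 * Illustrative userspace sketch (not part of this file): one plausible way
 * to reach rt_sigtimedwait through the POSIX sigtimedwait() wrapper.  The
 * signal must be blocked first, otherwise it can be delivered to a handler
 * instead of staying queued for the waiter.  The helper name is invented.
 *
 *      #include <signal.h>
 *      #include <stdio.h>
 *      #include <time.h>
 *
 *      static int wait_for_usr1(void)
 *      {
 *              sigset_t set;
 *              siginfo_t info;
 *              struct timespec timeout = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *              sigemptyset(&set);
 *              sigaddset(&set, SIGUSR1);
 *              sigprocmask(SIG_BLOCK, &set, NULL);
 *
 *              if (sigtimedwait(&set, &info, &timeout) < 0)
 *                      return -1;      // errno: EAGAIN on timeout, EINTR otherwise
 *              printf("SIGUSR1 from pid %d\n", (int)info.si_pid);
 *              return 0;
 *      }
 */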
3626 
3627 #ifdef CONFIG_COMPAT_32BIT_TIME
3628 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3629 		siginfo_t __user *, uinfo,
3630 		const struct old_timespec32 __user *, uts,
3631 		size_t, sigsetsize)
3632 {
3633 	sigset_t these;
3634 	struct timespec64 ts;
3635 	kernel_siginfo_t info;
3636 	int ret;
3637 
3638 	if (sigsetsize != sizeof(sigset_t))
3639 		return -EINVAL;
3640 
3641 	if (copy_from_user(&these, uthese, sizeof(these)))
3642 		return -EFAULT;
3643 
3644 	if (uts) {
3645 		if (get_old_timespec32(&ts, uts))
3646 			return -EFAULT;
3647 	}
3648 
3649 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3650 
3651 	if (ret > 0 && uinfo) {
3652 		if (copy_siginfo_to_user(uinfo, &info))
3653 			ret = -EFAULT;
3654 	}
3655 
3656 	return ret;
3657 }
3658 #endif
3659 
3660 #ifdef CONFIG_COMPAT
3661 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3662 		struct compat_siginfo __user *, uinfo,
3663 		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3664 {
3665 	sigset_t s;
3666 	struct timespec64 t;
3667 	kernel_siginfo_t info;
3668 	long ret;
3669 
3670 	if (sigsetsize != sizeof(sigset_t))
3671 		return -EINVAL;
3672 
3673 	if (get_compat_sigset(&s, uthese))
3674 		return -EFAULT;
3675 
3676 	if (uts) {
3677 		if (get_timespec64(&t, uts))
3678 			return -EFAULT;
3679 	}
3680 
3681 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3682 
3683 	if (ret > 0 && uinfo) {
3684 		if (copy_siginfo_to_user32(uinfo, &info))
3685 			ret = -EFAULT;
3686 	}
3687 
3688 	return ret;
3689 }
3690 
3691 #ifdef CONFIG_COMPAT_32BIT_TIME
3692 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3693 		struct compat_siginfo __user *, uinfo,
3694 		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3695 {
3696 	sigset_t s;
3697 	struct timespec64 t;
3698 	kernel_siginfo_t info;
3699 	long ret;
3700 
3701 	if (sigsetsize != sizeof(sigset_t))
3702 		return -EINVAL;
3703 
3704 	if (get_compat_sigset(&s, uthese))
3705 		return -EFAULT;
3706 
3707 	if (uts) {
3708 		if (get_old_timespec32(&t, uts))
3709 			return -EFAULT;
3710 	}
3711 
3712 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3713 
3714 	if (ret > 0 && uinfo) {
3715 		if (copy_siginfo_to_user32(uinfo, &info))
3716 			ret = -EFAULT;
3717 	}
3718 
3719 	return ret;
3720 }
3721 #endif
3722 #endif
3723 
3724 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3725 {
3726 	clear_siginfo(info);
3727 	info->si_signo = sig;
3728 	info->si_errno = 0;
3729 	info->si_code = SI_USER;
3730 	info->si_pid = task_tgid_vnr(current);
3731 	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3732 }
3733 
3734 /**
3735  *  sys_kill - send a signal to a process
3736  *  @pid: the PID of the process
3737  *  @sig: signal to be sent
3738  */
3739 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3740 {
3741 	struct kernel_siginfo info;
3742 
3743 	prepare_kill_siginfo(sig, &info);
3744 
3745 	return kill_something_info(sig, &info, pid);
3746 }
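
/*
 * Illustrative userspace sketch (not part of this file): kill() maps onto
 * this syscall.  A positive pid targets one process; pid 0 targets the
 * caller's process group and a negative pid the process group -pid, both
 * dispatched by kill_something_info().  The helper and its arguments are
 * invented for the example.
 *
 *      #include <signal.h>
 *
 *      static void shut_down(pid_t child_pid, pid_t pgid)
 *      {
 *              kill(child_pid, SIGTERM);       // one process
 *              kill(-pgid, SIGHUP);            // every member of group pgid
 *      }
 */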
3747 
3748 /*
3749  * Verify that the signaler and signalee either are in the same pid namespace
3750  * or that the signaler's pid namespace is an ancestor of the signalee's pid
3751  * namespace.
3752  */
3753 static bool access_pidfd_pidns(struct pid *pid)
3754 {
3755 	struct pid_namespace *active = task_active_pid_ns(current);
3756 	struct pid_namespace *p = ns_of_pid(pid);
3757 
3758 	for (;;) {
3759 		if (!p)
3760 			return false;
3761 		if (p == active)
3762 			break;
3763 		p = p->parent;
3764 	}
3765 
3766 	return true;
3767 }
3768 
3769 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3770 		siginfo_t __user *info)
3771 {
3772 #ifdef CONFIG_COMPAT
3773 	/*
3774 	 * Avoid hooking up compat syscalls and instead handle necessary
3775 	 * conversions here. Note, this is a stop-gap measure and should not be
3776 	 * considered a generic solution.
3777 	 */
3778 	if (in_compat_syscall())
3779 		return copy_siginfo_from_user32(
3780 			kinfo, (struct compat_siginfo __user *)info);
3781 #endif
3782 	return copy_siginfo_from_user(kinfo, info);
3783 }
3784 
3785 static struct pid *pidfd_to_pid(const struct file *file)
3786 {
3787 	struct pid *pid;
3788 
3789 	pid = pidfd_pid(file);
3790 	if (!IS_ERR(pid))
3791 		return pid;
3792 
3793 	return tgid_pidfd_to_pid(file);
3794 }
3795 
3796 /**
3797  * sys_pidfd_send_signal - Signal a process through a pidfd
3798  * @pidfd:  file descriptor of the process
3799  * @sig:    signal to send
3800  * @info:   signal info
3801  * @flags:  future flags
3802  *
3803  * The syscall currently only signals via PIDTYPE_PID which covers
3804  * kill(<positive-pid>, <signal>). It does not signal threads or process
3805  * groups.
3806  * In order to extend the syscall to threads and process groups the @flags
3807  * argument should be used. In essence, the @flags argument will determine
3808  * what is signaled and not the file descriptor itself. In other words,
3809  * grouping is a property of the flags argument, not a property of the file
3810  * descriptor.
3811  *
3812  * Return: 0 on success, negative errno on failure
3813  */
3814 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3815 		siginfo_t __user *, info, unsigned int, flags)
3816 {
3817 	int ret;
3818 	struct fd f;
3819 	struct pid *pid;
3820 	kernel_siginfo_t kinfo;
3821 
3822 	/* Enforce that flags is 0 until we add an extension. */
3823 	if (flags)
3824 		return -EINVAL;
3825 
3826 	f = fdget(pidfd);
3827 	if (!f.file)
3828 		return -EBADF;
3829 
3830 	/* Is this a pidfd? */
3831 	pid = pidfd_to_pid(f.file);
3832 	if (IS_ERR(pid)) {
3833 		ret = PTR_ERR(pid);
3834 		goto err;
3835 	}
3836 
3837 	ret = -EINVAL;
3838 	if (!access_pidfd_pidns(pid))
3839 		goto err;
3840 
3841 	if (info) {
3842 		ret = copy_siginfo_from_user_any(&kinfo, info);
3843 		if (unlikely(ret))
3844 			goto err;
3845 
3846 		ret = -EINVAL;
3847 		if (unlikely(sig != kinfo.si_signo))
3848 			goto err;
3849 
3850 		/* Only allow sending arbitrary signals to yourself. */
3851 		ret = -EPERM;
3852 		if ((task_pid(current) != pid) &&
3853 		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3854 			goto err;
3855 	} else {
3856 		prepare_kill_siginfo(sig, &kinfo);
3857 	}
3858 
3859 	ret = kill_pid_info(sig, &kinfo, pid);
3860 
3861 err:
3862 	fdput(f);
3863 	return ret;
3864 }
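
/*
 * Illustrative userspace sketch (not part of this file), assuming the libc
 * headers define SYS_pidfd_open and SYS_pidfd_send_signal; raw syscall(2)
 * is used because wrappers may be missing.  Passing a NULL info lets the
 * kernel build the SI_USER siginfo via prepare_kill_siginfo() above.  The
 * helper name is invented.
 *
 *      #include <signal.h>
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *
 *      static int kill_via_pidfd(pid_t pid, int sig)
 *      {
 *              int ret, pidfd = syscall(SYS_pidfd_open, pid, 0);
 *
 *              if (pidfd < 0)
 *                      return -1;
 *              ret = syscall(SYS_pidfd_send_signal, pidfd, sig, NULL, 0);
 *              close(pidfd);
 *              return ret;
 *      }
 */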
3865 
3866 static int
3867 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3868 {
3869 	struct task_struct *p;
3870 	int error = -ESRCH;
3871 
3872 	rcu_read_lock();
3873 	p = find_task_by_vpid(pid);
3874 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3875 		error = check_kill_permission(sig, info, p);
3876 		/*
3877 		 * The null signal is a permissions and process existence
3878 		 * probe.  No signal is actually delivered.
3879 		 */
3880 		if (!error && sig) {
3881 			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3882 			/*
3883 			 * If lock_task_sighand() failed we pretend the task
3884 			 * dies after receiving the signal. The window is tiny,
3885 			 * and the signal is private anyway.
3886 			 */
3887 			if (unlikely(error == -ESRCH))
3888 				error = 0;
3889 		}
3890 	}
3891 	rcu_read_unlock();
3892 
3893 	return error;
3894 }
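
/*
 * Illustrative userspace sketch (not part of this file): the null signal
 * described above doubles as an existence and permission probe, since the
 * checks run but nothing is delivered.  The helper name is invented.
 *
 *      #include <errno.h>
 *      #include <signal.h>
 *
 *      // 1: pid exists and we may signal it, 0: gone, -1: exists but EPERM
 *      static int probe_pid(pid_t pid)
 *      {
 *              if (kill(pid, 0) == 0)
 *                      return 1;
 *              return errno == ESRCH ? 0 : -1;
 *      }
 */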
3895 
3896 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3897 {
3898 	struct kernel_siginfo info;
3899 
3900 	clear_siginfo(&info);
3901 	info.si_signo = sig;
3902 	info.si_errno = 0;
3903 	info.si_code = SI_TKILL;
3904 	info.si_pid = task_tgid_vnr(current);
3905 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3906 
3907 	return do_send_specific(tgid, pid, sig, &info);
3908 }
3909 
3910 /**
3911  *  sys_tgkill - send signal to one specific thread
3912  *  @tgid: the thread group ID of the thread
3913  *  @pid: the PID of the thread
3914  *  @sig: signal to be sent
3915  *
3916  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
3917  *  exists but no longer belongs to the target process. This
3918  *  method solves the problem of threads exiting and PIDs getting reused.
3919  */
3920 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3921 {
3922 	/* This is only valid for single tasks */
3923 	if (pid <= 0 || tgid <= 0)
3924 		return -EINVAL;
3925 
3926 	return do_tkill(tgid, pid, sig);
3927 }
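
/*
 * Illustrative userspace sketch (not part of this file): directing a signal
 * at one thread of the caller's own thread group.  Older libcs have no
 * tgkill() wrapper, so the raw syscall form is shown; the helper name is
 * invented.
 *
 *      #include <signal.h>
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *
 *      static int signal_own_thread(pid_t tid, int sig)
 *      {
 *              // getpid() returns the tgid, so a recycled tid in some
 *              // other process cannot be hit by mistake
 *              return syscall(SYS_tgkill, getpid(), tid, sig);
 *      }
 */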
3928 
3929 /**
3930  *  sys_tkill - send signal to one specific task
3931  *  @pid: the PID of the task
3932  *  @sig: signal to be sent
3933  *
3934  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
3935  */
3936 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3937 {
3938 	/* This is only valid for single tasks */
3939 	if (pid <= 0)
3940 		return -EINVAL;
3941 
3942 	return do_tkill(0, pid, sig);
3943 }
3944 
3945 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3946 {
3947 	/* Not even root can pretend to send signals from the kernel.
3948 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3949 	 */
3950 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3951 	    (task_pid_vnr(current) != pid))
3952 		return -EPERM;
3953 
3954 	/* POSIX.1b doesn't mention process groups.  */
3955 	return kill_proc_info(sig, info, pid);
3956 }
3957 
3958 /**
3959  *  sys_rt_sigqueueinfo - send signal information to a process
3960  *  @pid: the PID of the process
3961  *  @sig: signal to be sent
3962  *  @uinfo: signal info to be sent
3963  */
3964 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3965 		siginfo_t __user *, uinfo)
3966 {
3967 	kernel_siginfo_t info;
3968 	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3969 	if (unlikely(ret))
3970 		return ret;
3971 	return do_rt_sigqueueinfo(pid, sig, &info);
3972 }
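
/*
 * Illustrative userspace sketch (not part of this file): the POSIX
 * sigqueue() wrapper is built on rt_sigqueueinfo and lets the sender attach
 * one word of data, which an SA_SIGINFO handler sees as si_value.  The
 * helper name and job_id parameter are invented.
 *
 *      #include <signal.h>
 *
 *      static int send_job_id(pid_t pid, int job_id)
 *      {
 *              union sigval value = { .sival_int = job_id };
 *
 *              return sigqueue(pid, SIGRTMIN, value);
 *      }
 */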
3973 
3974 #ifdef CONFIG_COMPAT
3975 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3976 			compat_pid_t, pid,
3977 			int, sig,
3978 			struct compat_siginfo __user *, uinfo)
3979 {
3980 	kernel_siginfo_t info;
3981 	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3982 	if (unlikely(ret))
3983 		return ret;
3984 	return do_rt_sigqueueinfo(pid, sig, &info);
3985 }
3986 #endif
3987 
3988 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3989 {
3990 	/* This is only valid for single tasks */
3991 	if (pid <= 0 || tgid <= 0)
3992 		return -EINVAL;
3993 
3994 	/* Not even root can pretend to send signals from the kernel.
3995 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3996 	 */
3997 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3998 	    (task_pid_vnr(current) != pid))
3999 		return -EPERM;
4000 
4001 	return do_send_specific(tgid, pid, sig, info);
4002 }
4003 
4004 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4005 		siginfo_t __user *, uinfo)
4006 {
4007 	kernel_siginfo_t info;
4008 	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4009 	if (unlikely(ret))
4010 		return ret;
4011 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4012 }
4013 
4014 #ifdef CONFIG_COMPAT
4015 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4016 			compat_pid_t, tgid,
4017 			compat_pid_t, pid,
4018 			int, sig,
4019 			struct compat_siginfo __user *, uinfo)
4020 {
4021 	kernel_siginfo_t info;
4022 	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4023 	if (unlikely(ret))
4024 		return ret;
4025 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4026 }
4027 #endif
4028 
4029 /*
4030  * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4031  */
4032 void kernel_sigaction(int sig, __sighandler_t action)
4033 {
4034 	spin_lock_irq(&current->sighand->siglock);
4035 	current->sighand->action[sig - 1].sa.sa_handler = action;
4036 	if (action == SIG_IGN) {
4037 		sigset_t mask;
4038 
4039 		sigemptyset(&mask);
4040 		sigaddset(&mask, sig);
4041 
4042 		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4043 		flush_sigqueue_mask(&mask, &current->pending);
4044 		recalc_sigpending();
4045 	}
4046 	spin_unlock_irq(&current->sighand->siglock);
4047 }
4048 EXPORT_SYMBOL(kernel_sigaction);
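
/*
 * Illustrative in-kernel sketch (not part of this file): helpers such as
 * allow_signal()/disallow_signal() in <linux/signal.h> are thin wrappers
 * around kernel_sigaction().  A typical kthread opts in to a signal and
 * then polls signal_pending() from its main loop; the thread function name
 * is invented and the usual kthread includes are assumed.
 *
 *      static int my_kthread(void *data)
 *      {
 *              allow_signal(SIGTERM);
 *
 *              while (!kthread_should_stop()) {
 *                      if (signal_pending(current)) {
 *                              flush_signals(current);
 *                              break;
 *                      }
 *                      schedule_timeout_interruptible(HZ);
 *              }
 *              return 0;
 *      }
 */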
4049 
4050 void __weak sigaction_compat_abi(struct k_sigaction *act,
4051 		struct k_sigaction *oact)
4052 {
4053 }
4054 
4055 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4056 {
4057 	struct task_struct *p = current, *t;
4058 	struct k_sigaction *k;
4059 	sigset_t mask;
4060 
4061 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4062 		return -EINVAL;
4063 
4064 	k = &p->sighand->action[sig-1];
4065 
4066 	spin_lock_irq(&p->sighand->siglock);
4067 	if (oact)
4068 		*oact = *k;
4069 
4070 	/*
4071 	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4072 	 * e.g. by having an architecture use the bit in their uapi.
4073 	 */
4074 	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4075 
4076 	/*
4077 	 * Clear unknown flag bits in order to allow userspace to detect missing
4078 	 * support for flag bits and to allow the kernel to use non-uapi bits
4079 	 * internally.
4080 	 */
4081 	if (act)
4082 		act->sa.sa_flags &= UAPI_SA_FLAGS;
4083 	if (oact)
4084 		oact->sa.sa_flags &= UAPI_SA_FLAGS;
4085 
4086 	sigaction_compat_abi(act, oact);
4087 
4088 	if (act) {
4089 		sigdelsetmask(&act->sa.sa_mask,
4090 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
4091 		*k = *act;
4092 		/*
4093 		 * POSIX 3.3.1.3:
4094 		 *  "Setting a signal action to SIG_IGN for a signal that is
4095 		 *   pending shall cause the pending signal to be discarded,
4096 		 *   whether or not it is blocked."
4097 		 *
4098 		 *  "Setting a signal action to SIG_DFL for a signal that is
4099 		 *   pending and whose default action is to ignore the signal
4100 		 *   (for example, SIGCHLD), shall cause the pending signal to
4101 		 *   be discarded, whether or not it is blocked"
4102 		 */
4103 		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4104 			sigemptyset(&mask);
4105 			sigaddset(&mask, sig);
4106 			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4107 			for_each_thread(p, t)
4108 				flush_sigqueue_mask(&mask, &t->pending);
4109 		}
4110 	}
4111 
4112 	spin_unlock_irq(&p->sighand->siglock);
4113 	return 0;
4114 }
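
/*
 * Illustrative userspace sketch (not part of this file) of the POSIX rule
 * quoted above: a signal left pending while blocked is discarded the moment
 * its action becomes SIG_IGN.  The helper name is invented.
 *
 *      #include <assert.h>
 *      #include <signal.h>
 *
 *      static void demo_discard(void)
 *      {
 *              sigset_t set, pending;
 *
 *              sigemptyset(&set);
 *              sigaddset(&set, SIGUSR1);
 *              sigprocmask(SIG_BLOCK, &set, NULL);
 *              raise(SIGUSR1);                 // pending, but blocked
 *              signal(SIGUSR1, SIG_IGN);       // pending instance is discarded
 *              sigpending(&pending);
 *              assert(!sigismember(&pending, SIGUSR1));
 *      }
 */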
4115 
4116 static int
4117 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4118 		size_t min_ss_size)
4119 {
4120 	struct task_struct *t = current;
4121 
4122 	if (oss) {
4123 		memset(oss, 0, sizeof(stack_t));
4124 		oss->ss_sp = (void __user *) t->sas_ss_sp;
4125 		oss->ss_size = t->sas_ss_size;
4126 		oss->ss_flags = sas_ss_flags(sp) |
4127 			(current->sas_ss_flags & SS_FLAG_BITS);
4128 	}
4129 
4130 	if (ss) {
4131 		void __user *ss_sp = ss->ss_sp;
4132 		size_t ss_size = ss->ss_size;
4133 		unsigned ss_flags = ss->ss_flags;
4134 		int ss_mode;
4135 
4136 		if (unlikely(on_sig_stack(sp)))
4137 			return -EPERM;
4138 
4139 		ss_mode = ss_flags & ~SS_FLAG_BITS;
4140 		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4141 				ss_mode != 0))
4142 			return -EINVAL;
4143 
4144 		if (ss_mode == SS_DISABLE) {
4145 			ss_size = 0;
4146 			ss_sp = NULL;
4147 		} else {
4148 			if (unlikely(ss_size < min_ss_size))
4149 				return -ENOMEM;
4150 		}
4151 
4152 		t->sas_ss_sp = (unsigned long) ss_sp;
4153 		t->sas_ss_size = ss_size;
4154 		t->sas_ss_flags = ss_flags;
4155 	}
4156 	return 0;
4157 }
4158 
4159 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
4160 {
4161 	stack_t new, old;
4162 	int err;
4163 	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4164 		return -EFAULT;
4165 	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4166 			      current_user_stack_pointer(),
4167 			      MINSIGSTKSZ);
4168 	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4169 		err = -EFAULT;
4170 	return err;
4171 }
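
/*
 * Illustrative userspace sketch (not part of this file): the usual pairing
 * of sigaltstack() with an SA_ONSTACK handler, so that a SIGSEGV caused by
 * stack overflow can still run its handler.  The helper and handler names
 * are invented and error checking is omitted.
 *
 *      #include <signal.h>
 *      #include <stdlib.h>
 *
 *      static void segv_handler(int sig)
 *      {
 *              _Exit(1);
 *      }
 *
 *      static void setup_altstack(void)
 *      {
 *              stack_t ss = { .ss_sp = malloc(SIGSTKSZ), .ss_size = SIGSTKSZ };
 *              struct sigaction sa = { 0 };
 *
 *              sigaltstack(&ss, NULL);
 *              sa.sa_handler = segv_handler;
 *              sa.sa_flags = SA_ONSTACK;
 *              sigemptyset(&sa.sa_mask);
 *              sigaction(SIGSEGV, &sa, NULL);
 *      }
 */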
4172 
4173 int restore_altstack(const stack_t __user *uss)
4174 {
4175 	stack_t new;
4176 	if (copy_from_user(&new, uss, sizeof(stack_t)))
4177 		return -EFAULT;
4178 	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4179 			     MINSIGSTKSZ);
4180 	/* squash all but EFAULT for now */
4181 	return 0;
4182 }
4183 
4184 int __save_altstack(stack_t __user *uss, unsigned long sp)
4185 {
4186 	struct task_struct *t = current;
4187 	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4188 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4189 		__put_user(t->sas_ss_size, &uss->ss_size);
4190 	if (err)
4191 		return err;
4192 	if (t->sas_ss_flags & SS_AUTODISARM)
4193 		sas_ss_reset(t);
4194 	return 0;
4195 }
4196 
4197 #ifdef CONFIG_COMPAT
4198 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4199 				 compat_stack_t __user *uoss_ptr)
4200 {
4201 	stack_t uss, uoss;
4202 	int ret;
4203 
4204 	if (uss_ptr) {
4205 		compat_stack_t uss32;
4206 		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4207 			return -EFAULT;
4208 		uss.ss_sp = compat_ptr(uss32.ss_sp);
4209 		uss.ss_flags = uss32.ss_flags;
4210 		uss.ss_size = uss32.ss_size;
4211 	}
4212 	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4213 			     compat_user_stack_pointer(),
4214 			     COMPAT_MINSIGSTKSZ);
4215 	if (ret >= 0 && uoss_ptr)  {
4216 		compat_stack_t old;
4217 		memset(&old, 0, sizeof(old));
4218 		old.ss_sp = ptr_to_compat(uoss.ss_sp);
4219 		old.ss_flags = uoss.ss_flags;
4220 		old.ss_size = uoss.ss_size;
4221 		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4222 			ret = -EFAULT;
4223 	}
4224 	return ret;
4225 }
4226 
4227 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4228 			const compat_stack_t __user *, uss_ptr,
4229 			compat_stack_t __user *, uoss_ptr)
4230 {
4231 	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4232 }
4233 
4234 int compat_restore_altstack(const compat_stack_t __user *uss)
4235 {
4236 	int err = do_compat_sigaltstack(uss, NULL);
4237 	/* squash all but -EFAULT for now */
4238 	return err == -EFAULT ? err : 0;
4239 }
4240 
4241 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4242 {
4243 	int err;
4244 	struct task_struct *t = current;
4245 	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4246 			 &uss->ss_sp) |
4247 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4248 		__put_user(t->sas_ss_size, &uss->ss_size);
4249 	if (err)
4250 		return err;
4251 	if (t->sas_ss_flags & SS_AUTODISARM)
4252 		sas_ss_reset(t);
4253 	return 0;
4254 }
4255 #endif
4256 
4257 #ifdef __ARCH_WANT_SYS_SIGPENDING
4258 
4259 /**
4260  *  sys_sigpending - examine pending signals
4261  *  @uset: where the mask of pending signals is returned
4262  */
4263 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4264 {
4265 	sigset_t set;
4266 
4267 	if (sizeof(old_sigset_t) > sizeof(*uset))
4268 		return -EINVAL;
4269 
4270 	do_sigpending(&set);
4271 
4272 	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4273 		return -EFAULT;
4274 
4275 	return 0;
4276 }
4277 
4278 #ifdef CONFIG_COMPAT
4279 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4280 {
4281 	sigset_t set;
4282 
4283 	do_sigpending(&set);
4284 
4285 	return put_user(set.sig[0], set32);
4286 }
4287 #endif
4288 
4289 #endif
4290 
4291 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4292 /**
4293  *  sys_sigprocmask - examine and change blocked signals
4294  *  @how: whether to add, remove, or set signals
4295  *  @nset: signals to add or remove (if non-null)
4296  *  @oset: previous value of signal mask if non-null
4297  *
4298  * Some platforms have their own version with special arguments;
4299  * others support only sys_rt_sigprocmask.
4300  */
4301 
4302 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4303 		old_sigset_t __user *, oset)
4304 {
4305 	old_sigset_t old_set, new_set;
4306 	sigset_t new_blocked;
4307 
4308 	old_set = current->blocked.sig[0];
4309 
4310 	if (nset) {
4311 		if (copy_from_user(&new_set, nset, sizeof(*nset)))
4312 			return -EFAULT;
4313 
4314 		new_blocked = current->blocked;
4315 
4316 		switch (how) {
4317 		case SIG_BLOCK:
4318 			sigaddsetmask(&new_blocked, new_set);
4319 			break;
4320 		case SIG_UNBLOCK:
4321 			sigdelsetmask(&new_blocked, new_set);
4322 			break;
4323 		case SIG_SETMASK:
4324 			new_blocked.sig[0] = new_set;
4325 			break;
4326 		default:
4327 			return -EINVAL;
4328 		}
4329 
4330 		set_current_blocked(&new_blocked);
4331 	}
4332 
4333 	if (oset) {
4334 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
4335 			return -EFAULT;
4336 	}
4337 
4338 	return 0;
4339 }
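
/*
 * Illustrative userspace sketch (not part of this file): the three @how
 * modes above correspond to the classic sigprocmask() calls; a typical use
 * is masking SIGINT around a critical section.  The helper name and the
 * critical() callback are invented.
 *
 *      #include <signal.h>
 *
 *      static void run_uninterrupted(void (*critical)(void))
 *      {
 *              sigset_t set, old;
 *
 *              sigemptyset(&set);
 *              sigaddset(&set, SIGINT);
 *              sigprocmask(SIG_BLOCK, &set, &old);     // how == SIG_BLOCK
 *              critical();
 *              sigprocmask(SIG_SETMASK, &old, NULL);   // how == SIG_SETMASK
 *      }
 */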
4340 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4341 
4342 #ifndef CONFIG_ODD_RT_SIGACTION
4343 /**
4344  *  sys_rt_sigaction - alter an action taken by a process
4345  *  @sig: signal to be sent
4346  *  @act: new sigaction
4347  *  @oact: used to save the previous sigaction
4348  *  @sigsetsize: size of sigset_t type
4349  */
4350 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4351 		const struct sigaction __user *, act,
4352 		struct sigaction __user *, oact,
4353 		size_t, sigsetsize)
4354 {
4355 	struct k_sigaction new_sa, old_sa;
4356 	int ret;
4357 
4358 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4359 	if (sigsetsize != sizeof(sigset_t))
4360 		return -EINVAL;
4361 
4362 	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4363 		return -EFAULT;
4364 
4365 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4366 	if (ret)
4367 		return ret;
4368 
4369 	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4370 		return -EFAULT;
4371 
4372 	return 0;
4373 }
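
/*
 * Illustrative userspace sketch (not part of this file): the POSIX
 * sigaction() wrapper is built on rt_sigaction and passes the kernel's
 * sigset size as @sigsetsize.  An SA_SIGINFO handler receives the siginfo_t
 * that the kernel copies out.  The handler and helper names are invented,
 * and printf() is used only for brevity (it is not async-signal-safe).
 *
 *      #include <signal.h>
 *      #include <stdio.h>
 *
 *      static void usr1_handler(int sig, siginfo_t *info, void *ucontext)
 *      {
 *              printf("SIGUSR1 from pid %d\n", (int)info->si_pid);
 *      }
 *
 *      static void install_usr1(void)
 *      {
 *              struct sigaction sa = { 0 };
 *
 *              sa.sa_sigaction = usr1_handler;
 *              sa.sa_flags = SA_SIGINFO;
 *              sigemptyset(&sa.sa_mask);
 *              sigaction(SIGUSR1, &sa, NULL);
 *      }
 */
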
4374 #ifdef CONFIG_COMPAT
4375 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4376 		const struct compat_sigaction __user *, act,
4377 		struct compat_sigaction __user *, oact,
4378 		compat_size_t, sigsetsize)
4379 {
4380 	struct k_sigaction new_ka, old_ka;
4381 #ifdef __ARCH_HAS_SA_RESTORER
4382 	compat_uptr_t restorer;
4383 #endif
4384 	int ret;
4385 
4386 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4387 	if (sigsetsize != sizeof(compat_sigset_t))
4388 		return -EINVAL;
4389 
4390 	if (act) {
4391 		compat_uptr_t handler;
4392 		ret = get_user(handler, &act->sa_handler);
4393 		new_ka.sa.sa_handler = compat_ptr(handler);
4394 #ifdef __ARCH_HAS_SA_RESTORER
4395 		ret |= get_user(restorer, &act->sa_restorer);
4396 		new_ka.sa.sa_restorer = compat_ptr(restorer);
4397 #endif
4398 		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4399 		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4400 		if (ret)
4401 			return -EFAULT;
4402 	}
4403 
4404 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4405 	if (!ret && oact) {
4406 		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4407 			       &oact->sa_handler);
4408 		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4409 					 sizeof(oact->sa_mask));
4410 		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4411 #ifdef __ARCH_HAS_SA_RESTORER
4412 		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4413 				&oact->sa_restorer);
4414 #endif
4415 	}
4416 	return ret;
4417 }
4418 #endif
4419 #endif /* !CONFIG_ODD_RT_SIGACTION */
4420 
4421 #ifdef CONFIG_OLD_SIGACTION
4422 SYSCALL_DEFINE3(sigaction, int, sig,
4423 		const struct old_sigaction __user *, act,
4424 	        struct old_sigaction __user *, oact)
4425 {
4426 	struct k_sigaction new_ka, old_ka;
4427 	int ret;
4428 
4429 	if (act) {
4430 		old_sigset_t mask;
4431 		if (!access_ok(act, sizeof(*act)) ||
4432 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4433 		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4434 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4435 		    __get_user(mask, &act->sa_mask))
4436 			return -EFAULT;
4437 #ifdef __ARCH_HAS_KA_RESTORER
4438 		new_ka.ka_restorer = NULL;
4439 #endif
4440 		siginitset(&new_ka.sa.sa_mask, mask);
4441 	}
4442 
4443 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4444 
4445 	if (!ret && oact) {
4446 		if (!access_ok(oact, sizeof(*oact)) ||
4447 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4448 		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4449 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4450 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4451 			return -EFAULT;
4452 	}
4453 
4454 	return ret;
4455 }
4456 #endif
4457 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4458 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4459 		const struct compat_old_sigaction __user *, act,
4460 	        struct compat_old_sigaction __user *, oact)
4461 {
4462 	struct k_sigaction new_ka, old_ka;
4463 	int ret;
4464 	compat_old_sigset_t mask;
4465 	compat_uptr_t handler, restorer;
4466 
4467 	if (act) {
4468 		if (!access_ok(act, sizeof(*act)) ||
4469 		    __get_user(handler, &act->sa_handler) ||
4470 		    __get_user(restorer, &act->sa_restorer) ||
4471 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4472 		    __get_user(mask, &act->sa_mask))
4473 			return -EFAULT;
4474 
4475 #ifdef __ARCH_HAS_KA_RESTORER
4476 		new_ka.ka_restorer = NULL;
4477 #endif
4478 		new_ka.sa.sa_handler = compat_ptr(handler);
4479 		new_ka.sa.sa_restorer = compat_ptr(restorer);
4480 		siginitset(&new_ka.sa.sa_mask, mask);
4481 	}
4482 
4483 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4484 
4485 	if (!ret && oact) {
4486 		if (!access_ok(oact, sizeof(*oact)) ||
4487 		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4488 			       &oact->sa_handler) ||
4489 		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4490 			       &oact->sa_restorer) ||
4491 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4492 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4493 			return -EFAULT;
4494 	}
4495 	return ret;
4496 }
4497 #endif
4498 
4499 #ifdef CONFIG_SGETMASK_SYSCALL
4500 
4501 /*
4502  * For backwards compatibility.  Functionality superseded by sigprocmask.
4503  */
4504 SYSCALL_DEFINE0(sgetmask)
4505 {
4506 	/* SMP safe */
4507 	return current->blocked.sig[0];
4508 }
4509 
4510 SYSCALL_DEFINE1(ssetmask, int, newmask)
4511 {
4512 	int old = current->blocked.sig[0];
4513 	sigset_t newset;
4514 
4515 	siginitset(&newset, newmask);
4516 	set_current_blocked(&newset);
4517 
4518 	return old;
4519 }
4520 #endif /* CONFIG_SGETMASK_SYSCALL */
4521 
4522 #ifdef __ARCH_WANT_SYS_SIGNAL
4523 /*
4524  * For backwards compatibility.  Functionality superseded by sigaction.
4525  */
4526 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4527 {
4528 	struct k_sigaction new_sa, old_sa;
4529 	int ret;
4530 
4531 	new_sa.sa.sa_handler = handler;
4532 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4533 	sigemptyset(&new_sa.sa.sa_mask);
4534 
4535 	ret = do_sigaction(sig, &new_sa, &old_sa);
4536 
4537 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4538 }
4539 #endif /* __ARCH_WANT_SYS_SIGNAL */
4540 
4541 #ifdef __ARCH_WANT_SYS_PAUSE
4542 
4543 SYSCALL_DEFINE0(pause)
4544 {
4545 	while (!signal_pending(current)) {
4546 		__set_current_state(TASK_INTERRUPTIBLE);
4547 		schedule();
4548 	}
4549 	return -ERESTARTNOHAND;
4550 }
4551 
4552 #endif
4553 
4554 static int sigsuspend(sigset_t *set)
4555 {
4556 	current->saved_sigmask = current->blocked;
4557 	set_current_blocked(set);
4558 
4559 	while (!signal_pending(current)) {
4560 		__set_current_state(TASK_INTERRUPTIBLE);
4561 		schedule();
4562 	}
4563 	set_restore_sigmask();
4564 	return -ERESTARTNOHAND;
4565 }
4566 
4567 /**
4568  *  sys_rt_sigsuspend - replace the signal mask with the @unewset
4569  *	value until a signal is received
4570  *  @unewset: new signal mask value
4571  *  @sigsetsize: size of sigset_t type
4572  */
4573 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4574 {
4575 	sigset_t newset;
4576 
4577 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4578 	if (sigsetsize != sizeof(sigset_t))
4579 		return -EINVAL;
4580 
4581 	if (copy_from_user(&newset, unewset, sizeof(newset)))
4582 		return -EFAULT;
4583 	return sigsuspend(&newset);
4584 }
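
/*
 * Illustrative userspace sketch (not part of this file): the classic
 * race-free wait built on sigsuspend() - block the signal, test the
 * condition, then atomically restore the old mask and sleep.  It assumes a
 * SIGUSR1 handler installed elsewhere sets got_usr1; the names are
 * invented.
 *
 *      #include <signal.h>
 *
 *      static volatile sig_atomic_t got_usr1;
 *
 *      static void wait_for_event(void)
 *      {
 *              sigset_t block, old;
 *
 *              sigemptyset(&block);
 *              sigaddset(&block, SIGUSR1);
 *              sigprocmask(SIG_BLOCK, &block, &old);
 *
 *              while (!got_usr1)
 *                      sigsuspend(&old);       // old mask has SIGUSR1 unblocked
 *
 *              sigprocmask(SIG_SETMASK, &old, NULL);
 *      }
 */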
4585 
4586 #ifdef CONFIG_COMPAT
4587 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4588 {
4589 	sigset_t newset;
4590 
4591 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4592 	if (sigsetsize != sizeof(sigset_t))
4593 		return -EINVAL;
4594 
4595 	if (get_compat_sigset(&newset, unewset))
4596 		return -EFAULT;
4597 	return sigsuspend(&newset);
4598 }
4599 #endif
4600 
4601 #ifdef CONFIG_OLD_SIGSUSPEND
4602 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4603 {
4604 	sigset_t blocked;
4605 	siginitset(&blocked, mask);
4606 	return sigsuspend(&blocked);
4607 }
4608 #endif
4609 #ifdef CONFIG_OLD_SIGSUSPEND3
4610 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4611 {
4612 	sigset_t blocked;
4613 	siginitset(&blocked, mask);
4614 	return sigsuspend(&blocked);
4615 }
4616 #endif
4617 
4618 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4619 {
4620 	return NULL;
4621 }
4622 
4623 static inline void siginfo_buildtime_checks(void)
4624 {
4625 	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4626 
4627 	/* Verify the offsets in the two siginfos match */
4628 #define CHECK_OFFSET(field) \
4629 	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4630 
4631 	/* kill */
4632 	CHECK_OFFSET(si_pid);
4633 	CHECK_OFFSET(si_uid);
4634 
4635 	/* timer */
4636 	CHECK_OFFSET(si_tid);
4637 	CHECK_OFFSET(si_overrun);
4638 	CHECK_OFFSET(si_value);
4639 
4640 	/* rt */
4641 	CHECK_OFFSET(si_pid);
4642 	CHECK_OFFSET(si_uid);
4643 	CHECK_OFFSET(si_value);
4644 
4645 	/* sigchld */
4646 	CHECK_OFFSET(si_pid);
4647 	CHECK_OFFSET(si_uid);
4648 	CHECK_OFFSET(si_status);
4649 	CHECK_OFFSET(si_utime);
4650 	CHECK_OFFSET(si_stime);
4651 
4652 	/* sigfault */
4653 	CHECK_OFFSET(si_addr);
4654 	CHECK_OFFSET(si_trapno);
4655 	CHECK_OFFSET(si_addr_lsb);
4656 	CHECK_OFFSET(si_lower);
4657 	CHECK_OFFSET(si_upper);
4658 	CHECK_OFFSET(si_pkey);
4659 	CHECK_OFFSET(si_perf_data);
4660 	CHECK_OFFSET(si_perf_type);
4661 
4662 	/* sigpoll */
4663 	CHECK_OFFSET(si_band);
4664 	CHECK_OFFSET(si_fd);
4665 
4666 	/* sigsys */
4667 	CHECK_OFFSET(si_call_addr);
4668 	CHECK_OFFSET(si_syscall);
4669 	CHECK_OFFSET(si_arch);
4670 #undef CHECK_OFFSET
4671 
4672 	/* usb asyncio */
4673 	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4674 		     offsetof(struct siginfo, si_addr));
4675 	if (sizeof(int) == sizeof(void __user *)) {
4676 		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4677 			     sizeof(void __user *));
4678 	} else {
4679 		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4680 			      sizeof_field(struct siginfo, si_uid)) !=
4681 			     sizeof(void __user *));
4682 		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4683 			     offsetof(struct siginfo, si_uid));
4684 	}
4685 #ifdef CONFIG_COMPAT
4686 	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4687 		     offsetof(struct compat_siginfo, si_addr));
4688 	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4689 		     sizeof(compat_uptr_t));
4690 	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4691 		     sizeof_field(struct siginfo, si_pid));
4692 #endif
4693 }
4694 
4695 void __init signals_init(void)
4696 {
4697 	siginfo_buildtime_checks();
4698 
4699 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4700 }
4701 
4702 #ifdef CONFIG_KGDB_KDB
4703 #include <linux/kdb.h>
4704 /*
4705  * kdb_send_sig - Allows kdb to send signals without exposing
4706  * signal internals.  This function checks if the required locks are
4707  * available before calling the main signal code, to avoid kdb
4708  * deadlocks.
4709  */
4710 void kdb_send_sig(struct task_struct *t, int sig)
4711 {
4712 	static struct task_struct *kdb_prev_t;
4713 	int new_t, ret;
4714 	if (!spin_trylock(&t->sighand->siglock)) {
4715 		kdb_printf("Can't do kill command now.\n"
4716 			   "The sigmask lock is held somewhere else in the "
4717 			   "kernel, try again later\n");
4718 		return;
4719 	}
4720 	new_t = kdb_prev_t != t;
4721 	kdb_prev_t = t;
4722 	if (t->state != TASK_RUNNING && new_t) {
4723 		spin_unlock(&t->sighand->siglock);
4724 		kdb_printf("Process is not RUNNING, sending a signal from "
4725 			   "kdb risks deadlock\n"
4726 			   "on the run queue locks. "
4727 			   "The signal has _not_ been sent.\n"
4728 			   "Reissue the kill command if you want to risk "
4729 			   "the deadlock.\n");
4730 		return;
4731 	}
4732 	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4733 	spin_unlock(&t->sighand->siglock);
4734 	if (ret)
4735 		kdb_printf("Failed to deliver signal %d to process %d.\n",
4736 			   sig, t->pid);
4737 	else
4738 		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4739 }
4740 #endif	/* CONFIG_KGDB_KDB */
4741