xref: /linux/kernel/signal.c (revision e2683c8868d03382da7e1ce8453b543a043066d1)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/kernel/signal.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  *
7  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
8  *
9  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
10  *		Changes to use preallocated sigqueue structures
11  *		to allow signals to be sent reliably.
12  */
13 
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/init.h>
17 #include <linux/sched/mm.h>
18 #include <linux/sched/user.h>
19 #include <linux/sched/debug.h>
20 #include <linux/sched/task.h>
21 #include <linux/sched/task_stack.h>
22 #include <linux/sched/cputime.h>
23 #include <linux/file.h>
24 #include <linux/fs.h>
25 #include <linux/mm.h>
26 #include <linux/proc_fs.h>
27 #include <linux/tty.h>
28 #include <linux/binfmts.h>
29 #include <linux/coredump.h>
30 #include <linux/security.h>
31 #include <linux/syscalls.h>
32 #include <linux/ptrace.h>
33 #include <linux/signal.h>
34 #include <linux/signalfd.h>
35 #include <linux/ratelimit.h>
36 #include <linux/task_work.h>
37 #include <linux/capability.h>
38 #include <linux/freezer.h>
39 #include <linux/pid_namespace.h>
40 #include <linux/nsproxy.h>
41 #include <linux/user_namespace.h>
42 #include <linux/uprobes.h>
43 #include <linux/compat.h>
44 #include <linux/cn_proc.h>
45 #include <linux/compiler.h>
46 #include <linux/posix-timers.h>
47 #include <linux/cgroup.h>
48 #include <linux/audit.h>
49 #include <linux/sysctl.h>
50 #include <uapi/linux/pidfd.h>
51 
52 #define CREATE_TRACE_POINTS
53 #include <trace/events/signal.h>
54 
55 #include <asm/param.h>
56 #include <linux/uaccess.h>
57 #include <asm/unistd.h>
58 #include <asm/siginfo.h>
59 #include <asm/cacheflush.h>
60 #include <asm/syscall.h>	/* for syscall_get_* */
61 
62 #include "time/posix-timers.h"
63 
64 /*
65  * SLAB caches for signal bits.
66  */
67 
68 static struct kmem_cache *sigqueue_cachep;
69 
70 int print_fatal_signals __read_mostly;
71 
72 static void __user *sig_handler(struct task_struct *t, int sig)
73 {
74 	return t->sighand->action[sig - 1].sa.sa_handler;
75 }
76 
77 static inline bool sig_handler_ignored(void __user *handler, int sig)
78 {
79 	/* Is it explicitly or implicitly ignored? */
80 	return handler == SIG_IGN ||
81 	       (handler == SIG_DFL && sig_kernel_ignore(sig));
82 }
83 
84 static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
85 {
86 	void __user *handler;
87 
88 	handler = sig_handler(t, sig);
89 
90 	/* SIGKILL and SIGSTOP may not be sent to the global init */
91 	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
92 		return true;
93 
94 	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
95 	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
96 		return true;
97 
98 	/* Only allow kernel generated signals to this kthread */
99 	if (unlikely((t->flags & PF_KTHREAD) &&
100 		     (handler == SIG_KTHREAD_KERNEL) && !force))
101 		return true;
102 
103 	return sig_handler_ignored(handler, sig);
104 }
105 
106 static bool sig_ignored(struct task_struct *t, int sig, bool force)
107 {
108 	/*
109 	 * Blocked signals are never ignored, since the
110 	 * signal handler may change by the time it is
111 	 * unblocked.
112 	 */
113 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
114 		return false;
115 
116 	/*
117 	 * Tracers may want to know about even ignored signal unless it
118 	 * is SIGKILL which can't be reported anyway but can be ignored
119 	 * by SIGNAL_UNKILLABLE task.
120 	 */
121 	if (t->ptrace && sig != SIGKILL)
122 		return false;
123 
124 	return sig_task_ignored(t, sig, force);
125 }
126 
127 /*
128  * Re-calculate pending state from the set of locally pending
129  * signals, globally pending signals, and blocked signals.
130  */
131 static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
132 {
133 	unsigned long ready;
134 	long i;
135 
136 	switch (_NSIG_WORDS) {
137 	default:
138 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
139 			ready |= signal->sig[i] &~ blocked->sig[i];
140 		break;
141 
142 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
143 		ready |= signal->sig[2] &~ blocked->sig[2];
144 		ready |= signal->sig[1] &~ blocked->sig[1];
145 		ready |= signal->sig[0] &~ blocked->sig[0];
146 		break;
147 
148 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
149 		ready |= signal->sig[0] &~ blocked->sig[0];
150 		break;
151 
152 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
153 	}
154 	return ready !=	0;
155 }
156 
157 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
158 
159 static bool recalc_sigpending_tsk(struct task_struct *t)
160 {
161 	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
162 	    PENDING(&t->pending, &t->blocked) ||
163 	    PENDING(&t->signal->shared_pending, &t->blocked) ||
164 	    cgroup_task_frozen(t)) {
165 		set_tsk_thread_flag(t, TIF_SIGPENDING);
166 		return true;
167 	}
168 
169 	/*
170 	 * We must never clear the flag in another thread, or in current
171 	 * when it's possible the current syscall is returning -ERESTART*.
172 	 * So we don't clear it here, and only callers who know they should do.
173 	 */
174 	return false;
175 }
176 
177 void recalc_sigpending(void)
178 {
179 	if (!recalc_sigpending_tsk(current) && !freezing(current)) {
180 		if (unlikely(test_thread_flag(TIF_SIGPENDING)))
181 			clear_thread_flag(TIF_SIGPENDING);
182 	}
183 }
184 EXPORT_SYMBOL(recalc_sigpending);
185 
186 void calculate_sigpending(void)
187 {
188 	/* Have any signals or users of TIF_SIGPENDING been delayed
189 	 * until after fork?
190 	 */
191 	spin_lock_irq(&current->sighand->siglock);
192 	set_tsk_thread_flag(current, TIF_SIGPENDING);
193 	recalc_sigpending();
194 	spin_unlock_irq(&current->sighand->siglock);
195 }
196 
197 /* Given the mask, find the first available signal that should be serviced. */
198 
199 #define SYNCHRONOUS_MASK \
200 	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
201 	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
202 
203 int next_signal(struct sigpending *pending, sigset_t *mask)
204 {
205 	unsigned long i, *s, *m, x;
206 	int sig = 0;
207 
208 	s = pending->signal.sig;
209 	m = mask->sig;
210 
211 	/*
212 	 * Handle the first word specially: it contains the
213 	 * synchronous signals that need to be dequeued first.
214 	 */
215 	x = *s &~ *m;
216 	if (x) {
217 		if (x & SYNCHRONOUS_MASK)
218 			x &= SYNCHRONOUS_MASK;
219 		sig = ffz(~x) + 1;
220 		return sig;
221 	}
222 
223 	switch (_NSIG_WORDS) {
224 	default:
225 		for (i = 1; i < _NSIG_WORDS; ++i) {
226 			x = *++s &~ *++m;
227 			if (!x)
228 				continue;
229 			sig = ffz(~x) + i*_NSIG_BPW + 1;
230 			break;
231 		}
232 		break;
233 
234 	case 2:
235 		x = s[1] &~ m[1];
236 		if (!x)
237 			break;
238 		sig = ffz(~x) + _NSIG_BPW + 1;
239 		break;
240 
241 	case 1:
242 		/* Nothing to do */
243 		break;
244 	}
245 
246 	return sig;
247 }
248 
249 static inline void print_dropped_signal(int sig)
250 {
251 	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
252 
253 	if (!print_fatal_signals)
254 		return;
255 
256 	if (!__ratelimit(&ratelimit_state))
257 		return;
258 
259 	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
260 				current->comm, current->pid, sig);
261 }
262 
263 /**
264  * task_set_jobctl_pending - set jobctl pending bits
265  * @task: target task
266  * @mask: pending bits to set
267  *
268  * Clear @mask from @task->jobctl.  @mask must be subset of
269  * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
270  * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
271  * cleared.  If @task is already being killed or exiting, this function
272  * becomes noop.
273  *
274  * CONTEXT:
275  * Must be called with @task->sighand->siglock held.
276  *
277  * RETURNS:
278  * %true if @mask is set, %false if made noop because @task was dying.
279  */
280 bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
281 {
282 	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
283 			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
284 	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
285 
286 	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
287 		return false;
288 
289 	if (mask & JOBCTL_STOP_SIGMASK)
290 		task->jobctl &= ~JOBCTL_STOP_SIGMASK;
291 
292 	task->jobctl |= mask;
293 	return true;
294 }
295 
296 /**
297  * task_clear_jobctl_trapping - clear jobctl trapping bit
298  * @task: target task
299  *
300  * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
301  * Clear it and wake up the ptracer.  Note that we don't need any further
302  * locking.  @task->siglock guarantees that @task->parent points to the
303  * ptracer.
304  *
305  * CONTEXT:
306  * Must be called with @task->sighand->siglock held.
307  */
308 void task_clear_jobctl_trapping(struct task_struct *task)
309 {
310 	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
311 		task->jobctl &= ~JOBCTL_TRAPPING;
312 		smp_mb();	/* advised by wake_up_bit() */
313 		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
314 	}
315 }
316 
317 /**
318  * task_clear_jobctl_pending - clear jobctl pending bits
319  * @task: target task
320  * @mask: pending bits to clear
321  *
322  * Clear @mask from @task->jobctl.  @mask must be subset of
323  * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
324  * STOP bits are cleared together.
325  *
326  * If clearing of @mask leaves no stop or trap pending, this function calls
327  * task_clear_jobctl_trapping().
328  *
329  * CONTEXT:
330  * Must be called with @task->sighand->siglock held.
331  */
332 void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
333 {
334 	BUG_ON(mask & ~JOBCTL_PENDING_MASK);
335 
336 	if (mask & JOBCTL_STOP_PENDING)
337 		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
338 
339 	task->jobctl &= ~mask;
340 
341 	if (!(task->jobctl & JOBCTL_PENDING_MASK))
342 		task_clear_jobctl_trapping(task);
343 }
344 
345 /**
346  * task_participate_group_stop - participate in a group stop
347  * @task: task participating in a group stop
348  *
349  * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
350  * Group stop states are cleared and the group stop count is consumed if
351  * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
352  * stop, the appropriate `SIGNAL_*` flags are set.
353  *
354  * CONTEXT:
355  * Must be called with @task->sighand->siglock held.
356  *
357  * RETURNS:
358  * %true if group stop completion should be notified to the parent, %false
359  * otherwise.
360  */
361 static bool task_participate_group_stop(struct task_struct *task)
362 {
363 	struct signal_struct *sig = task->signal;
364 	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
365 
366 	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
367 
368 	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
369 
370 	if (!consume)
371 		return false;
372 
373 	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
374 		sig->group_stop_count--;
375 
376 	/*
377 	 * Tell the caller to notify completion iff we are entering into a
378 	 * fresh group stop.  Read comment in do_signal_stop() for details.
379 	 */
380 	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
381 		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
382 		return true;
383 	}
384 	return false;
385 }
386 
387 void task_join_group_stop(struct task_struct *task)
388 {
389 	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
390 	struct signal_struct *sig = current->signal;
391 
392 	if (sig->group_stop_count) {
393 		sig->group_stop_count++;
394 		mask |= JOBCTL_STOP_CONSUME;
395 	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
396 		return;
397 
398 	/* Have the new thread join an on-going signal group stop */
399 	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
400 }
401 
402 static struct ucounts *sig_get_ucounts(struct task_struct *t, int sig,
403 				       int override_rlimit)
404 {
405 	struct ucounts *ucounts;
406 	long sigpending;
407 
408 	/*
409 	 * Protect access to @t credentials. This can go away when all
410 	 * callers hold rcu read lock.
411 	 *
412 	 * NOTE! A pending signal will hold on to the user refcount,
413 	 * and we get/put the refcount only when the sigpending count
414 	 * changes from/to zero.
415 	 */
416 	rcu_read_lock();
417 	ucounts = task_ucounts(t);
418 	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING,
419 					    override_rlimit);
420 	rcu_read_unlock();
421 	if (!sigpending)
422 		return NULL;
423 
424 	if (unlikely(!override_rlimit && sigpending > task_rlimit(t, RLIMIT_SIGPENDING))) {
425 		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
426 		print_dropped_signal(sig);
427 		return NULL;
428 	}
429 
430 	return ucounts;
431 }
432 
433 static void __sigqueue_init(struct sigqueue *q, struct ucounts *ucounts,
434 			    const unsigned int sigqueue_flags)
435 {
436 	INIT_LIST_HEAD(&q->list);
437 	q->flags = sigqueue_flags;
438 	q->ucounts = ucounts;
439 }
440 
441 /*
442  * allocate a new signal queue record
443  * - this may be called without locks if and only if t == current, otherwise an
444  *   appropriate lock must be held to stop the target task from exiting
445  */
446 static struct sigqueue *sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
447 				       int override_rlimit)
448 {
449 	struct ucounts *ucounts = sig_get_ucounts(t, sig, override_rlimit);
450 	struct sigqueue *q;
451 
452 	if (!ucounts)
453 		return NULL;
454 
455 	q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
456 	if (!q) {
457 		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
458 		return NULL;
459 	}
460 
461 	__sigqueue_init(q, ucounts, 0);
462 	return q;
463 }
464 
465 static void __sigqueue_free(struct sigqueue *q)
466 {
467 	if (q->flags & SIGQUEUE_PREALLOC) {
468 		posixtimer_sigqueue_putref(q);
469 		return;
470 	}
471 	if (q->ucounts) {
472 		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
473 		q->ucounts = NULL;
474 	}
475 	kmem_cache_free(sigqueue_cachep, q);
476 }
477 
478 void flush_sigqueue(struct sigpending *queue)
479 {
480 	struct sigqueue *q;
481 
482 	sigemptyset(&queue->signal);
483 	while (!list_empty(&queue->list)) {
484 		q = list_entry(queue->list.next, struct sigqueue , list);
485 		list_del_init(&q->list);
486 		__sigqueue_free(q);
487 	}
488 }
489 
490 /*
491  * Flush all pending signals for this kthread.
492  */
493 void flush_signals(struct task_struct *t)
494 {
495 	unsigned long flags;
496 
497 	spin_lock_irqsave(&t->sighand->siglock, flags);
498 	clear_tsk_thread_flag(t, TIF_SIGPENDING);
499 	flush_sigqueue(&t->pending);
500 	flush_sigqueue(&t->signal->shared_pending);
501 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
502 }
503 EXPORT_SYMBOL(flush_signals);
504 
505 void ignore_signals(struct task_struct *t)
506 {
507 	int i;
508 
509 	for (i = 0; i < _NSIG; ++i)
510 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
511 
512 	flush_signals(t);
513 }
514 
515 /*
516  * Flush all handlers for a task.
517  */
518 
519 void
520 flush_signal_handlers(struct task_struct *t, int force_default)
521 {
522 	int i;
523 	struct k_sigaction *ka = &t->sighand->action[0];
524 	for (i = _NSIG ; i != 0 ; i--) {
525 		if (force_default || ka->sa.sa_handler != SIG_IGN)
526 			ka->sa.sa_handler = SIG_DFL;
527 		ka->sa.sa_flags = 0;
528 #ifdef __ARCH_HAS_SA_RESTORER
529 		ka->sa.sa_restorer = NULL;
530 #endif
531 		sigemptyset(&ka->sa.sa_mask);
532 		ka++;
533 	}
534 }
535 
536 bool unhandled_signal(struct task_struct *tsk, int sig)
537 {
538 	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
539 	if (is_global_init(tsk))
540 		return true;
541 
542 	if (handler != SIG_IGN && handler != SIG_DFL)
543 		return false;
544 
545 	/* If dying, we handle all new signals by ignoring them */
546 	if (fatal_signal_pending(tsk))
547 		return false;
548 
549 	/* if ptraced, let the tracer determine */
550 	return !tsk->ptrace;
551 }
552 
553 static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
554 			   struct sigqueue **timer_sigq)
555 {
556 	struct sigqueue *q, *first = NULL;
557 
558 	/*
559 	 * Collect the siginfo appropriate to this signal.  Check if
560 	 * there is another siginfo for the same signal.
561 	*/
562 	list_for_each_entry(q, &list->list, list) {
563 		if (q->info.si_signo == sig) {
564 			if (first)
565 				goto still_pending;
566 			first = q;
567 		}
568 	}
569 
570 	sigdelset(&list->signal, sig);
571 
572 	if (first) {
573 still_pending:
574 		list_del_init(&first->list);
575 		copy_siginfo(info, &first->info);
576 
577 		/*
578 		 * posix-timer signals are preallocated and freed when the last
579 		 * reference count is dropped in posixtimer_deliver_signal() or
580 		 * immediately on timer deletion when the signal is not pending.
581 		 * Spare the extra round through __sigqueue_free() which is
582 		 * ignoring preallocated signals.
583 		 */
584 		if (unlikely((first->flags & SIGQUEUE_PREALLOC) && (info->si_code == SI_TIMER)))
585 			*timer_sigq = first;
586 		else
587 			__sigqueue_free(first);
588 	} else {
589 		/*
590 		 * Ok, it wasn't in the queue.  This must be
591 		 * a fast-pathed signal or we must have been
592 		 * out of queue space.  So zero out the info.
593 		 */
594 		clear_siginfo(info);
595 		info->si_signo = sig;
596 		info->si_errno = 0;
597 		info->si_code = SI_USER;
598 		info->si_pid = 0;
599 		info->si_uid = 0;
600 	}
601 }
602 
603 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
604 			    kernel_siginfo_t *info, struct sigqueue **timer_sigq)
605 {
606 	int sig = next_signal(pending, mask);
607 
608 	if (sig)
609 		collect_signal(sig, pending, info, timer_sigq);
610 	return sig;
611 }
612 
613 /*
614  * Try to dequeue a signal. If a deliverable signal is found fill in the
615  * caller provided siginfo and return the signal number. Otherwise return
616  * 0.
617  */
618 int dequeue_signal(sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type)
619 {
620 	struct task_struct *tsk = current;
621 	struct sigqueue *timer_sigq;
622 	int signr;
623 
624 	lockdep_assert_held(&tsk->sighand->siglock);
625 
626 again:
627 	*type = PIDTYPE_PID;
628 	timer_sigq = NULL;
629 	signr = __dequeue_signal(&tsk->pending, mask, info, &timer_sigq);
630 	if (!signr) {
631 		*type = PIDTYPE_TGID;
632 		signr = __dequeue_signal(&tsk->signal->shared_pending,
633 					 mask, info, &timer_sigq);
634 
635 		if (unlikely(signr == SIGALRM))
636 			posixtimer_rearm_itimer(tsk);
637 	}
638 
639 	recalc_sigpending();
640 	if (!signr)
641 		return 0;
642 
643 	if (unlikely(sig_kernel_stop(signr))) {
644 		/*
645 		 * Set a marker that we have dequeued a stop signal.  Our
646 		 * caller might release the siglock and then the pending
647 		 * stop signal it is about to process is no longer in the
648 		 * pending bitmasks, but must still be cleared by a SIGCONT
649 		 * (and overruled by a SIGKILL).  So those cases clear this
650 		 * shared flag after we've set it.  Note that this flag may
651 		 * remain set after the signal we return is ignored or
652 		 * handled.  That doesn't matter because its only purpose
653 		 * is to alert stop-signal processing code when another
654 		 * processor has come along and cleared the flag.
655 		 */
656 		current->jobctl |= JOBCTL_STOP_DEQUEUED;
657 	}
658 
659 	if (IS_ENABLED(CONFIG_POSIX_TIMERS) && unlikely(timer_sigq)) {
660 		if (!posixtimer_deliver_signal(info, timer_sigq))
661 			goto again;
662 	}
663 
664 	return signr;
665 }
666 EXPORT_SYMBOL_GPL(dequeue_signal);
667 
668 static int dequeue_synchronous_signal(kernel_siginfo_t *info)
669 {
670 	struct task_struct *tsk = current;
671 	struct sigpending *pending = &tsk->pending;
672 	struct sigqueue *q, *sync = NULL;
673 
674 	/*
675 	 * Might a synchronous signal be in the queue?
676 	 */
677 	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
678 		return 0;
679 
680 	/*
681 	 * Return the first synchronous signal in the queue.
682 	 */
683 	list_for_each_entry(q, &pending->list, list) {
684 		/* Synchronous signals have a positive si_code */
685 		if ((q->info.si_code > SI_USER) &&
686 		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
687 			sync = q;
688 			goto next;
689 		}
690 	}
691 	return 0;
692 next:
693 	/*
694 	 * Check if there is another siginfo for the same signal.
695 	 */
696 	list_for_each_entry_continue(q, &pending->list, list) {
697 		if (q->info.si_signo == sync->info.si_signo)
698 			goto still_pending;
699 	}
700 
701 	sigdelset(&pending->signal, sync->info.si_signo);
702 	recalc_sigpending();
703 still_pending:
704 	list_del_init(&sync->list);
705 	copy_siginfo(info, &sync->info);
706 	__sigqueue_free(sync);
707 	return info->si_signo;
708 }
709 
710 /*
711  * Tell a process that it has a new active signal..
712  *
713  * NOTE! we rely on the previous spin_lock to
714  * lock interrupts for us! We can only be called with
715  * "siglock" held, and the local interrupt must
716  * have been disabled when that got acquired!
717  *
718  * No need to set need_resched since signal event passing
719  * goes through ->blocked
720  */
721 void signal_wake_up_state(struct task_struct *t, unsigned int state)
722 {
723 	lockdep_assert_held(&t->sighand->siglock);
724 
725 	set_tsk_thread_flag(t, TIF_SIGPENDING);
726 
727 	/*
728 	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
729 	 * case. We don't check t->state here because there is a race with it
730 	 * executing another processor and just now entering stopped state.
731 	 * By using wake_up_state, we ensure the process will wake up and
732 	 * handle its death signal.
733 	 */
734 	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
735 		kick_process(t);
736 }
737 
738 static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q);
739 
740 static void sigqueue_free_ignored(struct task_struct *tsk, struct sigqueue *q)
741 {
742 	if (likely(!(q->flags & SIGQUEUE_PREALLOC) || q->info.si_code != SI_TIMER))
743 		__sigqueue_free(q);
744 	else
745 		posixtimer_sig_ignore(tsk, q);
746 }
747 
748 /* Remove signals in mask from the pending set and queue. */
749 static void flush_sigqueue_mask(struct task_struct *p, sigset_t *mask, struct sigpending *s)
750 {
751 	struct sigqueue *q, *n;
752 	sigset_t m;
753 
754 	lockdep_assert_held(&p->sighand->siglock);
755 
756 	sigandsets(&m, mask, &s->signal);
757 	if (sigisemptyset(&m))
758 		return;
759 
760 	sigandnsets(&s->signal, &s->signal, mask);
761 	list_for_each_entry_safe(q, n, &s->list, list) {
762 		if (sigismember(mask, q->info.si_signo)) {
763 			list_del_init(&q->list);
764 			sigqueue_free_ignored(p, q);
765 		}
766 	}
767 }
768 
769 static inline int is_si_special(const struct kernel_siginfo *info)
770 {
771 	return info <= SEND_SIG_PRIV;
772 }
773 
774 static inline bool si_fromuser(const struct kernel_siginfo *info)
775 {
776 	return info == SEND_SIG_NOINFO ||
777 		(!is_si_special(info) && SI_FROMUSER(info));
778 }
779 
780 /*
781  * called with RCU read lock from check_kill_permission()
782  */
783 static bool kill_ok_by_cred(struct task_struct *t)
784 {
785 	const struct cred *cred = current_cred();
786 	const struct cred *tcred = __task_cred(t);
787 
788 	return uid_eq(cred->euid, tcred->suid) ||
789 	       uid_eq(cred->euid, tcred->uid) ||
790 	       uid_eq(cred->uid, tcred->suid) ||
791 	       uid_eq(cred->uid, tcred->uid) ||
792 	       ns_capable(tcred->user_ns, CAP_KILL);
793 }
794 
795 /*
796  * Bad permissions for sending the signal
797  * - the caller must hold the RCU read lock
798  */
799 static int check_kill_permission(int sig, struct kernel_siginfo *info,
800 				 struct task_struct *t)
801 {
802 	struct pid *sid;
803 	int error;
804 
805 	if (!valid_signal(sig))
806 		return -EINVAL;
807 
808 	if (!si_fromuser(info))
809 		return 0;
810 
811 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
812 	if (error)
813 		return error;
814 
815 	if (!same_thread_group(current, t) &&
816 	    !kill_ok_by_cred(t)) {
817 		switch (sig) {
818 		case SIGCONT:
819 			sid = task_session(t);
820 			/*
821 			 * We don't return the error if sid == NULL. The
822 			 * task was unhashed, the caller must notice this.
823 			 */
824 			if (!sid || sid == task_session(current))
825 				break;
826 			fallthrough;
827 		default:
828 			return -EPERM;
829 		}
830 	}
831 
832 	return security_task_kill(t, info, sig, NULL);
833 }
834 
835 /**
836  * ptrace_trap_notify - schedule trap to notify ptracer
837  * @t: tracee wanting to notify tracer
838  *
839  * This function schedules sticky ptrace trap which is cleared on the next
840  * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
841  * ptracer.
842  *
843  * If @t is running, STOP trap will be taken.  If trapped for STOP and
844  * ptracer is listening for events, tracee is woken up so that it can
845  * re-trap for the new event.  If trapped otherwise, STOP trap will be
846  * eventually taken without returning to userland after the existing traps
847  * are finished by PTRACE_CONT.
848  *
849  * CONTEXT:
850  * Must be called with @task->sighand->siglock held.
851  */
852 static void ptrace_trap_notify(struct task_struct *t)
853 {
854 	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
855 	lockdep_assert_held(&t->sighand->siglock);
856 
857 	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
858 	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
859 }
860 
861 /*
862  * Handle magic process-wide effects of stop/continue signals. Unlike
863  * the signal actions, these happen immediately at signal-generation
864  * time regardless of blocking, ignoring, or handling.  This does the
865  * actual continuing for SIGCONT, but not the actual stopping for stop
866  * signals. The process stop is done as a signal action for SIG_DFL.
867  *
868  * Returns true if the signal should be actually delivered, otherwise
869  * it should be dropped.
870  */
871 static bool prepare_signal(int sig, struct task_struct *p, bool force)
872 {
873 	struct signal_struct *signal = p->signal;
874 	struct task_struct *t;
875 	sigset_t flush;
876 
877 	if (signal->flags & SIGNAL_GROUP_EXIT) {
878 		if (signal->core_state)
879 			return sig == SIGKILL;
880 		/*
881 		 * The process is in the middle of dying, drop the signal.
882 		 */
883 		return false;
884 	} else if (sig_kernel_stop(sig)) {
885 		/*
886 		 * This is a stop signal.  Remove SIGCONT from all queues.
887 		 */
888 		siginitset(&flush, sigmask(SIGCONT));
889 		flush_sigqueue_mask(p, &flush, &signal->shared_pending);
890 		for_each_thread(p, t)
891 			flush_sigqueue_mask(p, &flush, &t->pending);
892 	} else if (sig == SIGCONT) {
893 		unsigned int why;
894 		/*
895 		 * Remove all stop signals from all queues, wake all threads.
896 		 */
897 		siginitset(&flush, SIG_KERNEL_STOP_MASK);
898 		flush_sigqueue_mask(p, &flush, &signal->shared_pending);
899 		for_each_thread(p, t) {
900 			flush_sigqueue_mask(p, &flush, &t->pending);
901 			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
902 			if (likely(!(t->ptrace & PT_SEIZED))) {
903 				t->jobctl &= ~JOBCTL_STOPPED;
904 				wake_up_state(t, __TASK_STOPPED);
905 			} else
906 				ptrace_trap_notify(t);
907 		}
908 
909 		/*
910 		 * Notify the parent with CLD_CONTINUED if we were stopped.
911 		 *
912 		 * If we were in the middle of a group stop, we pretend it
913 		 * was already finished, and then continued. Since SIGCHLD
914 		 * doesn't queue we report only CLD_STOPPED, as if the next
915 		 * CLD_CONTINUED was dropped.
916 		 */
917 		why = 0;
918 		if (signal->flags & SIGNAL_STOP_STOPPED)
919 			why |= SIGNAL_CLD_CONTINUED;
920 		else if (signal->group_stop_count)
921 			why |= SIGNAL_CLD_STOPPED;
922 
923 		if (why) {
924 			/*
925 			 * The first thread which returns from do_signal_stop()
926 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
927 			 * notify its parent. See get_signal().
928 			 */
929 			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
930 			signal->group_stop_count = 0;
931 			signal->group_exit_code = 0;
932 		}
933 	}
934 
935 	return !sig_ignored(p, sig, force);
936 }
937 
938 /*
939  * Test if P wants to take SIG.  After we've checked all threads with this,
940  * it's equivalent to finding no threads not blocking SIG.  Any threads not
941  * blocking SIG were ruled out because they are not running and already
942  * have pending signals.  Such threads will dequeue from the shared queue
943  * as soon as they're available, so putting the signal on the shared queue
944  * will be equivalent to sending it to one such thread.
945  */
946 static inline bool wants_signal(int sig, struct task_struct *p)
947 {
948 	if (sigismember(&p->blocked, sig))
949 		return false;
950 
951 	if (p->flags & PF_EXITING)
952 		return false;
953 
954 	if (sig == SIGKILL)
955 		return true;
956 
957 	if (task_is_stopped_or_traced(p))
958 		return false;
959 
960 	return task_curr(p) || !task_sigpending(p);
961 }
962 
963 static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
964 {
965 	struct signal_struct *signal = p->signal;
966 	struct task_struct *t;
967 
968 	/*
969 	 * Now find a thread we can wake up to take the signal off the queue.
970 	 *
971 	 * Try the suggested task first (may or may not be the main thread).
972 	 */
973 	if (wants_signal(sig, p))
974 		t = p;
975 	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
976 		/*
977 		 * There is just one thread and it does not need to be woken.
978 		 * It will dequeue unblocked signals before it runs again.
979 		 */
980 		return;
981 	else {
982 		/*
983 		 * Otherwise try to find a suitable thread.
984 		 */
985 		t = signal->curr_target;
986 		while (!wants_signal(sig, t)) {
987 			t = next_thread(t);
988 			if (t == signal->curr_target)
989 				/*
990 				 * No thread needs to be woken.
991 				 * Any eligible threads will see
992 				 * the signal in the queue soon.
993 				 */
994 				return;
995 		}
996 		signal->curr_target = t;
997 	}
998 
999 	/*
1000 	 * Found a killable thread.  If the signal will be fatal,
1001 	 * then start taking the whole group down immediately.
1002 	 */
1003 	if (sig_fatal(p, sig) && !sigismember(&t->real_blocked, sig) &&
1004 	    (sig == SIGKILL || !p->ptrace)) {
1005 		/*
1006 		 * This signal will be fatal to the whole group.
1007 		 */
1008 		if (!sig_kernel_coredump(sig)) {
1009 			/*
1010 			 * Start a group exit and wake everybody up.
1011 			 * This way we don't have other threads
1012 			 * running and doing things after a slower
1013 			 * thread has the fatal signal pending.
1014 			 */
1015 			signal->flags = SIGNAL_GROUP_EXIT;
1016 			signal->group_exit_code = sig;
1017 			signal->group_stop_count = 0;
1018 			__for_each_thread(signal, t) {
1019 				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1020 				sigaddset(&t->pending.signal, SIGKILL);
1021 				signal_wake_up(t, 1);
1022 			}
1023 			return;
1024 		}
1025 	}
1026 
1027 	/*
1028 	 * The signal is already in the shared-pending queue.
1029 	 * Tell the chosen thread to wake up and dequeue it.
1030 	 */
1031 	signal_wake_up(t, sig == SIGKILL);
1032 	return;
1033 }
1034 
1035 static inline bool legacy_queue(struct sigpending *signals, int sig)
1036 {
1037 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1038 }
1039 
1040 static int __send_signal_locked(int sig, struct kernel_siginfo *info,
1041 				struct task_struct *t, enum pid_type type, bool force)
1042 {
1043 	struct sigpending *pending;
1044 	struct sigqueue *q;
1045 	int override_rlimit;
1046 	int ret = 0, result;
1047 
1048 	lockdep_assert_held(&t->sighand->siglock);
1049 
1050 	result = TRACE_SIGNAL_IGNORED;
1051 	if (!prepare_signal(sig, t, force))
1052 		goto ret;
1053 
1054 	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1055 	/*
1056 	 * Short-circuit ignored signals and support queuing
1057 	 * exactly one non-rt signal, so that we can get more
1058 	 * detailed information about the cause of the signal.
1059 	 */
1060 	result = TRACE_SIGNAL_ALREADY_PENDING;
1061 	if (legacy_queue(pending, sig))
1062 		goto ret;
1063 
1064 	result = TRACE_SIGNAL_DELIVERED;
1065 	/*
1066 	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
1067 	 */
1068 	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1069 		goto out_set;
1070 
1071 	/*
1072 	 * Real-time signals must be queued if sent by sigqueue, or
1073 	 * some other real-time mechanism.  It is implementation
1074 	 * defined whether kill() does so.  We attempt to do so, on
1075 	 * the principle of least surprise, but since kill is not
1076 	 * allowed to fail with EAGAIN when low on memory we just
1077 	 * make sure at least one signal gets delivered and don't
1078 	 * pass on the info struct.
1079 	 */
1080 	if (sig < SIGRTMIN)
1081 		override_rlimit = (is_si_special(info) || info->si_code >= 0);
1082 	else
1083 		override_rlimit = 0;
1084 
1085 	q = sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
1086 
1087 	if (q) {
1088 		list_add_tail(&q->list, &pending->list);
1089 		switch ((unsigned long) info) {
1090 		case (unsigned long) SEND_SIG_NOINFO:
1091 			clear_siginfo(&q->info);
1092 			q->info.si_signo = sig;
1093 			q->info.si_errno = 0;
1094 			q->info.si_code = SI_USER;
1095 			q->info.si_pid = task_tgid_nr_ns(current,
1096 							task_active_pid_ns(t));
1097 			rcu_read_lock();
1098 			q->info.si_uid =
1099 				from_kuid_munged(task_cred_xxx(t, user_ns),
1100 						 current_uid());
1101 			rcu_read_unlock();
1102 			break;
1103 		case (unsigned long) SEND_SIG_PRIV:
1104 			clear_siginfo(&q->info);
1105 			q->info.si_signo = sig;
1106 			q->info.si_errno = 0;
1107 			q->info.si_code = SI_KERNEL;
1108 			q->info.si_pid = 0;
1109 			q->info.si_uid = 0;
1110 			break;
1111 		default:
1112 			copy_siginfo(&q->info, info);
1113 			break;
1114 		}
1115 	} else if (!is_si_special(info) &&
1116 		   sig >= SIGRTMIN && info->si_code != SI_USER) {
1117 		/*
1118 		 * Queue overflow, abort.  We may abort if the
1119 		 * signal was rt and sent by user using something
1120 		 * other than kill().
1121 		 */
1122 		result = TRACE_SIGNAL_OVERFLOW_FAIL;
1123 		ret = -EAGAIN;
1124 		goto ret;
1125 	} else {
1126 		/*
1127 		 * This is a silent loss of information.  We still
1128 		 * send the signal, but the *info bits are lost.
1129 		 */
1130 		result = TRACE_SIGNAL_LOSE_INFO;
1131 	}
1132 
1133 out_set:
1134 	signalfd_notify(t, sig);
1135 	sigaddset(&pending->signal, sig);
1136 
1137 	/* Let multiprocess signals appear after on-going forks */
1138 	if (type > PIDTYPE_TGID) {
1139 		struct multiprocess_signals *delayed;
1140 		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1141 			sigset_t *signal = &delayed->signal;
1142 			/* Can't queue both a stop and a continue signal */
1143 			if (sig == SIGCONT)
1144 				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1145 			else if (sig_kernel_stop(sig))
1146 				sigdelset(signal, SIGCONT);
1147 			sigaddset(signal, sig);
1148 		}
1149 	}
1150 
1151 	complete_signal(sig, t, type);
1152 ret:
1153 	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1154 	return ret;
1155 }
1156 
1157 static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1158 {
1159 	bool ret = false;
1160 	switch (siginfo_layout(info->si_signo, info->si_code)) {
1161 	case SIL_KILL:
1162 	case SIL_CHLD:
1163 	case SIL_RT:
1164 		ret = true;
1165 		break;
1166 	case SIL_TIMER:
1167 	case SIL_POLL:
1168 	case SIL_FAULT:
1169 	case SIL_FAULT_TRAPNO:
1170 	case SIL_FAULT_MCEERR:
1171 	case SIL_FAULT_BNDERR:
1172 	case SIL_FAULT_PKUERR:
1173 	case SIL_FAULT_PERF_EVENT:
1174 	case SIL_SYS:
1175 		ret = false;
1176 		break;
1177 	}
1178 	return ret;
1179 }
1180 
1181 int send_signal_locked(int sig, struct kernel_siginfo *info,
1182 		       struct task_struct *t, enum pid_type type)
1183 {
1184 	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1185 	bool force = false;
1186 
1187 	if (info == SEND_SIG_NOINFO) {
1188 		/* Force if sent from an ancestor pid namespace */
1189 		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1190 	} else if (info == SEND_SIG_PRIV) {
1191 		/* Don't ignore kernel generated signals */
1192 		force = true;
1193 	} else if (has_si_pid_and_uid(info)) {
1194 		/* SIGKILL and SIGSTOP is special or has ids */
1195 		struct user_namespace *t_user_ns;
1196 
1197 		rcu_read_lock();
1198 		t_user_ns = task_cred_xxx(t, user_ns);
1199 		if (current_user_ns() != t_user_ns) {
1200 			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1201 			info->si_uid = from_kuid_munged(t_user_ns, uid);
1202 		}
1203 		rcu_read_unlock();
1204 
1205 		/* A kernel generated signal? */
1206 		force = (info->si_code == SI_KERNEL);
1207 
1208 		/* From an ancestor pid namespace? */
1209 		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1210 			info->si_pid = 0;
1211 			force = true;
1212 		}
1213 	}
1214 	return __send_signal_locked(sig, info, t, type, force);
1215 }
1216 
1217 static void print_fatal_signal(int signr)
1218 {
1219 	struct pt_regs *regs = task_pt_regs(current);
1220 	struct file *exe_file;
1221 
1222 	exe_file = get_task_exe_file(current);
1223 	if (exe_file) {
1224 		pr_info("%pD: %s: potentially unexpected fatal signal %d.\n",
1225 			exe_file, current->comm, signr);
1226 		fput(exe_file);
1227 	} else {
1228 		pr_info("%s: potentially unexpected fatal signal %d.\n",
1229 			current->comm, signr);
1230 	}
1231 
1232 #if defined(__i386__) && !defined(__arch_um__)
1233 	pr_info("code at %08lx: ", regs->ip);
1234 	{
1235 		int i;
1236 		for (i = 0; i < 16; i++) {
1237 			unsigned char insn;
1238 
1239 			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1240 				break;
1241 			pr_cont("%02x ", insn);
1242 		}
1243 	}
1244 	pr_cont("\n");
1245 #endif
1246 	preempt_disable();
1247 	show_regs(regs);
1248 	preempt_enable();
1249 }
1250 
1251 static int __init setup_print_fatal_signals(char *str)
1252 {
1253 	get_option (&str, &print_fatal_signals);
1254 
1255 	return 1;
1256 }
1257 
1258 __setup("print-fatal-signals=", setup_print_fatal_signals);
1259 
1260 int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1261 			enum pid_type type)
1262 {
1263 	unsigned long flags;
1264 	int ret = -ESRCH;
1265 
1266 	if (lock_task_sighand(p, &flags)) {
1267 		ret = send_signal_locked(sig, info, p, type);
1268 		unlock_task_sighand(p, &flags);
1269 	}
1270 
1271 	return ret;
1272 }
1273 
1274 enum sig_handler {
1275 	HANDLER_CURRENT, /* If reachable use the current handler */
1276 	HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
1277 	HANDLER_EXIT,	 /* Only visible as the process exit code */
1278 };
1279 
1280 /*
1281  * Force a signal that the process can't ignore: if necessary
1282  * we unblock the signal and change any SIG_IGN to SIG_DFL.
1283  *
1284  * Note: If we unblock the signal, we always reset it to SIG_DFL,
1285  * since we do not want to have a signal handler that was blocked
1286  * be invoked when user space had explicitly blocked it.
1287  *
1288  * We don't want to have recursive SIGSEGV's etc, for example,
1289  * that is why we also clear SIGNAL_UNKILLABLE.
1290  */
1291 static int
1292 force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
1293 	enum sig_handler handler)
1294 {
1295 	unsigned long int flags;
1296 	int ret, blocked, ignored;
1297 	struct k_sigaction *action;
1298 	int sig = info->si_signo;
1299 
1300 	spin_lock_irqsave(&t->sighand->siglock, flags);
1301 	action = &t->sighand->action[sig-1];
1302 	ignored = action->sa.sa_handler == SIG_IGN;
1303 	blocked = sigismember(&t->blocked, sig);
1304 	if (blocked || ignored || (handler != HANDLER_CURRENT)) {
1305 		action->sa.sa_handler = SIG_DFL;
1306 		if (handler == HANDLER_EXIT)
1307 			action->sa.sa_flags |= SA_IMMUTABLE;
1308 		if (blocked)
1309 			sigdelset(&t->blocked, sig);
1310 	}
1311 	/*
1312 	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1313 	 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
1314 	 */
1315 	if (action->sa.sa_handler == SIG_DFL &&
1316 	    (!t->ptrace || (handler == HANDLER_EXIT)))
1317 		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1318 	ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
1319 	/* This can happen if the signal was already pending and blocked */
1320 	if (!task_sigpending(t))
1321 		signal_wake_up(t, 0);
1322 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1323 
1324 	return ret;
1325 }
1326 
1327 int force_sig_info(struct kernel_siginfo *info)
1328 {
1329 	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
1330 }
1331 
1332 /*
1333  * Nuke all other threads in the group.
1334  */
1335 int zap_other_threads(struct task_struct *p)
1336 {
1337 	struct task_struct *t;
1338 	int count = 0;
1339 
1340 	p->signal->group_stop_count = 0;
1341 
1342 	for_other_threads(p, t) {
1343 		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1344 		count++;
1345 
1346 		/* Don't bother with already dead threads */
1347 		if (t->exit_state)
1348 			continue;
1349 		sigaddset(&t->pending.signal, SIGKILL);
1350 		signal_wake_up(t, 1);
1351 	}
1352 
1353 	return count;
1354 }
1355 
1356 struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
1357 					 unsigned long *flags)
1358 {
1359 	struct sighand_struct *sighand;
1360 
1361 	rcu_read_lock();
1362 	for (;;) {
1363 		sighand = rcu_dereference(tsk->sighand);
1364 		if (unlikely(sighand == NULL))
1365 			break;
1366 
1367 		/*
1368 		 * This sighand can be already freed and even reused, but
1369 		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1370 		 * initializes ->siglock: this slab can't go away, it has
1371 		 * the same object type, ->siglock can't be reinitialized.
1372 		 *
1373 		 * We need to ensure that tsk->sighand is still the same
1374 		 * after we take the lock, we can race with de_thread() or
1375 		 * __exit_signal(). In the latter case the next iteration
1376 		 * must see ->sighand == NULL.
1377 		 */
1378 		spin_lock_irqsave(&sighand->siglock, *flags);
1379 		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
1380 			break;
1381 		spin_unlock_irqrestore(&sighand->siglock, *flags);
1382 	}
1383 	rcu_read_unlock();
1384 
1385 	return sighand;
1386 }
1387 
1388 #ifdef CONFIG_LOCKDEP
1389 void lockdep_assert_task_sighand_held(struct task_struct *task)
1390 {
1391 	struct sighand_struct *sighand;
1392 
1393 	rcu_read_lock();
1394 	sighand = rcu_dereference(task->sighand);
1395 	if (sighand)
1396 		lockdep_assert_held(&sighand->siglock);
1397 	else
1398 		WARN_ON_ONCE(1);
1399 	rcu_read_unlock();
1400 }
1401 #endif
1402 
1403 /*
1404  * send signal info to all the members of a thread group or to the
1405  * individual thread if type == PIDTYPE_PID.
1406  */
1407 int group_send_sig_info(int sig, struct kernel_siginfo *info,
1408 			struct task_struct *p, enum pid_type type)
1409 {
1410 	int ret;
1411 
1412 	rcu_read_lock();
1413 	ret = check_kill_permission(sig, info, p);
1414 	rcu_read_unlock();
1415 
1416 	if (!ret && sig)
1417 		ret = do_send_sig_info(sig, info, p, type);
1418 
1419 	return ret;
1420 }
1421 
1422 /*
1423  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1424  * control characters do (^C, ^Z etc)
1425  * - the caller must hold at least a readlock on tasklist_lock
1426  */
1427 int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1428 {
1429 	struct task_struct *p = NULL;
1430 	int ret = -ESRCH;
1431 
1432 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1433 		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1434 		/*
1435 		 * If group_send_sig_info() succeeds at least once ret
1436 		 * becomes 0 and after that the code below has no effect.
1437 		 * Otherwise we return the last err or -ESRCH if this
1438 		 * process group is empty.
1439 		 */
1440 		if (ret)
1441 			ret = err;
1442 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1443 
1444 	return ret;
1445 }
1446 
1447 static int kill_pid_info_type(int sig, struct kernel_siginfo *info,
1448 				struct pid *pid, enum pid_type type)
1449 {
1450 	int error = -ESRCH;
1451 	struct task_struct *p;
1452 
1453 	for (;;) {
1454 		rcu_read_lock();
1455 		p = pid_task(pid, PIDTYPE_PID);
1456 		if (p)
1457 			error = group_send_sig_info(sig, info, p, type);
1458 		rcu_read_unlock();
1459 		if (likely(!p || error != -ESRCH))
1460 			return error;
1461 		/*
1462 		 * The task was unhashed in between, try again.  If it
1463 		 * is dead, pid_task() will return NULL, if we race with
1464 		 * de_thread() it will find the new leader.
1465 		 */
1466 	}
1467 }
1468 
1469 int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1470 {
1471 	return kill_pid_info_type(sig, info, pid, PIDTYPE_TGID);
1472 }
1473 
1474 static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1475 {
1476 	int error;
1477 	rcu_read_lock();
1478 	error = kill_pid_info(sig, info, find_vpid(pid));
1479 	rcu_read_unlock();
1480 	return error;
1481 }
1482 
1483 static inline bool kill_as_cred_perm(const struct cred *cred,
1484 				     struct task_struct *target)
1485 {
1486 	const struct cred *pcred = __task_cred(target);
1487 
1488 	return uid_eq(cred->euid, pcred->suid) ||
1489 	       uid_eq(cred->euid, pcred->uid) ||
1490 	       uid_eq(cred->uid, pcred->suid) ||
1491 	       uid_eq(cred->uid, pcred->uid);
1492 }
1493 
1494 /*
1495  * The usb asyncio usage of siginfo is wrong.  The glibc support
1496  * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1497  * AKA after the generic fields:
1498  *	kernel_pid_t	si_pid;
1499  *	kernel_uid32_t	si_uid;
1500  *	sigval_t	si_value;
1501  *
1502  * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1503  * after the generic fields is:
1504  *	void __user 	*si_addr;
1505  *
1506  * This is a practical problem when there is a 64bit big endian kernel
1507  * and a 32bit userspace.  As the 32bit address will encoded in the low
1508  * 32bits of the pointer.  Those low 32bits will be stored at higher
1509  * address than appear in a 32 bit pointer.  So userspace will not
1510  * see the address it was expecting for it's completions.
1511  *
1512  * There is nothing in the encoding that can allow
1513  * copy_siginfo_to_user32 to detect this confusion of formats, so
1514  * handle this by requiring the caller of kill_pid_usb_asyncio to
1515  * notice when this situration takes place and to store the 32bit
1516  * pointer in sival_int, instead of sival_addr of the sigval_t addr
1517  * parameter.
1518  */
1519 int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1520 			 struct pid *pid, const struct cred *cred)
1521 {
1522 	struct kernel_siginfo info;
1523 	struct task_struct *p;
1524 	unsigned long flags;
1525 	int ret = -EINVAL;
1526 
1527 	if (!valid_signal(sig))
1528 		return ret;
1529 
1530 	clear_siginfo(&info);
1531 	info.si_signo = sig;
1532 	info.si_errno = errno;
1533 	info.si_code = SI_ASYNCIO;
1534 	*((sigval_t *)&info.si_pid) = addr;
1535 
1536 	rcu_read_lock();
1537 	p = pid_task(pid, PIDTYPE_PID);
1538 	if (!p) {
1539 		ret = -ESRCH;
1540 		goto out_unlock;
1541 	}
1542 	if (!kill_as_cred_perm(cred, p)) {
1543 		ret = -EPERM;
1544 		goto out_unlock;
1545 	}
1546 	ret = security_task_kill(p, &info, sig, cred);
1547 	if (ret)
1548 		goto out_unlock;
1549 
1550 	if (sig) {
1551 		if (lock_task_sighand(p, &flags)) {
1552 			ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
1553 			unlock_task_sighand(p, &flags);
1554 		} else
1555 			ret = -ESRCH;
1556 	}
1557 out_unlock:
1558 	rcu_read_unlock();
1559 	return ret;
1560 }
1561 EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
1562 
1563 /*
1564  * kill_something_info() interprets pid in interesting ways just like kill(2).
1565  *
1566  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1567  * is probably wrong.  Should make it like BSD or SYSV.
1568  */
1569 
1570 static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1571 {
1572 	int ret;
1573 
1574 	if (pid > 0)
1575 		return kill_proc_info(sig, info, pid);
1576 
1577 	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
1578 	if (pid == INT_MIN)
1579 		return -ESRCH;
1580 
1581 	read_lock(&tasklist_lock);
1582 	if (pid != -1) {
1583 		ret = __kill_pgrp_info(sig, info,
1584 				pid ? find_vpid(-pid) : task_pgrp(current));
1585 	} else {
1586 		int retval = 0, count = 0;
1587 		struct task_struct * p;
1588 
1589 		for_each_process(p) {
1590 			if (task_pid_vnr(p) > 1 &&
1591 					!same_thread_group(p, current)) {
1592 				int err = group_send_sig_info(sig, info, p,
1593 							      PIDTYPE_MAX);
1594 				++count;
1595 				if (err != -EPERM)
1596 					retval = err;
1597 			}
1598 		}
1599 		ret = count ? retval : -ESRCH;
1600 	}
1601 	read_unlock(&tasklist_lock);
1602 
1603 	return ret;
1604 }
1605 
1606 /*
1607  * These are for backward compatibility with the rest of the kernel source.
1608  */
1609 
1610 int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1611 {
1612 	/*
1613 	 * Make sure legacy kernel users don't send in bad values
1614 	 * (normal paths check this in check_kill_permission).
1615 	 */
1616 	if (!valid_signal(sig))
1617 		return -EINVAL;
1618 
1619 	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1620 }
1621 EXPORT_SYMBOL(send_sig_info);
1622 
1623 #define __si_special(priv) \
1624 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1625 
1626 int
1627 send_sig(int sig, struct task_struct *p, int priv)
1628 {
1629 	return send_sig_info(sig, __si_special(priv), p);
1630 }
1631 EXPORT_SYMBOL(send_sig);
1632 
1633 void force_sig(int sig)
1634 {
1635 	struct kernel_siginfo info;
1636 
1637 	clear_siginfo(&info);
1638 	info.si_signo = sig;
1639 	info.si_errno = 0;
1640 	info.si_code = SI_KERNEL;
1641 	info.si_pid = 0;
1642 	info.si_uid = 0;
1643 	force_sig_info(&info);
1644 }
1645 EXPORT_SYMBOL(force_sig);
1646 
1647 void force_fatal_sig(int sig)
1648 {
1649 	struct kernel_siginfo info;
1650 
1651 	clear_siginfo(&info);
1652 	info.si_signo = sig;
1653 	info.si_errno = 0;
1654 	info.si_code = SI_KERNEL;
1655 	info.si_pid = 0;
1656 	info.si_uid = 0;
1657 	force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
1658 }
1659 
1660 void force_exit_sig(int sig)
1661 {
1662 	struct kernel_siginfo info;
1663 
1664 	clear_siginfo(&info);
1665 	info.si_signo = sig;
1666 	info.si_errno = 0;
1667 	info.si_code = SI_KERNEL;
1668 	info.si_pid = 0;
1669 	info.si_uid = 0;
1670 	force_sig_info_to_task(&info, current, HANDLER_EXIT);
1671 }
1672 
1673 /*
1674  * When things go south during signal handling, we
1675  * will force a SIGSEGV. And if the signal that caused
1676  * the problem was already a SIGSEGV, we'll want to
1677  * make sure we don't even try to deliver the signal..
1678  */
1679 void force_sigsegv(int sig)
1680 {
1681 	if (sig == SIGSEGV)
1682 		force_fatal_sig(SIGSEGV);
1683 	else
1684 		force_sig(SIGSEGV);
1685 }
1686 
1687 int force_sig_fault_to_task(int sig, int code, void __user *addr,
1688 			    struct task_struct *t)
1689 {
1690 	struct kernel_siginfo info;
1691 
1692 	clear_siginfo(&info);
1693 	info.si_signo = sig;
1694 	info.si_errno = 0;
1695 	info.si_code  = code;
1696 	info.si_addr  = addr;
1697 	return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
1698 }
1699 
1700 int force_sig_fault(int sig, int code, void __user *addr)
1701 {
1702 	return force_sig_fault_to_task(sig, code, addr, current);
1703 }
1704 
1705 int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t)
1706 {
1707 	struct kernel_siginfo info;
1708 
1709 	clear_siginfo(&info);
1710 	info.si_signo = sig;
1711 	info.si_errno = 0;
1712 	info.si_code  = code;
1713 	info.si_addr  = addr;
1714 	return send_sig_info(info.si_signo, &info, t);
1715 }
1716 
1717 int force_sig_mceerr(int code, void __user *addr, short lsb)
1718 {
1719 	struct kernel_siginfo info;
1720 
1721 	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1722 	clear_siginfo(&info);
1723 	info.si_signo = SIGBUS;
1724 	info.si_errno = 0;
1725 	info.si_code = code;
1726 	info.si_addr = addr;
1727 	info.si_addr_lsb = lsb;
1728 	return force_sig_info(&info);
1729 }
1730 
1731 int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1732 {
1733 	struct kernel_siginfo info;
1734 
1735 	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1736 	clear_siginfo(&info);
1737 	info.si_signo = SIGBUS;
1738 	info.si_errno = 0;
1739 	info.si_code = code;
1740 	info.si_addr = addr;
1741 	info.si_addr_lsb = lsb;
1742 	return send_sig_info(info.si_signo, &info, t);
1743 }
1744 EXPORT_SYMBOL(send_sig_mceerr);
1745 
1746 int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1747 {
1748 	struct kernel_siginfo info;
1749 
1750 	clear_siginfo(&info);
1751 	info.si_signo = SIGSEGV;
1752 	info.si_errno = 0;
1753 	info.si_code  = SEGV_BNDERR;
1754 	info.si_addr  = addr;
1755 	info.si_lower = lower;
1756 	info.si_upper = upper;
1757 	return force_sig_info(&info);
1758 }
1759 
1760 #ifdef SEGV_PKUERR
1761 int force_sig_pkuerr(void __user *addr, u32 pkey)
1762 {
1763 	struct kernel_siginfo info;
1764 
1765 	clear_siginfo(&info);
1766 	info.si_signo = SIGSEGV;
1767 	info.si_errno = 0;
1768 	info.si_code  = SEGV_PKUERR;
1769 	info.si_addr  = addr;
1770 	info.si_pkey  = pkey;
1771 	return force_sig_info(&info);
1772 }
1773 #endif
1774 
1775 int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
1776 {
1777 	struct kernel_siginfo info;
1778 
1779 	clear_siginfo(&info);
1780 	info.si_signo     = SIGTRAP;
1781 	info.si_errno     = 0;
1782 	info.si_code      = TRAP_PERF;
1783 	info.si_addr      = addr;
1784 	info.si_perf_data = sig_data;
1785 	info.si_perf_type = type;
1786 
1787 	/*
1788 	 * Signals generated by perf events should not terminate the whole
1789 	 * process if SIGTRAP is blocked, however, delivering the signal
1790 	 * asynchronously is better than not delivering at all. But tell user
1791 	 * space if the signal was asynchronous, so it can clearly be
1792 	 * distinguished from normal synchronous ones.
1793 	 */
1794 	info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
1795 				     TRAP_PERF_FLAG_ASYNC :
1796 				     0;
1797 
1798 	return send_sig_info(info.si_signo, &info, current);
1799 }
1800 
1801 /**
1802  * force_sig_seccomp - signals the task to allow in-process syscall emulation
1803  * @syscall: syscall number to send to userland
1804  * @reason: filter-supplied reason code to send to userland (via si_errno)
1805  * @force_coredump: true to trigger a coredump
1806  *
1807  * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
1808  */
1809 int force_sig_seccomp(int syscall, int reason, bool force_coredump)
1810 {
1811 	struct kernel_siginfo info;
1812 
1813 	clear_siginfo(&info);
1814 	info.si_signo = SIGSYS;
1815 	info.si_code = SYS_SECCOMP;
1816 	info.si_call_addr = (void __user *)KSTK_EIP(current);
1817 	info.si_errno = reason;
1818 	info.si_arch = syscall_get_arch(current);
1819 	info.si_syscall = syscall;
1820 	return force_sig_info_to_task(&info, current,
1821 		force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
1822 }
1823 
1824 /* For the crazy architectures that include trap information in
1825  * the errno field, instead of an actual errno value.
1826  */
1827 int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1828 {
1829 	struct kernel_siginfo info;
1830 
1831 	clear_siginfo(&info);
1832 	info.si_signo = SIGTRAP;
1833 	info.si_errno = errno;
1834 	info.si_code  = TRAP_HWBKPT;
1835 	info.si_addr  = addr;
1836 	return force_sig_info(&info);
1837 }
1838 
1839 /* For the rare architectures that include trap information using
1840  * si_trapno.
1841  */
1842 int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
1843 {
1844 	struct kernel_siginfo info;
1845 
1846 	clear_siginfo(&info);
1847 	info.si_signo = sig;
1848 	info.si_errno = 0;
1849 	info.si_code  = code;
1850 	info.si_addr  = addr;
1851 	info.si_trapno = trapno;
1852 	return force_sig_info(&info);
1853 }
1854 
1855 /* For the rare architectures that include trap information using
1856  * si_trapno.
1857  */
1858 int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
1859 			  struct task_struct *t)
1860 {
1861 	struct kernel_siginfo info;
1862 
1863 	clear_siginfo(&info);
1864 	info.si_signo = sig;
1865 	info.si_errno = 0;
1866 	info.si_code  = code;
1867 	info.si_addr  = addr;
1868 	info.si_trapno = trapno;
1869 	return send_sig_info(info.si_signo, &info, t);
1870 }
1871 
1872 static int kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1873 {
1874 	int ret;
1875 	read_lock(&tasklist_lock);
1876 	ret = __kill_pgrp_info(sig, info, pgrp);
1877 	read_unlock(&tasklist_lock);
1878 	return ret;
1879 }
1880 
1881 int kill_pgrp(struct pid *pid, int sig, int priv)
1882 {
1883 	return kill_pgrp_info(sig, __si_special(priv), pid);
1884 }
1885 EXPORT_SYMBOL(kill_pgrp);
1886 
1887 int kill_pid(struct pid *pid, int sig, int priv)
1888 {
1889 	return kill_pid_info(sig, __si_special(priv), pid);
1890 }
1891 EXPORT_SYMBOL(kill_pid);
1892 
1893 #ifdef CONFIG_POSIX_TIMERS
1894 /*
1895  * These functions handle POSIX timer signals. POSIX timers use
1896  * preallocated sigqueue structs for sending signals.
1897  */
1898 static void __flush_itimer_signals(struct sigpending *pending)
1899 {
1900 	sigset_t signal, retain;
1901 	struct sigqueue *q, *n;
1902 
1903 	signal = pending->signal;
1904 	sigemptyset(&retain);
1905 
1906 	list_for_each_entry_safe(q, n, &pending->list, list) {
1907 		int sig = q->info.si_signo;
1908 
1909 		if (likely(q->info.si_code != SI_TIMER)) {
1910 			sigaddset(&retain, sig);
1911 		} else {
1912 			sigdelset(&signal, sig);
1913 			list_del_init(&q->list);
1914 			__sigqueue_free(q);
1915 		}
1916 	}
1917 
1918 	sigorsets(&pending->signal, &signal, &retain);
1919 }
1920 
1921 void flush_itimer_signals(void)
1922 {
1923 	struct task_struct *tsk = current;
1924 
1925 	guard(spinlock_irqsave)(&tsk->sighand->siglock);
1926 	__flush_itimer_signals(&tsk->pending);
1927 	__flush_itimer_signals(&tsk->signal->shared_pending);
1928 }
1929 
1930 bool posixtimer_init_sigqueue(struct sigqueue *q)
1931 {
1932 	struct ucounts *ucounts = sig_get_ucounts(current, -1, 0);
1933 
1934 	if (!ucounts)
1935 		return false;
1936 	clear_siginfo(&q->info);
1937 	__sigqueue_init(q, ucounts, SIGQUEUE_PREALLOC);
1938 	return true;
1939 }
1940 
1941 static void posixtimer_queue_sigqueue(struct sigqueue *q, struct task_struct *t, enum pid_type type)
1942 {
1943 	struct sigpending *pending;
1944 	int sig = q->info.si_signo;
1945 
1946 	signalfd_notify(t, sig);
1947 	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1948 	list_add_tail(&q->list, &pending->list);
1949 	sigaddset(&pending->signal, sig);
1950 	complete_signal(sig, t, type);
1951 }
1952 
1953 /*
1954  * This function is used by POSIX timers to deliver a timer signal.
1955  * Where type is PIDTYPE_PID (such as for timers with SIGEV_THREAD_ID
1956  * set), the signal must be delivered to the specific thread (queues
1957  * into t->pending).
1958  *
1959  * Where type is not PIDTYPE_PID, signals must be delivered to the
1960  * process. In this case, prefer to deliver to current if it is in
1961  * the same thread group as the target process and its sighand is
1962  * stable, which avoids unnecessarily waking up a potentially idle task.
1963  */
1964 static inline struct task_struct *posixtimer_get_target(struct k_itimer *tmr)
1965 {
1966 	struct task_struct *t = pid_task(tmr->it_pid, tmr->it_pid_type);
1967 
1968 	if (t && tmr->it_pid_type != PIDTYPE_PID &&
1969 	    same_thread_group(t, current) && !current->exit_state)
1970 		t = current;
1971 	return t;
1972 }
1973 
1974 void posixtimer_send_sigqueue(struct k_itimer *tmr)
1975 {
1976 	struct sigqueue *q = &tmr->sigq;
1977 	int sig = q->info.si_signo;
1978 	struct task_struct *t;
1979 	unsigned long flags;
1980 	int result;
1981 
1982 	guard(rcu)();
1983 
1984 	t = posixtimer_get_target(tmr);
1985 	if (!t)
1986 		return;
1987 
1988 	if (!likely(lock_task_sighand(t, &flags)))
1989 		return;
1990 
1991 	/*
1992 	 * Update @tmr::sigqueue_seq for posix timer signals with sighand
1993 	 * locked to prevent a race against dequeue_signal().
1994 	 */
1995 	tmr->it_sigqueue_seq = tmr->it_signal_seq;
1996 
1997 	/*
1998 	 * Set the signal delivery status under sighand lock, so that the
1999 	 * ignored signal handling can distinguish between a periodic and a
2000 	 * non-periodic timer.
2001 	 */
2002 	tmr->it_sig_periodic = tmr->it_status == POSIX_TIMER_REQUEUE_PENDING;
2003 
2004 	if (!prepare_signal(sig, t, false)) {
2005 		result = TRACE_SIGNAL_IGNORED;
2006 
2007 		if (!list_empty(&q->list)) {
2008 			/*
2009 			 * The signal was ignored and blocked. The timer
2010 			 * expiry queued it because blocked signals are
2011 			 * queued independent of the ignored state.
2012 			 *
2013 			 * The unblocking set SIGPENDING, but the signal
2014 			 * was not yet dequeued from the pending list.
2015 			 * So prepare_signal() sees unblocked and ignored,
2016 			 * which ends up here. Leave it queued like a
2017 			 * regular signal.
2018 			 *
2019 			 * The same happens when the task group is exiting
2020 			 * and the signal is already queued.
2021 			 * prepare_signal() treats SIGNAL_GROUP_EXIT as
2022 			 * ignored independent of its queued state. This
2023 			 * gets cleaned up in __exit_signal().
2024 			 */
2025 			goto out;
2026 		}
2027 
2028 		/* Periodic timers with SIG_IGN are queued on the ignored list */
2029 		if (tmr->it_sig_periodic) {
2030 			/*
2031 			 * Already queued means the timer was rearmed after
2032 			 * the previous expiry got it on the ignore list.
2033 			 * Nothing to do for that case.
2034 			 */
2035 			if (hlist_unhashed(&tmr->ignored_list)) {
2036 				/*
2037 				 * Take a signal reference and queue it on
2038 				 * the ignored list.
2039 				 */
2040 				posixtimer_sigqueue_getref(q);
2041 				posixtimer_sig_ignore(t, q);
2042 			}
2043 		} else if (!hlist_unhashed(&tmr->ignored_list)) {
2044 			/*
2045 			 * Covers the case where a timer was periodic and
2046 			 * then the signal was ignored. Later it was rearmed
2047 			 * as oneshot timer. The previous signal is invalid
2048 			 * now, and this oneshot signal has to be dropped.
2049 			 * Remove it from the ignored list and drop the
2050 			 * reference count as the signal is not longer
2051 			 * queued.
2052 			 */
2053 			hlist_del_init(&tmr->ignored_list);
2054 			posixtimer_putref(tmr);
2055 		}
2056 		goto out;
2057 	}
2058 
2059 	if (unlikely(!list_empty(&q->list))) {
2060 		/* This holds a reference count already */
2061 		result = TRACE_SIGNAL_ALREADY_PENDING;
2062 		goto out;
2063 	}
2064 
2065 	/*
2066 	 * If the signal is on the ignore list, it got blocked after it was
2067 	 * ignored earlier. But nothing lifted the ignore. Move it back to
2068 	 * the pending list to be consistent with the regular signal
2069 	 * handling. This already holds a reference count.
2070 	 *
2071 	 * If it's not on the ignore list acquire a reference count.
2072 	 */
2073 	if (likely(hlist_unhashed(&tmr->ignored_list)))
2074 		posixtimer_sigqueue_getref(q);
2075 	else
2076 		hlist_del_init(&tmr->ignored_list);
2077 
2078 	posixtimer_queue_sigqueue(q, t, tmr->it_pid_type);
2079 	result = TRACE_SIGNAL_DELIVERED;
2080 out:
2081 	trace_signal_generate(sig, &q->info, t, tmr->it_pid_type != PIDTYPE_PID, result);
2082 	unlock_task_sighand(t, &flags);
2083 }
2084 
2085 static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q)
2086 {
2087 	struct k_itimer *tmr = container_of(q, struct k_itimer, sigq);
2088 
2089 	/*
2090 	 * If the timer is marked deleted already or the signal originates
2091 	 * from a non-periodic timer, then just drop the reference
2092 	 * count. Otherwise queue it on the ignored list.
2093 	 */
2094 	if (posixtimer_valid(tmr) && tmr->it_sig_periodic)
2095 		hlist_add_head(&tmr->ignored_list, &tsk->signal->ignored_posix_timers);
2096 	else
2097 		posixtimer_putref(tmr);
2098 }
2099 
2100 static void posixtimer_sig_unignore(struct task_struct *tsk, int sig)
2101 {
2102 	struct hlist_head *head = &tsk->signal->ignored_posix_timers;
2103 	struct hlist_node *tmp;
2104 	struct k_itimer *tmr;
2105 
2106 	if (likely(hlist_empty(head)))
2107 		return;
2108 
2109 	/*
2110 	 * Rearming a timer with sighand lock held is not possible due to
2111 	 * lock ordering vs. tmr::it_lock. Just stick the sigqueue back and
2112 	 * let the signal delivery path deal with it whether it needs to be
2113 	 * rearmed or not. This cannot be decided here w/o dropping sighand
2114 	 * lock and creating a loop retry horror show.
2115 	 */
2116 	hlist_for_each_entry_safe(tmr, tmp , head, ignored_list) {
2117 		struct task_struct *target;
2118 
2119 		/*
2120 		 * tmr::sigq.info.si_signo is immutable, so accessing it
2121 		 * without holding tmr::it_lock is safe.
2122 		 */
2123 		if (tmr->sigq.info.si_signo != sig)
2124 			continue;
2125 
2126 		hlist_del_init(&tmr->ignored_list);
2127 
2128 		/* This should never happen and leaks a reference count */
2129 		if (WARN_ON_ONCE(!list_empty(&tmr->sigq.list)))
2130 			continue;
2131 
2132 		/*
2133 		 * Get the target for the signal. If target is a thread and
2134 		 * has exited by now, drop the reference count.
2135 		 */
2136 		guard(rcu)();
2137 		target = posixtimer_get_target(tmr);
2138 		if (target)
2139 			posixtimer_queue_sigqueue(&tmr->sigq, target, tmr->it_pid_type);
2140 		else
2141 			posixtimer_putref(tmr);
2142 	}
2143 }
2144 #else /* CONFIG_POSIX_TIMERS */
2145 static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q) { }
2146 static inline void posixtimer_sig_unignore(struct task_struct *tsk, int sig) { }
2147 #endif /* !CONFIG_POSIX_TIMERS */
2148 
2149 void do_notify_pidfd(struct task_struct *task)
2150 {
2151 	struct pid *pid = task_pid(task);
2152 
2153 	WARN_ON(task->exit_state == 0);
2154 
2155 	__wake_up(&pid->wait_pidfd, TASK_NORMAL, 0,
2156 			poll_to_key(EPOLLIN | EPOLLRDNORM));
2157 }
2158 
2159 /*
2160  * Let a parent know about the death of a child.
2161  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2162  *
2163  * Returns true if our parent ignored us and so we've switched to
2164  * self-reaping.
2165  */
2166 bool do_notify_parent(struct task_struct *tsk, int sig)
2167 {
2168 	struct kernel_siginfo info;
2169 	unsigned long flags;
2170 	struct sighand_struct *psig;
2171 	bool autoreap = false;
2172 	u64 utime, stime;
2173 
2174 	if (WARN_ON_ONCE(!valid_signal(sig)))
2175 		return false;
2176 
2177 	/* do_notify_parent_cldstop should have been called instead.  */
2178 	WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
2179 
2180 	WARN_ON_ONCE(!tsk->ptrace && !thread_group_empty(tsk));
2181 
2182 	/* ptraced, or group-leader without sub-threads */
2183 	do_notify_pidfd(tsk);
2184 
2185 	if (sig != SIGCHLD) {
2186 		/*
2187 		 * This is only possible if parent == real_parent.
2188 		 * Check if it has changed security domain.
2189 		 */
2190 		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2191 			sig = SIGCHLD;
2192 	}
2193 
2194 	clear_siginfo(&info);
2195 	info.si_signo = sig;
2196 	info.si_errno = 0;
2197 	/*
2198 	 * We are under tasklist_lock here so our parent is tied to
2199 	 * us and cannot change.
2200 	 *
2201 	 * task_active_pid_ns will always return the same pid namespace
2202 	 * until a task passes through release_task.
2203 	 *
2204 	 * write_lock() currently calls preempt_disable() which is the
2205 	 * same as rcu_read_lock(), but according to Oleg, this is not
2206 	 * correct to rely on this
2207 	 */
2208 	rcu_read_lock();
2209 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2210 	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2211 				       task_uid(tsk));
2212 	rcu_read_unlock();
2213 
2214 	task_cputime(tsk, &utime, &stime);
2215 	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2216 	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2217 
2218 	info.si_status = tsk->exit_code & 0x7f;
2219 	if (tsk->exit_code & 0x80)
2220 		info.si_code = CLD_DUMPED;
2221 	else if (tsk->exit_code & 0x7f)
2222 		info.si_code = CLD_KILLED;
2223 	else {
2224 		info.si_code = CLD_EXITED;
2225 		info.si_status = tsk->exit_code >> 8;
2226 	}
2227 
2228 	psig = tsk->parent->sighand;
2229 	spin_lock_irqsave(&psig->siglock, flags);
2230 	if (!tsk->ptrace && sig == SIGCHLD &&
2231 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2232 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2233 		/*
2234 		 * We are exiting and our parent doesn't care.  POSIX.1
2235 		 * defines special semantics for setting SIGCHLD to SIG_IGN
2236 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
2237 		 * automatically and not left for our parent's wait4 call.
2238 		 * Rather than having the parent do it as a magic kind of
2239 		 * signal handler, we just set this to tell do_exit that we
2240 		 * can be cleaned up without becoming a zombie.  Note that
2241 		 * we still call __wake_up_parent in this case, because a
2242 		 * blocked sys_wait4 might now return -ECHILD.
2243 		 *
2244 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2245 		 * is implementation-defined: we do (if you don't want
2246 		 * it, just use SIG_IGN instead).
2247 		 */
2248 		autoreap = true;
2249 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2250 			sig = 0;
2251 	}
2252 	if (!tsk->ptrace && tsk->signal->autoreap) {
2253 		autoreap = true;
2254 		sig = 0;
2255 	}
2256 	/*
2257 	 * Send with __send_signal as si_pid and si_uid are in the
2258 	 * parent's namespaces.
2259 	 */
2260 	if (sig)
2261 		__send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2262 	__wake_up_parent(tsk, tsk->parent);
2263 	spin_unlock_irqrestore(&psig->siglock, flags);
2264 
2265 	return autoreap;
2266 }
2267 
2268 /**
2269  * do_notify_parent_cldstop - notify parent of stopped/continued state change
2270  * @tsk: task reporting the state change
2271  * @for_ptracer: the notification is for ptracer
2272  * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2273  *
2274  * Notify @tsk's parent that the stopped/continued state has changed.  If
2275  * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
2276  * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2277  *
2278  * CONTEXT:
2279  * Must be called with tasklist_lock at least read locked.
2280  */
2281 static void do_notify_parent_cldstop(struct task_struct *tsk,
2282 				     bool for_ptracer, int why)
2283 {
2284 	struct kernel_siginfo info;
2285 	unsigned long flags;
2286 	struct task_struct *parent;
2287 	struct sighand_struct *sighand;
2288 	u64 utime, stime;
2289 
2290 	if (for_ptracer) {
2291 		parent = tsk->parent;
2292 	} else {
2293 		tsk = tsk->group_leader;
2294 		parent = tsk->real_parent;
2295 	}
2296 
2297 	clear_siginfo(&info);
2298 	info.si_signo = SIGCHLD;
2299 	info.si_errno = 0;
2300 	/*
2301 	 * see comment in do_notify_parent() about the following 4 lines
2302 	 */
2303 	rcu_read_lock();
2304 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2305 	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2306 	rcu_read_unlock();
2307 
2308 	task_cputime(tsk, &utime, &stime);
2309 	info.si_utime = nsec_to_clock_t(utime);
2310 	info.si_stime = nsec_to_clock_t(stime);
2311 
2312  	info.si_code = why;
2313  	switch (why) {
2314  	case CLD_CONTINUED:
2315  		info.si_status = SIGCONT;
2316  		break;
2317  	case CLD_STOPPED:
2318  		info.si_status = tsk->signal->group_exit_code & 0x7f;
2319  		break;
2320  	case CLD_TRAPPED:
2321  		info.si_status = tsk->exit_code & 0x7f;
2322  		break;
2323  	default:
2324  		BUG();
2325  	}
2326 
2327 	sighand = parent->sighand;
2328 	spin_lock_irqsave(&sighand->siglock, flags);
2329 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2330 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2331 		send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
2332 	/*
2333 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2334 	 */
2335 	__wake_up_parent(tsk, parent);
2336 	spin_unlock_irqrestore(&sighand->siglock, flags);
2337 }
2338 
2339 /*
2340  * This must be called with current->sighand->siglock held.
2341  *
2342  * This should be the path for all ptrace stops.
2343  * We always set current->last_siginfo while stopped here.
2344  * That makes it a way to test a stopped process for
2345  * being ptrace-stopped vs being job-control-stopped.
2346  *
2347  * Returns the signal the ptracer requested the code resume
2348  * with.  If the code did not stop because the tracer is gone,
2349  * the stop signal remains unchanged unless clear_code.
2350  */
2351 static int ptrace_stop(int exit_code, int why, unsigned long message,
2352 		       kernel_siginfo_t *info)
2353 	__releases(&current->sighand->siglock)
2354 	__acquires(&current->sighand->siglock)
2355 {
2356 	bool gstop_done = false;
2357 
2358 	if (arch_ptrace_stop_needed()) {
2359 		/*
2360 		 * The arch code has something special to do before a
2361 		 * ptrace stop.  This is allowed to block, e.g. for faults
2362 		 * on user stack pages.  We can't keep the siglock while
2363 		 * calling arch_ptrace_stop, so we must release it now.
2364 		 * To preserve proper semantics, we must do this before
2365 		 * any signal bookkeeping like checking group_stop_count.
2366 		 */
2367 		spin_unlock_irq(&current->sighand->siglock);
2368 		arch_ptrace_stop();
2369 		spin_lock_irq(&current->sighand->siglock);
2370 	}
2371 
2372 	/*
2373 	 * After this point ptrace_signal_wake_up or signal_wake_up
2374 	 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
2375 	 * signal comes in.  Handle previous ptrace_unlinks and fatal
2376 	 * signals here to prevent ptrace_stop sleeping in schedule.
2377 	 */
2378 	if (!current->ptrace || __fatal_signal_pending(current))
2379 		return exit_code;
2380 
2381 	set_special_state(TASK_TRACED);
2382 	current->jobctl |= JOBCTL_TRACED;
2383 
2384 	/*
2385 	 * We're committing to trapping.  TRACED should be visible before
2386 	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2387 	 * Also, transition to TRACED and updates to ->jobctl should be
2388 	 * atomic with respect to siglock and should be done after the arch
2389 	 * hook as siglock is released and regrabbed across it.
2390 	 *
2391 	 *     TRACER				    TRACEE
2392 	 *
2393 	 *     ptrace_attach()
2394 	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
2395 	 *     do_wait()
2396 	 *       set_current_state()                smp_wmb();
2397 	 *       ptrace_do_wait()
2398 	 *         wait_task_stopped()
2399 	 *           task_stopped_code()
2400 	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
2401 	 */
2402 	smp_wmb();
2403 
2404 	current->ptrace_message = message;
2405 	current->last_siginfo = info;
2406 	current->exit_code = exit_code;
2407 
2408 	/*
2409 	 * If @why is CLD_STOPPED, we're trapping to participate in a group
2410 	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delievered
2411 	 * across siglock relocks since INTERRUPT was scheduled, PENDING
2412 	 * could be clear now.  We act as if SIGCONT is received after
2413 	 * TASK_TRACED is entered - ignore it.
2414 	 */
2415 	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2416 		gstop_done = task_participate_group_stop(current);
2417 
2418 	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2419 	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2420 	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2421 		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2422 
2423 	/* entering a trap, clear TRAPPING */
2424 	task_clear_jobctl_trapping(current);
2425 
2426 	spin_unlock_irq(&current->sighand->siglock);
2427 	read_lock(&tasklist_lock);
2428 	/*
2429 	 * Notify parents of the stop.
2430 	 *
2431 	 * While ptraced, there are two parents - the ptracer and
2432 	 * the real_parent of the group_leader.  The ptracer should
2433 	 * know about every stop while the real parent is only
2434 	 * interested in the completion of group stop.  The states
2435 	 * for the two don't interact with each other.  Notify
2436 	 * separately unless they're gonna be duplicates.
2437 	 */
2438 	if (current->ptrace)
2439 		do_notify_parent_cldstop(current, true, why);
2440 	if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
2441 		do_notify_parent_cldstop(current, false, why);
2442 
2443 	/*
2444 	 * The previous do_notify_parent_cldstop() invocation woke ptracer.
2445 	 * One a PREEMPTION kernel this can result in preemption requirement
2446 	 * which will be fulfilled after read_unlock() and the ptracer will be
2447 	 * put on the CPU.
2448 	 * The ptracer is in wait_task_inactive(, __TASK_TRACED) waiting for
2449 	 * this task wait in schedule(). If this task gets preempted then it
2450 	 * remains enqueued on the runqueue. The ptracer will observe this and
2451 	 * then sleep for a delay of one HZ tick. In the meantime this task
2452 	 * gets scheduled, enters schedule() and will wait for the ptracer.
2453 	 *
2454 	 * This preemption point is not bad from a correctness point of
2455 	 * view but extends the runtime by one HZ tick time due to the
2456 	 * ptracer's sleep.  The preempt-disable section ensures that there
2457 	 * will be no preemption between unlock and schedule() and so
2458 	 * improving the performance since the ptracer will observe that
2459 	 * the tracee is scheduled out once it gets on the CPU.
2460 	 *
2461 	 * On PREEMPT_RT locking tasklist_lock does not disable preemption.
2462 	 * Therefore the task can be preempted after do_notify_parent_cldstop()
2463 	 * before unlocking tasklist_lock so there is no benefit in doing this.
2464 	 *
2465 	 * In fact disabling preemption is harmful on PREEMPT_RT because
2466 	 * the spinlock_t in cgroup_enter_frozen() must not be acquired
2467 	 * with preemption disabled due to the 'sleeping' spinlock
2468 	 * substitution of RT.
2469 	 */
2470 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2471 		preempt_disable();
2472 	read_unlock(&tasklist_lock);
2473 	cgroup_enter_frozen();
2474 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2475 		preempt_enable_no_resched();
2476 	schedule();
2477 	cgroup_leave_frozen(true);
2478 
2479 	/*
2480 	 * We are back.  Now reacquire the siglock before touching
2481 	 * last_siginfo, so that we are sure to have synchronized with
2482 	 * any signal-sending on another CPU that wants to examine it.
2483 	 */
2484 	spin_lock_irq(&current->sighand->siglock);
2485 	exit_code = current->exit_code;
2486 	current->last_siginfo = NULL;
2487 	current->ptrace_message = 0;
2488 	current->exit_code = 0;
2489 
2490 	/* LISTENING can be set only during STOP traps, clear it */
2491 	current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);
2492 
2493 	/*
2494 	 * Queued signals ignored us while we were stopped for tracing.
2495 	 * So check for any that we should take before resuming user mode.
2496 	 * This sets TIF_SIGPENDING, but never clears it.
2497 	 */
2498 	recalc_sigpending_tsk(current);
2499 	return exit_code;
2500 }
2501 
2502 static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
2503 {
2504 	kernel_siginfo_t info;
2505 
2506 	clear_siginfo(&info);
2507 	info.si_signo = signr;
2508 	info.si_code = exit_code;
2509 	info.si_pid = task_pid_vnr(current);
2510 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2511 
2512 	/* Let the debugger run.  */
2513 	return ptrace_stop(exit_code, why, message, &info);
2514 }
2515 
2516 int ptrace_notify(int exit_code, unsigned long message)
2517 {
2518 	int signr;
2519 
2520 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2521 	if (unlikely(task_work_pending(current)))
2522 		task_work_run();
2523 
2524 	spin_lock_irq(&current->sighand->siglock);
2525 	signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
2526 	spin_unlock_irq(&current->sighand->siglock);
2527 	return signr;
2528 }
2529 
2530 /**
2531  * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2532  * @signr: signr causing group stop if initiating
2533  *
2534  * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2535  * and participate in it.  If already set, participate in the existing
2536  * group stop.  If participated in a group stop (and thus slept), %true is
2537  * returned with siglock released.
2538  *
2539  * If ptraced, this function doesn't handle stop itself.  Instead,
2540  * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2541  * untouched.  The caller must ensure that INTERRUPT trap handling takes
2542  * places afterwards.
2543  *
2544  * CONTEXT:
2545  * Must be called with @current->sighand->siglock held, which is released
2546  * on %true return.
2547  *
2548  * RETURNS:
2549  * %false if group stop is already cancelled or ptrace trap is scheduled.
2550  * %true if participated in group stop.
2551  */
2552 static bool do_signal_stop(int signr)
2553 	__releases(&current->sighand->siglock)
2554 {
2555 	struct signal_struct *sig = current->signal;
2556 
2557 	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2558 		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2559 		struct task_struct *t;
2560 
2561 		/* signr will be recorded in task->jobctl for retries */
2562 		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2563 
2564 		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2565 		    unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
2566 		    unlikely(sig->group_exec_task))
2567 			return false;
2568 		/*
2569 		 * There is no group stop already in progress.  We must
2570 		 * initiate one now.
2571 		 *
2572 		 * While ptraced, a task may be resumed while group stop is
2573 		 * still in effect and then receive a stop signal and
2574 		 * initiate another group stop.  This deviates from the
2575 		 * usual behavior as two consecutive stop signals can't
2576 		 * cause two group stops when !ptraced.  That is why we
2577 		 * also check !task_is_stopped(t) below.
2578 		 *
2579 		 * The condition can be distinguished by testing whether
2580 		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
2581 		 * group_exit_code in such case.
2582 		 *
2583 		 * This is not necessary for SIGNAL_STOP_CONTINUED because
2584 		 * an intervening stop signal is required to cause two
2585 		 * continued events regardless of ptrace.
2586 		 */
2587 		if (!(sig->flags & SIGNAL_STOP_STOPPED))
2588 			sig->group_exit_code = signr;
2589 
2590 		sig->group_stop_count = 0;
2591 		if (task_set_jobctl_pending(current, signr | gstop))
2592 			sig->group_stop_count++;
2593 
2594 		for_other_threads(current, t) {
2595 			/*
2596 			 * Setting state to TASK_STOPPED for a group
2597 			 * stop is always done with the siglock held,
2598 			 * so this check has no races.
2599 			 */
2600 			if (!task_is_stopped(t) &&
2601 			    task_set_jobctl_pending(t, signr | gstop)) {
2602 				sig->group_stop_count++;
2603 				if (likely(!(t->ptrace & PT_SEIZED)))
2604 					signal_wake_up(t, 0);
2605 				else
2606 					ptrace_trap_notify(t);
2607 			}
2608 		}
2609 	}
2610 
2611 	if (likely(!current->ptrace)) {
2612 		int notify = 0;
2613 
2614 		/*
2615 		 * If there are no other threads in the group, or if there
2616 		 * is a group stop in progress and we are the last to stop,
2617 		 * report to the parent.
2618 		 */
2619 		if (task_participate_group_stop(current))
2620 			notify = CLD_STOPPED;
2621 
2622 		current->jobctl |= JOBCTL_STOPPED;
2623 		set_special_state(TASK_STOPPED);
2624 		spin_unlock_irq(&current->sighand->siglock);
2625 
2626 		/*
2627 		 * Notify the parent of the group stop completion.  Because
2628 		 * we're not holding either the siglock or tasklist_lock
2629 		 * here, ptracer may attach inbetween; however, this is for
2630 		 * group stop and should always be delivered to the real
2631 		 * parent of the group leader.  The new ptracer will get
2632 		 * its notification when this task transitions into
2633 		 * TASK_TRACED.
2634 		 */
2635 		if (notify) {
2636 			read_lock(&tasklist_lock);
2637 			do_notify_parent_cldstop(current, false, notify);
2638 			read_unlock(&tasklist_lock);
2639 		}
2640 
2641 		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2642 		cgroup_enter_frozen();
2643 		schedule();
2644 		return true;
2645 	} else {
2646 		/*
2647 		 * While ptraced, group stop is handled by STOP trap.
2648 		 * Schedule it and let the caller deal with it.
2649 		 */
2650 		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2651 		return false;
2652 	}
2653 }
2654 
2655 /**
2656  * do_jobctl_trap - take care of ptrace jobctl traps
2657  *
2658  * When PT_SEIZED, it's used for both group stop and explicit
2659  * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2660  * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2661  * the stop signal; otherwise, %SIGTRAP.
2662  *
2663  * When !PT_SEIZED, it's used only for group stop trap with stop signal
2664  * number as exit_code and no siginfo.
2665  *
2666  * CONTEXT:
2667  * Must be called with @current->sighand->siglock held, which may be
2668  * released and re-acquired before returning with intervening sleep.
2669  */
2670 static void do_jobctl_trap(void)
2671 {
2672 	struct signal_struct *signal = current->signal;
2673 	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2674 
2675 	if (current->ptrace & PT_SEIZED) {
2676 		if (!signal->group_stop_count &&
2677 		    !(signal->flags & SIGNAL_STOP_STOPPED))
2678 			signr = SIGTRAP;
2679 		WARN_ON_ONCE(!signr);
2680 		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2681 				 CLD_STOPPED, 0);
2682 	} else {
2683 		WARN_ON_ONCE(!signr);
2684 		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2685 	}
2686 }
2687 
2688 /**
2689  * do_freezer_trap - handle the freezer jobctl trap
2690  *
2691  * Puts the task into frozen state, if only the task is not about to quit.
2692  * In this case it drops JOBCTL_TRAP_FREEZE.
2693  *
2694  * CONTEXT:
2695  * Must be called with @current->sighand->siglock held,
2696  * which is always released before returning.
2697  */
2698 static void do_freezer_trap(void)
2699 	__releases(&current->sighand->siglock)
2700 {
2701 	/*
2702 	 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2703 	 * let's make another loop to give it a chance to be handled.
2704 	 * In any case, we'll return back.
2705 	 */
2706 	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2707 	     JOBCTL_TRAP_FREEZE) {
2708 		spin_unlock_irq(&current->sighand->siglock);
2709 		return;
2710 	}
2711 
2712 	/*
2713 	 * Now we're sure that there is no pending fatal signal and no
2714 	 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2715 	 * immediately (if there is a non-fatal signal pending), and
2716 	 * put the task into sleep.
2717 	 */
2718 	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
2719 	clear_thread_flag(TIF_SIGPENDING);
2720 	spin_unlock_irq(&current->sighand->siglock);
2721 	cgroup_enter_frozen();
2722 	schedule();
2723 
2724 	/*
2725 	 * We could've been woken by task_work, run it to clear
2726 	 * TIF_NOTIFY_SIGNAL. The caller will retry if necessary.
2727 	 */
2728 	clear_notify_signal();
2729 	if (unlikely(task_work_pending(current)))
2730 		task_work_run();
2731 }
2732 
2733 static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
2734 {
2735 	/*
2736 	 * We do not check sig_kernel_stop(signr) but set this marker
2737 	 * unconditionally because we do not know whether debugger will
2738 	 * change signr. This flag has no meaning unless we are going
2739 	 * to stop after return from ptrace_stop(). In this case it will
2740 	 * be checked in do_signal_stop(), we should only stop if it was
2741 	 * not cleared by SIGCONT while we were sleeping. See also the
2742 	 * comment in dequeue_signal().
2743 	 */
2744 	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2745 	signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);
2746 
2747 	/* We're back.  Did the debugger cancel the sig?  */
2748 	if (signr == 0)
2749 		return signr;
2750 
2751 	/*
2752 	 * Update the siginfo structure if the signal has
2753 	 * changed.  If the debugger wanted something
2754 	 * specific in the siginfo structure then it should
2755 	 * have updated *info via PTRACE_SETSIGINFO.
2756 	 */
2757 	if (signr != info->si_signo) {
2758 		clear_siginfo(info);
2759 		info->si_signo = signr;
2760 		info->si_errno = 0;
2761 		info->si_code = SI_USER;
2762 		rcu_read_lock();
2763 		info->si_pid = task_pid_vnr(current->parent);
2764 		info->si_uid = from_kuid_munged(current_user_ns(),
2765 						task_uid(current->parent));
2766 		rcu_read_unlock();
2767 	}
2768 
2769 	/* If the (new) signal is now blocked, requeue it.  */
2770 	if (sigismember(&current->blocked, signr) ||
2771 	    fatal_signal_pending(current)) {
2772 		send_signal_locked(signr, info, current, type);
2773 		signr = 0;
2774 	}
2775 
2776 	return signr;
2777 }
2778 
2779 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2780 {
2781 	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2782 	case SIL_FAULT:
2783 	case SIL_FAULT_TRAPNO:
2784 	case SIL_FAULT_MCEERR:
2785 	case SIL_FAULT_BNDERR:
2786 	case SIL_FAULT_PKUERR:
2787 	case SIL_FAULT_PERF_EVENT:
2788 		ksig->info.si_addr = arch_untagged_si_addr(
2789 			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2790 		break;
2791 	case SIL_KILL:
2792 	case SIL_TIMER:
2793 	case SIL_POLL:
2794 	case SIL_CHLD:
2795 	case SIL_RT:
2796 	case SIL_SYS:
2797 		break;
2798 	}
2799 }
2800 
2801 bool get_signal(struct ksignal *ksig)
2802 {
2803 	struct sighand_struct *sighand = current->sighand;
2804 	struct signal_struct *signal = current->signal;
2805 	int signr;
2806 
2807 	clear_notify_signal();
2808 	if (unlikely(task_work_pending(current)))
2809 		task_work_run();
2810 
2811 	if (!task_sigpending(current))
2812 		return false;
2813 
2814 	if (unlikely(uprobe_deny_signal()))
2815 		return false;
2816 
2817 	/*
2818 	 * Do this once, we can't return to user-mode if freezing() == T.
2819 	 * do_signal_stop() and ptrace_stop() set TASK_STOPPED/TASK_TRACED
2820 	 * and the freezer handles those states via TASK_FROZEN, thus they
2821 	 * do not need another check after return.
2822 	 */
2823 	try_to_freeze();
2824 
2825 relock:
2826 	spin_lock_irq(&sighand->siglock);
2827 
2828 	/*
2829 	 * Every stopped thread goes here after wakeup. Check to see if
2830 	 * we should notify the parent, prepare_signal(SIGCONT) encodes
2831 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2832 	 */
2833 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2834 		int why;
2835 
2836 		if (signal->flags & SIGNAL_CLD_CONTINUED)
2837 			why = CLD_CONTINUED;
2838 		else
2839 			why = CLD_STOPPED;
2840 
2841 		signal->flags &= ~SIGNAL_CLD_MASK;
2842 
2843 		spin_unlock_irq(&sighand->siglock);
2844 
2845 		/*
2846 		 * Notify the parent that we're continuing.  This event is
2847 		 * always per-process and doesn't make whole lot of sense
2848 		 * for ptracers, who shouldn't consume the state via
2849 		 * wait(2) either, but, for backward compatibility, notify
2850 		 * the ptracer of the group leader too unless it's gonna be
2851 		 * a duplicate.
2852 		 */
2853 		read_lock(&tasklist_lock);
2854 		do_notify_parent_cldstop(current, false, why);
2855 
2856 		if (ptrace_reparented(current->group_leader))
2857 			do_notify_parent_cldstop(current->group_leader,
2858 						true, why);
2859 		read_unlock(&tasklist_lock);
2860 
2861 		goto relock;
2862 	}
2863 
2864 	for (;;) {
2865 		struct k_sigaction *ka;
2866 		enum pid_type type;
2867 
2868 		/* Has this task already been marked for death? */
2869 		if ((signal->flags & SIGNAL_GROUP_EXIT) ||
2870 		     signal->group_exec_task) {
2871 			signr = SIGKILL;
2872 			sigdelset(&current->pending.signal, SIGKILL);
2873 			trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2874 					     &sighand->action[SIGKILL-1]);
2875 			recalc_sigpending();
2876 			/*
2877 			 * implies do_group_exit() or return to PF_USER_WORKER,
2878 			 * no need to initialize ksig->info/etc.
2879 			 */
2880 			goto fatal;
2881 		}
2882 
2883 		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2884 		    do_signal_stop(0))
2885 			goto relock;
2886 
2887 		if (unlikely(current->jobctl &
2888 			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2889 			if (current->jobctl & JOBCTL_TRAP_MASK) {
2890 				do_jobctl_trap();
2891 				spin_unlock_irq(&sighand->siglock);
2892 			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2893 				do_freezer_trap();
2894 
2895 			goto relock;
2896 		}
2897 
2898 		/*
2899 		 * If the task is leaving the frozen state, let's update
2900 		 * cgroup counters and reset the frozen bit.
2901 		 */
2902 		if (unlikely(cgroup_task_frozen(current))) {
2903 			spin_unlock_irq(&sighand->siglock);
2904 			cgroup_leave_frozen(false);
2905 			goto relock;
2906 		}
2907 
2908 		/*
2909 		 * Signals generated by the execution of an instruction
2910 		 * need to be delivered before any other pending signals
2911 		 * so that the instruction pointer in the signal stack
2912 		 * frame points to the faulting instruction.
2913 		 */
2914 		type = PIDTYPE_PID;
2915 		signr = dequeue_synchronous_signal(&ksig->info);
2916 		if (!signr)
2917 			signr = dequeue_signal(&current->blocked, &ksig->info, &type);
2918 
2919 		if (!signr)
2920 			break; /* will return 0 */
2921 
2922 		if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2923 		    !(sighand->action[signr -1].sa.sa_flags & SA_IMMUTABLE)) {
2924 			signr = ptrace_signal(signr, &ksig->info, type);
2925 			if (!signr)
2926 				continue;
2927 		}
2928 
2929 		ka = &sighand->action[signr-1];
2930 
2931 		/* Trace actually delivered signals. */
2932 		trace_signal_deliver(signr, &ksig->info, ka);
2933 
2934 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2935 			continue;
2936 		if (ka->sa.sa_handler != SIG_DFL) {
2937 			/* Run the handler.  */
2938 			ksig->ka = *ka;
2939 
2940 			if (ka->sa.sa_flags & SA_ONESHOT)
2941 				ka->sa.sa_handler = SIG_DFL;
2942 
2943 			break; /* will return non-zero "signr" value */
2944 		}
2945 
2946 		/*
2947 		 * Now we are doing the default action for this signal.
2948 		 */
2949 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2950 			continue;
2951 
2952 		/*
2953 		 * Global init gets no signals it doesn't want.
2954 		 * Container-init gets no signals it doesn't want from same
2955 		 * container.
2956 		 *
2957 		 * Note that if global/container-init sees a sig_kernel_only()
2958 		 * signal here, the signal must have been generated internally
2959 		 * or must have come from an ancestor namespace. In either
2960 		 * case, the signal cannot be dropped.
2961 		 */
2962 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2963 				!sig_kernel_only(signr))
2964 			continue;
2965 
2966 		if (sig_kernel_stop(signr)) {
2967 			/*
2968 			 * The default action is to stop all threads in
2969 			 * the thread group.  The job control signals
2970 			 * do nothing in an orphaned pgrp, but SIGSTOP
2971 			 * always works.  Note that siglock needs to be
2972 			 * dropped during the call to is_orphaned_pgrp()
2973 			 * because of lock ordering with tasklist_lock.
2974 			 * This allows an intervening SIGCONT to be posted.
2975 			 * We need to check for that and bail out if necessary.
2976 			 */
2977 			if (signr != SIGSTOP) {
2978 				spin_unlock_irq(&sighand->siglock);
2979 
2980 				/* signals can be posted during this window */
2981 
2982 				if (is_current_pgrp_orphaned())
2983 					goto relock;
2984 
2985 				spin_lock_irq(&sighand->siglock);
2986 			}
2987 
2988 			if (likely(do_signal_stop(signr))) {
2989 				/* It released the siglock.  */
2990 				goto relock;
2991 			}
2992 
2993 			/*
2994 			 * We didn't actually stop, due to a race
2995 			 * with SIGCONT or something like that.
2996 			 */
2997 			continue;
2998 		}
2999 
3000 	fatal:
3001 		spin_unlock_irq(&sighand->siglock);
3002 		if (unlikely(cgroup_task_frozen(current)))
3003 			cgroup_leave_frozen(true);
3004 
3005 		/*
3006 		 * Anything else is fatal, maybe with a core dump.
3007 		 */
3008 		current->flags |= PF_SIGNALED;
3009 
3010 		if (sig_kernel_coredump(signr)) {
3011 			if (print_fatal_signals)
3012 				print_fatal_signal(signr);
3013 			proc_coredump_connector(current);
3014 			/*
3015 			 * If it was able to dump core, this kills all
3016 			 * other threads in the group and synchronizes with
3017 			 * their demise.  If we lost the race with another
3018 			 * thread getting here, it set group_exit_code
3019 			 * first and our do_group_exit call below will use
3020 			 * that value and ignore the one we pass it.
3021 			 */
3022 			vfs_coredump(&ksig->info);
3023 		}
3024 
3025 		/*
3026 		 * PF_USER_WORKER threads will catch and exit on fatal signals
3027 		 * themselves. They have cleanup that must be performed, so we
3028 		 * cannot call do_exit() on their behalf. Note that ksig won't
3029 		 * be properly initialized, PF_USER_WORKER's shouldn't use it.
3030 		 */
3031 		if (current->flags & PF_USER_WORKER)
3032 			goto out;
3033 
3034 		/*
3035 		 * Death signals, no core dump.
3036 		 */
3037 		do_group_exit(signr);
3038 		/* NOTREACHED */
3039 	}
3040 	spin_unlock_irq(&sighand->siglock);
3041 
3042 	ksig->sig = signr;
3043 
3044 	if (signr && !(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
3045 		hide_si_addr_tag_bits(ksig);
3046 out:
3047 	return signr > 0;
3048 }
3049 
3050 /**
3051  * signal_delivered - called after signal delivery to update blocked signals
3052  * @ksig:		kernel signal struct
3053  * @stepping:		nonzero if debugger single-step or block-step in use
3054  *
3055  * This function should be called when a signal has successfully been
3056  * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
3057  * is always blocked), and the signal itself is blocked unless %SA_NODEFER
3058  * is set in @ksig->ka.sa.sa_flags.  Tracing is notified.
3059  */
3060 static void signal_delivered(struct ksignal *ksig, int stepping)
3061 {
3062 	sigset_t blocked;
3063 
3064 	/* A signal was successfully delivered, and the
3065 	   saved sigmask was stored on the signal frame,
3066 	   and will be restored by sigreturn.  So we can
3067 	   simply clear the restore sigmask flag.  */
3068 	clear_restore_sigmask();
3069 
3070 	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
3071 	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
3072 		sigaddset(&blocked, ksig->sig);
3073 	set_current_blocked(&blocked);
3074 	if (current->sas_ss_flags & SS_AUTODISARM)
3075 		sas_ss_reset(current);
3076 	if (stepping)
3077 		ptrace_notify(SIGTRAP, 0);
3078 }
3079 
3080 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
3081 {
3082 	if (failed)
3083 		force_sigsegv(ksig->sig);
3084 	else
3085 		signal_delivered(ksig, stepping);
3086 }
3087 
3088 /*
3089  * It could be that complete_signal() picked us to notify about the
3090  * group-wide signal. Other threads should be notified now to take
3091  * the shared signals in @which since we will not.
3092  */
3093 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
3094 {
3095 	sigset_t retarget;
3096 	struct task_struct *t;
3097 
3098 	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
3099 	if (sigisemptyset(&retarget))
3100 		return;
3101 
3102 	for_other_threads(tsk, t) {
3103 		if (t->flags & PF_EXITING)
3104 			continue;
3105 
3106 		if (!has_pending_signals(&retarget, &t->blocked))
3107 			continue;
3108 		/* Remove the signals this thread can handle. */
3109 		sigandsets(&retarget, &retarget, &t->blocked);
3110 
3111 		if (!task_sigpending(t))
3112 			signal_wake_up(t, 0);
3113 
3114 		if (sigisemptyset(&retarget))
3115 			break;
3116 	}
3117 }
3118 
3119 void exit_signals(struct task_struct *tsk)
3120 {
3121 	int group_stop = 0;
3122 	sigset_t unblocked;
3123 
3124 	/*
3125 	 * @tsk is about to have PF_EXITING set - lock out users which
3126 	 * expect stable threadgroup.
3127 	 */
3128 	cgroup_threadgroup_change_begin(tsk);
3129 
3130 	if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
3131 		tsk->flags |= PF_EXITING;
3132 		cgroup_threadgroup_change_end(tsk);
3133 		return;
3134 	}
3135 
3136 	spin_lock_irq(&tsk->sighand->siglock);
3137 	/*
3138 	 * From now this task is not visible for group-wide signals,
3139 	 * see wants_signal(), do_signal_stop().
3140 	 */
3141 	tsk->flags |= PF_EXITING;
3142 
3143 	cgroup_threadgroup_change_end(tsk);
3144 
3145 	if (!task_sigpending(tsk))
3146 		goto out;
3147 
3148 	unblocked = tsk->blocked;
3149 	signotset(&unblocked);
3150 	retarget_shared_pending(tsk, &unblocked);
3151 
3152 	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
3153 	    task_participate_group_stop(tsk))
3154 		group_stop = CLD_STOPPED;
3155 out:
3156 	spin_unlock_irq(&tsk->sighand->siglock);
3157 
3158 	/*
3159 	 * If group stop has completed, deliver the notification.  This
3160 	 * should always go to the real parent of the group leader.
3161 	 */
3162 	if (unlikely(group_stop)) {
3163 		read_lock(&tasklist_lock);
3164 		do_notify_parent_cldstop(tsk, false, group_stop);
3165 		read_unlock(&tasklist_lock);
3166 	}
3167 }
3168 
3169 /*
3170  * System call entry points.
3171  */
3172 
3173 /**
3174  *  sys_restart_syscall - restart a system call
3175  */
3176 SYSCALL_DEFINE0(restart_syscall)
3177 {
3178 	struct restart_block *restart = &current->restart_block;
3179 	return restart->fn(restart);
3180 }
3181 
3182 long do_no_restart_syscall(struct restart_block *param)
3183 {
3184 	return -EINTR;
3185 }
3186 
3187 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3188 {
3189 	if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3190 		sigset_t newblocked;
3191 		/* A set of now blocked but previously unblocked signals. */
3192 		sigandnsets(&newblocked, newset, &current->blocked);
3193 		retarget_shared_pending(tsk, &newblocked);
3194 	}
3195 	tsk->blocked = *newset;
3196 	recalc_sigpending();
3197 }
3198 
3199 /**
3200  * set_current_blocked - change current->blocked mask
3201  * @newset: new mask
3202  *
3203  * It is wrong to change ->blocked directly, this helper should be used
3204  * to ensure the process can't miss a shared signal we are going to block.
3205  */
3206 void set_current_blocked(sigset_t *newset)
3207 {
3208 	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3209 	__set_current_blocked(newset);
3210 }
3211 
3212 void __set_current_blocked(const sigset_t *newset)
3213 {
3214 	struct task_struct *tsk = current;
3215 
3216 	/*
3217 	 * In case the signal mask hasn't changed, there is nothing we need
3218 	 * to do. The current->blocked shouldn't be modified by other task.
3219 	 */
3220 	if (sigequalsets(&tsk->blocked, newset))
3221 		return;
3222 
3223 	spin_lock_irq(&tsk->sighand->siglock);
3224 	__set_task_blocked(tsk, newset);
3225 	spin_unlock_irq(&tsk->sighand->siglock);
3226 }
3227 
3228 /*
3229  * This is also useful for kernel threads that want to temporarily
3230  * (or permanently) block certain signals.
3231  *
3232  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3233  * interface happily blocks "unblockable" signals like SIGKILL
3234  * and friends.
3235  */
3236 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3237 {
3238 	struct task_struct *tsk = current;
3239 	sigset_t newset;
3240 
3241 	/* Lockless, only current can change ->blocked, never from irq */
3242 	if (oldset)
3243 		*oldset = tsk->blocked;
3244 
3245 	switch (how) {
3246 	case SIG_BLOCK:
3247 		sigorsets(&newset, &tsk->blocked, set);
3248 		break;
3249 	case SIG_UNBLOCK:
3250 		sigandnsets(&newset, &tsk->blocked, set);
3251 		break;
3252 	case SIG_SETMASK:
3253 		newset = *set;
3254 		break;
3255 	default:
3256 		return -EINVAL;
3257 	}
3258 
3259 	__set_current_blocked(&newset);
3260 	return 0;
3261 }
3262 EXPORT_SYMBOL(sigprocmask);
3263 
3264 /*
3265  * The api helps set app-provided sigmasks.
3266  *
3267  * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3268  * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3269  *
3270  * Note that it does set_restore_sigmask() in advance, so it must be always
3271  * paired with restore_saved_sigmask_unless() before return from syscall.
3272  */
3273 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3274 {
3275 	sigset_t kmask;
3276 
3277 	if (!umask)
3278 		return 0;
3279 	if (sigsetsize != sizeof(sigset_t))
3280 		return -EINVAL;
3281 	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3282 		return -EFAULT;
3283 
3284 	set_restore_sigmask();
3285 	current->saved_sigmask = current->blocked;
3286 	set_current_blocked(&kmask);
3287 
3288 	return 0;
3289 }
3290 
3291 #ifdef CONFIG_COMPAT
3292 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3293 			    size_t sigsetsize)
3294 {
3295 	sigset_t kmask;
3296 
3297 	if (!umask)
3298 		return 0;
3299 	if (sigsetsize != sizeof(compat_sigset_t))
3300 		return -EINVAL;
3301 	if (get_compat_sigset(&kmask, umask))
3302 		return -EFAULT;
3303 
3304 	set_restore_sigmask();
3305 	current->saved_sigmask = current->blocked;
3306 	set_current_blocked(&kmask);
3307 
3308 	return 0;
3309 }
3310 #endif
3311 
3312 /**
3313  *  sys_rt_sigprocmask - change the list of currently blocked signals
3314  *  @how: whether to add, remove, or set signals
3315  *  @nset: stores pending signals
3316  *  @oset: previous value of signal mask if non-null
3317  *  @sigsetsize: size of sigset_t type
3318  */
3319 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3320 		sigset_t __user *, oset, size_t, sigsetsize)
3321 {
3322 	sigset_t old_set, new_set;
3323 	int error;
3324 
3325 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3326 	if (sigsetsize != sizeof(sigset_t))
3327 		return -EINVAL;
3328 
3329 	old_set = current->blocked;
3330 
3331 	if (nset) {
3332 		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3333 			return -EFAULT;
3334 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3335 
3336 		error = sigprocmask(how, &new_set, NULL);
3337 		if (error)
3338 			return error;
3339 	}
3340 
3341 	if (oset) {
3342 		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3343 			return -EFAULT;
3344 	}
3345 
3346 	return 0;
3347 }
3348 
3349 #ifdef CONFIG_COMPAT
3350 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3351 		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3352 {
3353 	sigset_t old_set = current->blocked;
3354 
3355 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3356 	if (sigsetsize != sizeof(sigset_t))
3357 		return -EINVAL;
3358 
3359 	if (nset) {
3360 		sigset_t new_set;
3361 		int error;
3362 		if (get_compat_sigset(&new_set, nset))
3363 			return -EFAULT;
3364 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3365 
3366 		error = sigprocmask(how, &new_set, NULL);
3367 		if (error)
3368 			return error;
3369 	}
3370 	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3371 }
3372 #endif
3373 
3374 static void do_sigpending(sigset_t *set)
3375 {
3376 	spin_lock_irq(&current->sighand->siglock);
3377 	sigorsets(set, &current->pending.signal,
3378 		  &current->signal->shared_pending.signal);
3379 	spin_unlock_irq(&current->sighand->siglock);
3380 
3381 	/* Outside the lock because only this thread touches it.  */
3382 	sigandsets(set, &current->blocked, set);
3383 }
3384 
3385 /**
3386  *  sys_rt_sigpending - examine a pending signal that has been raised
3387  *			while blocked
3388  *  @uset: stores pending signals
3389  *  @sigsetsize: size of sigset_t type or larger
3390  */
3391 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3392 {
3393 	sigset_t set;
3394 
3395 	if (sigsetsize > sizeof(*uset))
3396 		return -EINVAL;
3397 
3398 	do_sigpending(&set);
3399 
3400 	if (copy_to_user(uset, &set, sigsetsize))
3401 		return -EFAULT;
3402 
3403 	return 0;
3404 }
3405 
3406 #ifdef CONFIG_COMPAT
3407 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3408 		compat_size_t, sigsetsize)
3409 {
3410 	sigset_t set;
3411 
3412 	if (sigsetsize > sizeof(*uset))
3413 		return -EINVAL;
3414 
3415 	do_sigpending(&set);
3416 
3417 	return put_compat_sigset(uset, &set, sigsetsize);
3418 }
3419 #endif
3420 
3421 static const struct {
3422 	unsigned char limit, layout;
3423 } sig_sicodes[] = {
3424 	[SIGILL]  = { NSIGILL,  SIL_FAULT },
3425 	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
3426 	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3427 	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
3428 	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3429 #if defined(SIGEMT)
3430 	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
3431 #endif
3432 	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3433 	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
3434 	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
3435 };
3436 
3437 static bool known_siginfo_layout(unsigned sig, int si_code)
3438 {
3439 	if (si_code == SI_KERNEL)
3440 		return true;
3441 	else if ((si_code > SI_USER)) {
3442 		if (sig_specific_sicodes(sig)) {
3443 			if (si_code <= sig_sicodes[sig].limit)
3444 				return true;
3445 		}
3446 		else if (si_code <= NSIGPOLL)
3447 			return true;
3448 	}
3449 	else if (si_code >= SI_DETHREAD)
3450 		return true;
3451 	else if (si_code == SI_ASYNCNL)
3452 		return true;
3453 	return false;
3454 }
3455 
3456 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3457 {
3458 	enum siginfo_layout layout = SIL_KILL;
3459 	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3460 		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3461 		    (si_code <= sig_sicodes[sig].limit)) {
3462 			layout = sig_sicodes[sig].layout;
3463 			/* Handle the exceptions */
3464 			if ((sig == SIGBUS) &&
3465 			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3466 				layout = SIL_FAULT_MCEERR;
3467 			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3468 				layout = SIL_FAULT_BNDERR;
3469 #ifdef SEGV_PKUERR
3470 			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3471 				layout = SIL_FAULT_PKUERR;
3472 #endif
3473 			else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3474 				layout = SIL_FAULT_PERF_EVENT;
3475 			else if (IS_ENABLED(CONFIG_SPARC) &&
3476 				 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3477 				layout = SIL_FAULT_TRAPNO;
3478 			else if (IS_ENABLED(CONFIG_ALPHA) &&
3479 				 ((sig == SIGFPE) ||
3480 				  ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3481 				layout = SIL_FAULT_TRAPNO;
3482 		}
3483 		else if (si_code <= NSIGPOLL)
3484 			layout = SIL_POLL;
3485 	} else {
3486 		if (si_code == SI_TIMER)
3487 			layout = SIL_TIMER;
3488 		else if (si_code == SI_SIGIO)
3489 			layout = SIL_POLL;
3490 		else if (si_code < 0)
3491 			layout = SIL_RT;
3492 	}
3493 	return layout;
3494 }
3495 
3496 static inline char __user *si_expansion(const siginfo_t __user *info)
3497 {
3498 	return ((char __user *)info) + sizeof(struct kernel_siginfo);
3499 }
3500 
3501 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3502 {
3503 	char __user *expansion = si_expansion(to);
3504 	if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
3505 		return -EFAULT;
3506 	if (clear_user(expansion, SI_EXPANSION_SIZE))
3507 		return -EFAULT;
3508 	return 0;
3509 }
3510 
3511 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3512 				       const siginfo_t __user *from)
3513 {
3514 	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3515 		char __user *expansion = si_expansion(from);
3516 		char buf[SI_EXPANSION_SIZE];
3517 		int i;
3518 		/*
3519 		 * An unknown si_code might need more than
3520 		 * sizeof(struct kernel_siginfo) bytes.  Verify all of the
3521 		 * extra bytes are 0.  This guarantees copy_siginfo_to_user
3522 		 * will return this data to userspace exactly.
3523 		 */
3524 		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3525 			return -EFAULT;
3526 		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3527 			if (buf[i] != 0)
3528 				return -E2BIG;
3529 		}
3530 	}
3531 	return 0;
3532 }
3533 
3534 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3535 				    const siginfo_t __user *from)
3536 {
3537 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3538 		return -EFAULT;
3539 	to->si_signo = signo;
3540 	return post_copy_siginfo_from_user(to, from);
3541 }
3542 
3543 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3544 {
3545 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3546 		return -EFAULT;
3547 	return post_copy_siginfo_from_user(to, from);
3548 }
3549 
3550 #ifdef CONFIG_COMPAT
3551 /**
3552  * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3553  * @to: compat siginfo destination
3554  * @from: kernel siginfo source
3555  *
3556  * Note: This function does not work properly for the SIGCHLD on x32, but
3557  * fortunately it doesn't have to.  The only valid callers for this function are
3558  * copy_siginfo_to_user32, which is overriden for x32 and the coredump code.
3559  * The latter does not care because SIGCHLD will never cause a coredump.
3560  */
3561 void copy_siginfo_to_external32(struct compat_siginfo *to,
3562 		const struct kernel_siginfo *from)
3563 {
3564 	memset(to, 0, sizeof(*to));
3565 
3566 	to->si_signo = from->si_signo;
3567 	to->si_errno = from->si_errno;
3568 	to->si_code  = from->si_code;
3569 	switch(siginfo_layout(from->si_signo, from->si_code)) {
3570 	case SIL_KILL:
3571 		to->si_pid = from->si_pid;
3572 		to->si_uid = from->si_uid;
3573 		break;
3574 	case SIL_TIMER:
3575 		to->si_tid     = from->si_tid;
3576 		to->si_overrun = from->si_overrun;
3577 		to->si_int     = from->si_int;
3578 		break;
3579 	case SIL_POLL:
3580 		to->si_band = from->si_band;
3581 		to->si_fd   = from->si_fd;
3582 		break;
3583 	case SIL_FAULT:
3584 		to->si_addr = ptr_to_compat(from->si_addr);
3585 		break;
3586 	case SIL_FAULT_TRAPNO:
3587 		to->si_addr = ptr_to_compat(from->si_addr);
3588 		to->si_trapno = from->si_trapno;
3589 		break;
3590 	case SIL_FAULT_MCEERR:
3591 		to->si_addr = ptr_to_compat(from->si_addr);
3592 		to->si_addr_lsb = from->si_addr_lsb;
3593 		break;
3594 	case SIL_FAULT_BNDERR:
3595 		to->si_addr = ptr_to_compat(from->si_addr);
3596 		to->si_lower = ptr_to_compat(from->si_lower);
3597 		to->si_upper = ptr_to_compat(from->si_upper);
3598 		break;
3599 	case SIL_FAULT_PKUERR:
3600 		to->si_addr = ptr_to_compat(from->si_addr);
3601 		to->si_pkey = from->si_pkey;
3602 		break;
3603 	case SIL_FAULT_PERF_EVENT:
3604 		to->si_addr = ptr_to_compat(from->si_addr);
3605 		to->si_perf_data = from->si_perf_data;
3606 		to->si_perf_type = from->si_perf_type;
3607 		to->si_perf_flags = from->si_perf_flags;
3608 		break;
3609 	case SIL_CHLD:
3610 		to->si_pid = from->si_pid;
3611 		to->si_uid = from->si_uid;
3612 		to->si_status = from->si_status;
3613 		to->si_utime = from->si_utime;
3614 		to->si_stime = from->si_stime;
3615 		break;
3616 	case SIL_RT:
3617 		to->si_pid = from->si_pid;
3618 		to->si_uid = from->si_uid;
3619 		to->si_int = from->si_int;
3620 		break;
3621 	case SIL_SYS:
3622 		to->si_call_addr = ptr_to_compat(from->si_call_addr);
3623 		to->si_syscall   = from->si_syscall;
3624 		to->si_arch      = from->si_arch;
3625 		break;
3626 	}
3627 }
3628 
3629 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3630 			   const struct kernel_siginfo *from)
3631 {
3632 	struct compat_siginfo new;
3633 
3634 	copy_siginfo_to_external32(&new, from);
3635 	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3636 		return -EFAULT;
3637 	return 0;
3638 }
3639 
3640 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3641 					 const struct compat_siginfo *from)
3642 {
3643 	clear_siginfo(to);
3644 	to->si_signo = from->si_signo;
3645 	to->si_errno = from->si_errno;
3646 	to->si_code  = from->si_code;
3647 	switch(siginfo_layout(from->si_signo, from->si_code)) {
3648 	case SIL_KILL:
3649 		to->si_pid = from->si_pid;
3650 		to->si_uid = from->si_uid;
3651 		break;
3652 	case SIL_TIMER:
3653 		to->si_tid     = from->si_tid;
3654 		to->si_overrun = from->si_overrun;
3655 		to->si_int     = from->si_int;
3656 		break;
3657 	case SIL_POLL:
3658 		to->si_band = from->si_band;
3659 		to->si_fd   = from->si_fd;
3660 		break;
3661 	case SIL_FAULT:
3662 		to->si_addr = compat_ptr(from->si_addr);
3663 		break;
3664 	case SIL_FAULT_TRAPNO:
3665 		to->si_addr = compat_ptr(from->si_addr);
3666 		to->si_trapno = from->si_trapno;
3667 		break;
3668 	case SIL_FAULT_MCEERR:
3669 		to->si_addr = compat_ptr(from->si_addr);
3670 		to->si_addr_lsb = from->si_addr_lsb;
3671 		break;
3672 	case SIL_FAULT_BNDERR:
3673 		to->si_addr = compat_ptr(from->si_addr);
3674 		to->si_lower = compat_ptr(from->si_lower);
3675 		to->si_upper = compat_ptr(from->si_upper);
3676 		break;
3677 	case SIL_FAULT_PKUERR:
3678 		to->si_addr = compat_ptr(from->si_addr);
3679 		to->si_pkey = from->si_pkey;
3680 		break;
3681 	case SIL_FAULT_PERF_EVENT:
3682 		to->si_addr = compat_ptr(from->si_addr);
3683 		to->si_perf_data = from->si_perf_data;
3684 		to->si_perf_type = from->si_perf_type;
3685 		to->si_perf_flags = from->si_perf_flags;
3686 		break;
3687 	case SIL_CHLD:
3688 		to->si_pid    = from->si_pid;
3689 		to->si_uid    = from->si_uid;
3690 		to->si_status = from->si_status;
3691 #ifdef CONFIG_X86_X32_ABI
3692 		if (in_x32_syscall()) {
3693 			to->si_utime = from->_sifields._sigchld_x32._utime;
3694 			to->si_stime = from->_sifields._sigchld_x32._stime;
3695 		} else
3696 #endif
3697 		{
3698 			to->si_utime = from->si_utime;
3699 			to->si_stime = from->si_stime;
3700 		}
3701 		break;
3702 	case SIL_RT:
3703 		to->si_pid = from->si_pid;
3704 		to->si_uid = from->si_uid;
3705 		to->si_int = from->si_int;
3706 		break;
3707 	case SIL_SYS:
3708 		to->si_call_addr = compat_ptr(from->si_call_addr);
3709 		to->si_syscall   = from->si_syscall;
3710 		to->si_arch      = from->si_arch;
3711 		break;
3712 	}
3713 	return 0;
3714 }
3715 
3716 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3717 				      const struct compat_siginfo __user *ufrom)
3718 {
3719 	struct compat_siginfo from;
3720 
3721 	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3722 		return -EFAULT;
3723 
3724 	from.si_signo = signo;
3725 	return post_copy_siginfo_from_user32(to, &from);
3726 }
3727 
3728 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3729 			     const struct compat_siginfo __user *ufrom)
3730 {
3731 	struct compat_siginfo from;
3732 
3733 	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3734 		return -EFAULT;
3735 
3736 	return post_copy_siginfo_from_user32(to, &from);
3737 }
3738 #endif /* CONFIG_COMPAT */
3739 
3740 /**
3741  *  do_sigtimedwait - wait for queued signals specified in @which
3742  *  @which: queued signals to wait for
3743  *  @info: if non-null, the signal's siginfo is returned here
3744  *  @ts: upper bound on process time suspension
3745  */
3746 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3747 		    const struct timespec64 *ts)
3748 {
3749 	ktime_t *to = NULL, timeout = KTIME_MAX;
3750 	struct task_struct *tsk = current;
3751 	sigset_t mask = *which;
3752 	enum pid_type type;
3753 	int sig, ret = 0;
3754 
3755 	if (ts) {
3756 		if (!timespec64_valid(ts))
3757 			return -EINVAL;
3758 		timeout = timespec64_to_ktime(*ts);
3759 		to = &timeout;
3760 	}
3761 
3762 	/*
3763 	 * Invert the set of allowed signals to get those we want to block.
3764 	 */
3765 	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3766 	signotset(&mask);
3767 
3768 	spin_lock_irq(&tsk->sighand->siglock);
3769 	sig = dequeue_signal(&mask, info, &type);
3770 	if (!sig && timeout) {
3771 		/*
3772 		 * None ready, temporarily unblock those we're interested
3773 		 * while we are sleeping in so that we'll be awakened when
3774 		 * they arrive. Unblocking is always fine, we can avoid
3775 		 * set_current_blocked().
3776 		 */
3777 		tsk->real_blocked = tsk->blocked;
3778 		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3779 		recalc_sigpending();
3780 		spin_unlock_irq(&tsk->sighand->siglock);
3781 
3782 		__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
3783 		ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3784 					       HRTIMER_MODE_REL);
3785 		spin_lock_irq(&tsk->sighand->siglock);
3786 		__set_task_blocked(tsk, &tsk->real_blocked);
3787 		sigemptyset(&tsk->real_blocked);
3788 		sig = dequeue_signal(&mask, info, &type);
3789 	}
3790 	spin_unlock_irq(&tsk->sighand->siglock);
3791 
3792 	if (sig)
3793 		return sig;
3794 	return ret ? -EINTR : -EAGAIN;
3795 }
3796 
3797 /**
3798  *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
3799  *			in @uthese
3800  *  @uthese: queued signals to wait for
3801  *  @uinfo: if non-null, the signal's siginfo is returned here
3802  *  @uts: upper bound on process time suspension
3803  *  @sigsetsize: size of sigset_t type
3804  */
3805 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3806 		siginfo_t __user *, uinfo,
3807 		const struct __kernel_timespec __user *, uts,
3808 		size_t, sigsetsize)
3809 {
3810 	sigset_t these;
3811 	struct timespec64 ts;
3812 	kernel_siginfo_t info;
3813 	int ret;
3814 
3815 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3816 	if (sigsetsize != sizeof(sigset_t))
3817 		return -EINVAL;
3818 
3819 	if (copy_from_user(&these, uthese, sizeof(these)))
3820 		return -EFAULT;
3821 
3822 	if (uts) {
3823 		if (get_timespec64(&ts, uts))
3824 			return -EFAULT;
3825 	}
3826 
3827 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3828 
3829 	if (ret > 0 && uinfo) {
3830 		if (copy_siginfo_to_user(uinfo, &info))
3831 			ret = -EFAULT;
3832 	}
3833 
3834 	return ret;
3835 }
3836 
3837 #ifdef CONFIG_COMPAT_32BIT_TIME
3838 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3839 		siginfo_t __user *, uinfo,
3840 		const struct old_timespec32 __user *, uts,
3841 		size_t, sigsetsize)
3842 {
3843 	sigset_t these;
3844 	struct timespec64 ts;
3845 	kernel_siginfo_t info;
3846 	int ret;
3847 
3848 	if (sigsetsize != sizeof(sigset_t))
3849 		return -EINVAL;
3850 
3851 	if (copy_from_user(&these, uthese, sizeof(these)))
3852 		return -EFAULT;
3853 
3854 	if (uts) {
3855 		if (get_old_timespec32(&ts, uts))
3856 			return -EFAULT;
3857 	}
3858 
3859 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3860 
3861 	if (ret > 0 && uinfo) {
3862 		if (copy_siginfo_to_user(uinfo, &info))
3863 			ret = -EFAULT;
3864 	}
3865 
3866 	return ret;
3867 }
3868 #endif
3869 
3870 #ifdef CONFIG_COMPAT
3871 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3872 		struct compat_siginfo __user *, uinfo,
3873 		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3874 {
3875 	sigset_t s;
3876 	struct timespec64 t;
3877 	kernel_siginfo_t info;
3878 	long ret;
3879 
3880 	if (sigsetsize != sizeof(sigset_t))
3881 		return -EINVAL;
3882 
3883 	if (get_compat_sigset(&s, uthese))
3884 		return -EFAULT;
3885 
3886 	if (uts) {
3887 		if (get_timespec64(&t, uts))
3888 			return -EFAULT;
3889 	}
3890 
3891 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3892 
3893 	if (ret > 0 && uinfo) {
3894 		if (copy_siginfo_to_user32(uinfo, &info))
3895 			ret = -EFAULT;
3896 	}
3897 
3898 	return ret;
3899 }
3900 
3901 #ifdef CONFIG_COMPAT_32BIT_TIME
3902 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3903 		struct compat_siginfo __user *, uinfo,
3904 		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3905 {
3906 	sigset_t s;
3907 	struct timespec64 t;
3908 	kernel_siginfo_t info;
3909 	long ret;
3910 
3911 	if (sigsetsize != sizeof(sigset_t))
3912 		return -EINVAL;
3913 
3914 	if (get_compat_sigset(&s, uthese))
3915 		return -EFAULT;
3916 
3917 	if (uts) {
3918 		if (get_old_timespec32(&t, uts))
3919 			return -EFAULT;
3920 	}
3921 
3922 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3923 
3924 	if (ret > 0 && uinfo) {
3925 		if (copy_siginfo_to_user32(uinfo, &info))
3926 			ret = -EFAULT;
3927 	}
3928 
3929 	return ret;
3930 }
3931 #endif
3932 #endif
3933 
3934 static void prepare_kill_siginfo(int sig, struct kernel_siginfo *info,
3935 				 enum pid_type type)
3936 {
3937 	clear_siginfo(info);
3938 	info->si_signo = sig;
3939 	info->si_errno = 0;
3940 	info->si_code = (type == PIDTYPE_PID) ? SI_TKILL : SI_USER;
3941 	info->si_pid = task_tgid_vnr(current);
3942 	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3943 }
3944 
3945 /**
3946  *  sys_kill - send a signal to a process
3947  *  @pid: the PID of the process
3948  *  @sig: signal to be sent
3949  */
3950 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3951 {
3952 	struct kernel_siginfo info;
3953 
3954 	prepare_kill_siginfo(sig, &info, PIDTYPE_TGID);
3955 
3956 	return kill_something_info(sig, &info, pid);
3957 }
3958 
3959 /*
3960  * Verify that the signaler and signalee either are in the same pid namespace
3961  * or that the signaler's pid namespace is an ancestor of the signalee's pid
3962  * namespace.
3963  */
3964 static bool access_pidfd_pidns(struct pid *pid)
3965 {
3966 	struct pid_namespace *active = task_active_pid_ns(current);
3967 	struct pid_namespace *p = ns_of_pid(pid);
3968 
3969 	for (;;) {
3970 		if (!p)
3971 			return false;
3972 		if (p == active)
3973 			break;
3974 		p = p->parent;
3975 	}
3976 
3977 	return true;
3978 }
3979 
3980 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3981 		siginfo_t __user *info)
3982 {
3983 #ifdef CONFIG_COMPAT
3984 	/*
3985 	 * Avoid hooking up compat syscalls and instead handle necessary
3986 	 * conversions here. Note, this is a stop-gap measure and should not be
3987 	 * considered a generic solution.
3988 	 */
3989 	if (in_compat_syscall())
3990 		return copy_siginfo_from_user32(
3991 			kinfo, (struct compat_siginfo __user *)info);
3992 #endif
3993 	return copy_siginfo_from_user(kinfo, info);
3994 }
3995 
3996 static struct pid *pidfd_to_pid(const struct file *file)
3997 {
3998 	struct pid *pid;
3999 
4000 	pid = pidfd_pid(file);
4001 	if (!IS_ERR(pid))
4002 		return pid;
4003 
4004 	return tgid_pidfd_to_pid(file);
4005 }
4006 
4007 #define PIDFD_SEND_SIGNAL_FLAGS                            \
4008 	(PIDFD_SIGNAL_THREAD | PIDFD_SIGNAL_THREAD_GROUP | \
4009 	 PIDFD_SIGNAL_PROCESS_GROUP)
4010 
4011 static int do_pidfd_send_signal(struct pid *pid, int sig, enum pid_type type,
4012 				siginfo_t __user *info, unsigned int flags)
4013 {
4014 	kernel_siginfo_t kinfo;
4015 
4016 	switch (flags) {
4017 	case PIDFD_SIGNAL_THREAD:
4018 		type = PIDTYPE_PID;
4019 		break;
4020 	case PIDFD_SIGNAL_THREAD_GROUP:
4021 		type = PIDTYPE_TGID;
4022 		break;
4023 	case PIDFD_SIGNAL_PROCESS_GROUP:
4024 		type = PIDTYPE_PGID;
4025 		break;
4026 	}
4027 
4028 	if (info) {
4029 		int ret;
4030 
4031 		ret = copy_siginfo_from_user_any(&kinfo, info);
4032 		if (unlikely(ret))
4033 			return ret;
4034 
4035 		if (unlikely(sig != kinfo.si_signo))
4036 			return -EINVAL;
4037 
4038 		/* Only allow sending arbitrary signals to yourself. */
4039 		if ((task_pid(current) != pid || type > PIDTYPE_TGID) &&
4040 		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
4041 			return -EPERM;
4042 	} else {
4043 		prepare_kill_siginfo(sig, &kinfo, type);
4044 	}
4045 
4046 	if (type == PIDTYPE_PGID)
4047 		return kill_pgrp_info(sig, &kinfo, pid);
4048 
4049 	return kill_pid_info_type(sig, &kinfo, pid, type);
4050 }
4051 
4052 /**
4053  * sys_pidfd_send_signal - Signal a process through a pidfd
4054  * @pidfd:  file descriptor of the process
4055  * @sig:    signal to send
4056  * @info:   signal info
4057  * @flags:  future flags
4058  *
4059  * Send the signal to the thread group or to the individual thread depending
4060  * on PIDFD_THREAD.
4061  * In the future extension to @flags may be used to override the default scope
4062  * of @pidfd.
4063  *
4064  * Return: 0 on success, negative errno on failure
4065  */
4066 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
4067 		siginfo_t __user *, info, unsigned int, flags)
4068 {
4069 	struct pid *pid;
4070 	enum pid_type type;
4071 	int ret;
4072 
4073 	/* Enforce flags be set to 0 until we add an extension. */
4074 	if (flags & ~PIDFD_SEND_SIGNAL_FLAGS)
4075 		return -EINVAL;
4076 
4077 	/* Ensure that only a single signal scope determining flag is set. */
4078 	if (hweight32(flags & PIDFD_SEND_SIGNAL_FLAGS) > 1)
4079 		return -EINVAL;
4080 
4081 	switch (pidfd) {
4082 	case PIDFD_SELF_THREAD:
4083 		pid = get_task_pid(current, PIDTYPE_PID);
4084 		type = PIDTYPE_PID;
4085 		break;
4086 	case PIDFD_SELF_THREAD_GROUP:
4087 		pid = get_task_pid(current, PIDTYPE_TGID);
4088 		type = PIDTYPE_TGID;
4089 		break;
4090 	default: {
4091 		CLASS(fd, f)(pidfd);
4092 		if (fd_empty(f))
4093 			return -EBADF;
4094 
4095 		/* Is this a pidfd? */
4096 		pid = pidfd_to_pid(fd_file(f));
4097 		if (IS_ERR(pid))
4098 			return PTR_ERR(pid);
4099 
4100 		if (!access_pidfd_pidns(pid))
4101 			return -EINVAL;
4102 
4103 		/* Infer scope from the type of pidfd. */
4104 		if (fd_file(f)->f_flags & PIDFD_THREAD)
4105 			type = PIDTYPE_PID;
4106 		else
4107 			type = PIDTYPE_TGID;
4108 
4109 		return do_pidfd_send_signal(pid, sig, type, info, flags);
4110 	}
4111 	}
4112 
4113 	ret = do_pidfd_send_signal(pid, sig, type, info, flags);
4114 	put_pid(pid);
4115 
4116 	return ret;
4117 }
4118 
4119 static int
4120 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
4121 {
4122 	struct task_struct *p;
4123 	int error = -ESRCH;
4124 
4125 	rcu_read_lock();
4126 	p = find_task_by_vpid(pid);
4127 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
4128 		error = check_kill_permission(sig, info, p);
4129 		/*
4130 		 * The null signal is a permissions and process existence
4131 		 * probe.  No signal is actually delivered.
4132 		 */
4133 		if (!error && sig) {
4134 			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
4135 			/*
4136 			 * If lock_task_sighand() failed we pretend the task
4137 			 * dies after receiving the signal. The window is tiny,
4138 			 * and the signal is private anyway.
4139 			 */
4140 			if (unlikely(error == -ESRCH))
4141 				error = 0;
4142 		}
4143 	}
4144 	rcu_read_unlock();
4145 
4146 	return error;
4147 }
4148 
4149 static int do_tkill(pid_t tgid, pid_t pid, int sig)
4150 {
4151 	struct kernel_siginfo info;
4152 
4153 	prepare_kill_siginfo(sig, &info, PIDTYPE_PID);
4154 
4155 	return do_send_specific(tgid, pid, sig, &info);
4156 }
4157 
4158 /**
4159  *  sys_tgkill - send signal to one specific thread
4160  *  @tgid: the thread group ID of the thread
4161  *  @pid: the PID of the thread
4162  *  @sig: signal to be sent
4163  *
4164  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
4165  *  exists but it's not belonging to the target process anymore. This
4166  *  method solves the problem of threads exiting and PIDs getting reused.
4167  */
4168 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
4169 {
4170 	/* This is only valid for single tasks */
4171 	if (pid <= 0 || tgid <= 0)
4172 		return -EINVAL;
4173 
4174 	return do_tkill(tgid, pid, sig);
4175 }
4176 
4177 /**
4178  *  sys_tkill - send signal to one specific task
4179  *  @pid: the PID of the task
4180  *  @sig: signal to be sent
4181  *
4182  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
4183  */
4184 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
4185 {
4186 	/* This is only valid for single tasks */
4187 	if (pid <= 0)
4188 		return -EINVAL;
4189 
4190 	return do_tkill(0, pid, sig);
4191 }
4192 
4193 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
4194 {
4195 	/* Not even root can pretend to send signals from the kernel.
4196 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4197 	 */
4198 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4199 	    (task_pid_vnr(current) != pid))
4200 		return -EPERM;
4201 
4202 	/* POSIX.1b doesn't mention process groups.  */
4203 	return kill_proc_info(sig, info, pid);
4204 }
4205 
4206 /**
4207  *  sys_rt_sigqueueinfo - send signal information to a signal
4208  *  @pid: the PID of the thread
4209  *  @sig: signal to be sent
4210  *  @uinfo: signal info to be sent
4211  */
4212 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
4213 		siginfo_t __user *, uinfo)
4214 {
4215 	kernel_siginfo_t info;
4216 	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4217 	if (unlikely(ret))
4218 		return ret;
4219 	return do_rt_sigqueueinfo(pid, sig, &info);
4220 }
4221 
4222 #ifdef CONFIG_COMPAT
4223 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4224 			compat_pid_t, pid,
4225 			int, sig,
4226 			struct compat_siginfo __user *, uinfo)
4227 {
4228 	kernel_siginfo_t info;
4229 	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4230 	if (unlikely(ret))
4231 		return ret;
4232 	return do_rt_sigqueueinfo(pid, sig, &info);
4233 }
4234 #endif
4235 
4236 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4237 {
4238 	/* This is only valid for single tasks */
4239 	if (pid <= 0 || tgid <= 0)
4240 		return -EINVAL;
4241 
4242 	/* Not even root can pretend to send signals from the kernel.
4243 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4244 	 */
4245 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4246 	    (task_pid_vnr(current) != pid))
4247 		return -EPERM;
4248 
4249 	return do_send_specific(tgid, pid, sig, info);
4250 }
4251 
4252 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4253 		siginfo_t __user *, uinfo)
4254 {
4255 	kernel_siginfo_t info;
4256 	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4257 	if (unlikely(ret))
4258 		return ret;
4259 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4260 }
4261 
4262 #ifdef CONFIG_COMPAT
4263 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4264 			compat_pid_t, tgid,
4265 			compat_pid_t, pid,
4266 			int, sig,
4267 			struct compat_siginfo __user *, uinfo)
4268 {
4269 	kernel_siginfo_t info;
4270 	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4271 	if (unlikely(ret))
4272 		return ret;
4273 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4274 }
4275 #endif
4276 
4277 /*
4278  * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4279  */
4280 void kernel_sigaction(int sig, __sighandler_t action)
4281 {
4282 	spin_lock_irq(&current->sighand->siglock);
4283 	current->sighand->action[sig - 1].sa.sa_handler = action;
4284 	if (action == SIG_IGN) {
4285 		sigset_t mask;
4286 
4287 		sigemptyset(&mask);
4288 		sigaddset(&mask, sig);
4289 
4290 		flush_sigqueue_mask(current, &mask, &current->signal->shared_pending);
4291 		flush_sigqueue_mask(current, &mask, &current->pending);
4292 		recalc_sigpending();
4293 	}
4294 	spin_unlock_irq(&current->sighand->siglock);
4295 }
4296 EXPORT_SYMBOL(kernel_sigaction);
4297 
4298 void __weak sigaction_compat_abi(struct k_sigaction *act,
4299 		struct k_sigaction *oact)
4300 {
4301 }
4302 
4303 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4304 {
4305 	struct task_struct *p = current, *t;
4306 	struct k_sigaction *k;
4307 	sigset_t mask;
4308 
4309 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4310 		return -EINVAL;
4311 
4312 	k = &p->sighand->action[sig-1];
4313 
4314 	spin_lock_irq(&p->sighand->siglock);
4315 	if (k->sa.sa_flags & SA_IMMUTABLE) {
4316 		spin_unlock_irq(&p->sighand->siglock);
4317 		return -EINVAL;
4318 	}
4319 	if (oact)
4320 		*oact = *k;
4321 
4322 	/*
4323 	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4324 	 * e.g. by having an architecture use the bit in their uapi.
4325 	 */
4326 	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4327 
4328 	/*
4329 	 * Clear unknown flag bits in order to allow userspace to detect missing
4330 	 * support for flag bits and to allow the kernel to use non-uapi bits
4331 	 * internally.
4332 	 */
4333 	if (act)
4334 		act->sa.sa_flags &= UAPI_SA_FLAGS;
4335 	if (oact)
4336 		oact->sa.sa_flags &= UAPI_SA_FLAGS;
4337 
4338 	sigaction_compat_abi(act, oact);
4339 
4340 	if (act) {
4341 		bool was_ignored = k->sa.sa_handler == SIG_IGN;
4342 
4343 		sigdelsetmask(&act->sa.sa_mask,
4344 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
4345 		*k = *act;
4346 		/*
4347 		 * POSIX 3.3.1.3:
4348 		 *  "Setting a signal action to SIG_IGN for a signal that is
4349 		 *   pending shall cause the pending signal to be discarded,
4350 		 *   whether or not it is blocked."
4351 		 *
4352 		 *  "Setting a signal action to SIG_DFL for a signal that is
4353 		 *   pending and whose default action is to ignore the signal
4354 		 *   (for example, SIGCHLD), shall cause the pending signal to
4355 		 *   be discarded, whether or not it is blocked"
4356 		 */
4357 		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4358 			sigemptyset(&mask);
4359 			sigaddset(&mask, sig);
4360 			flush_sigqueue_mask(p, &mask, &p->signal->shared_pending);
4361 			for_each_thread(p, t)
4362 				flush_sigqueue_mask(p, &mask, &t->pending);
4363 		} else if (was_ignored) {
4364 			posixtimer_sig_unignore(p, sig);
4365 		}
4366 	}
4367 
4368 	spin_unlock_irq(&p->sighand->siglock);
4369 	return 0;
4370 }
4371 
4372 #ifdef CONFIG_DYNAMIC_SIGFRAME
4373 static inline void sigaltstack_lock(void)
4374 	__acquires(&current->sighand->siglock)
4375 {
4376 	spin_lock_irq(&current->sighand->siglock);
4377 }
4378 
4379 static inline void sigaltstack_unlock(void)
4380 	__releases(&current->sighand->siglock)
4381 {
4382 	spin_unlock_irq(&current->sighand->siglock);
4383 }
4384 #else
4385 static inline void sigaltstack_lock(void) { }
4386 static inline void sigaltstack_unlock(void) { }
4387 #endif
4388 
4389 static int
4390 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4391 		size_t min_ss_size)
4392 {
4393 	struct task_struct *t = current;
4394 	int ret = 0;
4395 
4396 	if (oss) {
4397 		memset(oss, 0, sizeof(stack_t));
4398 		oss->ss_sp = (void __user *) t->sas_ss_sp;
4399 		oss->ss_size = t->sas_ss_size;
4400 		oss->ss_flags = sas_ss_flags(sp) |
4401 			(current->sas_ss_flags & SS_FLAG_BITS);
4402 	}
4403 
4404 	if (ss) {
4405 		void __user *ss_sp = ss->ss_sp;
4406 		size_t ss_size = ss->ss_size;
4407 		unsigned ss_flags = ss->ss_flags;
4408 		int ss_mode;
4409 
4410 		if (unlikely(on_sig_stack(sp)))
4411 			return -EPERM;
4412 
4413 		ss_mode = ss_flags & ~SS_FLAG_BITS;
4414 		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4415 				ss_mode != 0))
4416 			return -EINVAL;
4417 
4418 		/*
4419 		 * Return before taking any locks if no actual
4420 		 * sigaltstack changes were requested.
4421 		 */
4422 		if (t->sas_ss_sp == (unsigned long)ss_sp &&
4423 		    t->sas_ss_size == ss_size &&
4424 		    t->sas_ss_flags == ss_flags)
4425 			return 0;
4426 
4427 		sigaltstack_lock();
4428 		if (ss_mode == SS_DISABLE) {
4429 			ss_size = 0;
4430 			ss_sp = NULL;
4431 		} else {
4432 			if (unlikely(ss_size < min_ss_size))
4433 				ret = -ENOMEM;
4434 			if (!sigaltstack_size_valid(ss_size))
4435 				ret = -ENOMEM;
4436 		}
4437 		if (!ret) {
4438 			t->sas_ss_sp = (unsigned long) ss_sp;
4439 			t->sas_ss_size = ss_size;
4440 			t->sas_ss_flags = ss_flags;
4441 		}
4442 		sigaltstack_unlock();
4443 	}
4444 	return ret;
4445 }
4446 
4447 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4448 {
4449 	stack_t new, old;
4450 	int err;
4451 	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4452 		return -EFAULT;
4453 	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4454 			      current_user_stack_pointer(),
4455 			      MINSIGSTKSZ);
4456 	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4457 		err = -EFAULT;
4458 	return err;
4459 }
4460 
4461 int restore_altstack(const stack_t __user *uss)
4462 {
4463 	stack_t new;
4464 	if (copy_from_user(&new, uss, sizeof(stack_t)))
4465 		return -EFAULT;
4466 	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4467 			     MINSIGSTKSZ);
4468 	/* squash all but EFAULT for now */
4469 	return 0;
4470 }
4471 
4472 int __save_altstack(stack_t __user *uss, unsigned long sp)
4473 {
4474 	struct task_struct *t = current;
4475 	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4476 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4477 		__put_user(t->sas_ss_size, &uss->ss_size);
4478 	return err;
4479 }
4480 
4481 #ifdef CONFIG_COMPAT
4482 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4483 				 compat_stack_t __user *uoss_ptr)
4484 {
4485 	stack_t uss, uoss;
4486 	int ret;
4487 
4488 	if (uss_ptr) {
4489 		compat_stack_t uss32;
4490 		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4491 			return -EFAULT;
4492 		uss.ss_sp = compat_ptr(uss32.ss_sp);
4493 		uss.ss_flags = uss32.ss_flags;
4494 		uss.ss_size = uss32.ss_size;
4495 	}
4496 	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4497 			     compat_user_stack_pointer(),
4498 			     COMPAT_MINSIGSTKSZ);
4499 	if (ret >= 0 && uoss_ptr)  {
4500 		compat_stack_t old;
4501 		memset(&old, 0, sizeof(old));
4502 		old.ss_sp = ptr_to_compat(uoss.ss_sp);
4503 		old.ss_flags = uoss.ss_flags;
4504 		old.ss_size = uoss.ss_size;
4505 		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4506 			ret = -EFAULT;
4507 	}
4508 	return ret;
4509 }
4510 
4511 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4512 			const compat_stack_t __user *, uss_ptr,
4513 			compat_stack_t __user *, uoss_ptr)
4514 {
4515 	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4516 }
4517 
4518 int compat_restore_altstack(const compat_stack_t __user *uss)
4519 {
4520 	int err = do_compat_sigaltstack(uss, NULL);
4521 	/* squash all but -EFAULT for now */
4522 	return err == -EFAULT ? err : 0;
4523 }
4524 
4525 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4526 {
4527 	int err;
4528 	struct task_struct *t = current;
4529 	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4530 			 &uss->ss_sp) |
4531 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4532 		__put_user(t->sas_ss_size, &uss->ss_size);
4533 	return err;
4534 }
4535 #endif
4536 
4537 #ifdef __ARCH_WANT_SYS_SIGPENDING
4538 
4539 /**
4540  *  sys_sigpending - examine pending signals
4541  *  @uset: where mask of pending signal is returned
4542  */
4543 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4544 {
4545 	sigset_t set;
4546 
4547 	if (sizeof(old_sigset_t) > sizeof(*uset))
4548 		return -EINVAL;
4549 
4550 	do_sigpending(&set);
4551 
4552 	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4553 		return -EFAULT;
4554 
4555 	return 0;
4556 }
4557 
4558 #ifdef CONFIG_COMPAT
4559 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4560 {
4561 	sigset_t set;
4562 
4563 	do_sigpending(&set);
4564 
4565 	return put_user(set.sig[0], set32);
4566 }
4567 #endif
4568 
4569 #endif
4570 
4571 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4572 /**
4573  *  sys_sigprocmask - examine and change blocked signals
4574  *  @how: whether to add, remove, or set signals
4575  *  @nset: signals to add or remove (if non-null)
4576  *  @oset: previous value of signal mask if non-null
4577  *
4578  * Some platforms have their own version with special arguments;
4579  * others support only sys_rt_sigprocmask.
4580  */
4581 
4582 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4583 		old_sigset_t __user *, oset)
4584 {
4585 	old_sigset_t old_set, new_set;
4586 	sigset_t new_blocked;
4587 
4588 	old_set = current->blocked.sig[0];
4589 
4590 	if (nset) {
4591 		if (copy_from_user(&new_set, nset, sizeof(*nset)))
4592 			return -EFAULT;
4593 
4594 		new_blocked = current->blocked;
4595 
4596 		switch (how) {
4597 		case SIG_BLOCK:
4598 			sigaddsetmask(&new_blocked, new_set);
4599 			break;
4600 		case SIG_UNBLOCK:
4601 			sigdelsetmask(&new_blocked, new_set);
4602 			break;
4603 		case SIG_SETMASK:
4604 			new_blocked.sig[0] = new_set;
4605 			break;
4606 		default:
4607 			return -EINVAL;
4608 		}
4609 
4610 		set_current_blocked(&new_blocked);
4611 	}
4612 
4613 	if (oset) {
4614 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
4615 			return -EFAULT;
4616 	}
4617 
4618 	return 0;
4619 }
4620 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4621 
4622 #ifndef CONFIG_ODD_RT_SIGACTION
4623 /**
4624  *  sys_rt_sigaction - alter an action taken by a process
4625  *  @sig: signal to be sent
4626  *  @act: new sigaction
4627  *  @oact: used to save the previous sigaction
4628  *  @sigsetsize: size of sigset_t type
4629  */
4630 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4631 		const struct sigaction __user *, act,
4632 		struct sigaction __user *, oact,
4633 		size_t, sigsetsize)
4634 {
4635 	struct k_sigaction new_sa, old_sa;
4636 	int ret;
4637 
4638 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4639 	if (sigsetsize != sizeof(sigset_t))
4640 		return -EINVAL;
4641 
4642 	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4643 		return -EFAULT;
4644 
4645 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4646 	if (ret)
4647 		return ret;
4648 
4649 	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4650 		return -EFAULT;
4651 
4652 	return 0;
4653 }
4654 #ifdef CONFIG_COMPAT
4655 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4656 		const struct compat_sigaction __user *, act,
4657 		struct compat_sigaction __user *, oact,
4658 		compat_size_t, sigsetsize)
4659 {
4660 	struct k_sigaction new_ka, old_ka;
4661 #ifdef __ARCH_HAS_SA_RESTORER
4662 	compat_uptr_t restorer;
4663 #endif
4664 	int ret;
4665 
4666 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4667 	if (sigsetsize != sizeof(compat_sigset_t))
4668 		return -EINVAL;
4669 
4670 	if (act) {
4671 		compat_uptr_t handler;
4672 		ret = get_user(handler, &act->sa_handler);
4673 		new_ka.sa.sa_handler = compat_ptr(handler);
4674 #ifdef __ARCH_HAS_SA_RESTORER
4675 		ret |= get_user(restorer, &act->sa_restorer);
4676 		new_ka.sa.sa_restorer = compat_ptr(restorer);
4677 #endif
4678 		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4679 		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4680 		if (ret)
4681 			return -EFAULT;
4682 	}
4683 
4684 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4685 	if (!ret && oact) {
4686 		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4687 			       &oact->sa_handler);
4688 		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4689 					 sizeof(oact->sa_mask));
4690 		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4691 #ifdef __ARCH_HAS_SA_RESTORER
4692 		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4693 				&oact->sa_restorer);
4694 #endif
4695 	}
4696 	return ret;
4697 }
4698 #endif
4699 #endif /* !CONFIG_ODD_RT_SIGACTION */
4700 
4701 #ifdef CONFIG_OLD_SIGACTION
4702 SYSCALL_DEFINE3(sigaction, int, sig,
4703 		const struct old_sigaction __user *, act,
4704 	        struct old_sigaction __user *, oact)
4705 {
4706 	struct k_sigaction new_ka, old_ka;
4707 	int ret;
4708 
4709 	if (act) {
4710 		old_sigset_t mask;
4711 		if (!access_ok(act, sizeof(*act)) ||
4712 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4713 		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4714 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4715 		    __get_user(mask, &act->sa_mask))
4716 			return -EFAULT;
4717 #ifdef __ARCH_HAS_KA_RESTORER
4718 		new_ka.ka_restorer = NULL;
4719 #endif
4720 		siginitset(&new_ka.sa.sa_mask, mask);
4721 	}
4722 
4723 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4724 
4725 	if (!ret && oact) {
4726 		if (!access_ok(oact, sizeof(*oact)) ||
4727 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4728 		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4729 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4730 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4731 			return -EFAULT;
4732 	}
4733 
4734 	return ret;
4735 }
4736 #endif
4737 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4738 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4739 		const struct compat_old_sigaction __user *, act,
4740 	        struct compat_old_sigaction __user *, oact)
4741 {
4742 	struct k_sigaction new_ka, old_ka;
4743 	int ret;
4744 	compat_old_sigset_t mask;
4745 	compat_uptr_t handler, restorer;
4746 
4747 	if (act) {
4748 		if (!access_ok(act, sizeof(*act)) ||
4749 		    __get_user(handler, &act->sa_handler) ||
4750 		    __get_user(restorer, &act->sa_restorer) ||
4751 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4752 		    __get_user(mask, &act->sa_mask))
4753 			return -EFAULT;
4754 
4755 #ifdef __ARCH_HAS_KA_RESTORER
4756 		new_ka.ka_restorer = NULL;
4757 #endif
4758 		new_ka.sa.sa_handler = compat_ptr(handler);
4759 		new_ka.sa.sa_restorer = compat_ptr(restorer);
4760 		siginitset(&new_ka.sa.sa_mask, mask);
4761 	}
4762 
4763 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4764 
4765 	if (!ret && oact) {
4766 		if (!access_ok(oact, sizeof(*oact)) ||
4767 		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4768 			       &oact->sa_handler) ||
4769 		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4770 			       &oact->sa_restorer) ||
4771 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4772 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4773 			return -EFAULT;
4774 	}
4775 	return ret;
4776 }
4777 #endif
4778 
4779 #ifdef CONFIG_SGETMASK_SYSCALL
4780 
4781 /*
4782  * For backwards compatibility.  Functionality superseded by sigprocmask.
4783  */
4784 SYSCALL_DEFINE0(sgetmask)
4785 {
4786 	/* SMP safe */
4787 	return current->blocked.sig[0];
4788 }
4789 
4790 SYSCALL_DEFINE1(ssetmask, int, newmask)
4791 {
4792 	int old = current->blocked.sig[0];
4793 	sigset_t newset;
4794 
4795 	siginitset(&newset, newmask);
4796 	set_current_blocked(&newset);
4797 
4798 	return old;
4799 }
4800 #endif /* CONFIG_SGETMASK_SYSCALL */
4801 
4802 #ifdef __ARCH_WANT_SYS_SIGNAL
4803 /*
4804  * For backwards compatibility.  Functionality superseded by sigaction.
4805  */
4806 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4807 {
4808 	struct k_sigaction new_sa, old_sa;
4809 	int ret;
4810 
4811 	new_sa.sa.sa_handler = handler;
4812 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4813 	sigemptyset(&new_sa.sa.sa_mask);
4814 
4815 	ret = do_sigaction(sig, &new_sa, &old_sa);
4816 
4817 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4818 }
4819 #endif /* __ARCH_WANT_SYS_SIGNAL */
4820 
4821 #ifdef __ARCH_WANT_SYS_PAUSE
4822 
4823 SYSCALL_DEFINE0(pause)
4824 {
4825 	while (!signal_pending(current)) {
4826 		__set_current_state(TASK_INTERRUPTIBLE);
4827 		schedule();
4828 	}
4829 	return -ERESTARTNOHAND;
4830 }
4831 
4832 #endif
4833 
4834 static int sigsuspend(sigset_t *set)
4835 {
4836 	current->saved_sigmask = current->blocked;
4837 	set_current_blocked(set);
4838 
4839 	while (!signal_pending(current)) {
4840 		__set_current_state(TASK_INTERRUPTIBLE);
4841 		schedule();
4842 	}
4843 	set_restore_sigmask();
4844 	return -ERESTARTNOHAND;
4845 }
4846 
4847 /**
4848  *  sys_rt_sigsuspend - replace the signal mask for a value with the
4849  *	@unewset value until a signal is received
4850  *  @unewset: new signal mask value
4851  *  @sigsetsize: size of sigset_t type
4852  */
4853 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4854 {
4855 	sigset_t newset;
4856 
4857 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4858 	if (sigsetsize != sizeof(sigset_t))
4859 		return -EINVAL;
4860 
4861 	if (copy_from_user(&newset, unewset, sizeof(newset)))
4862 		return -EFAULT;
4863 	return sigsuspend(&newset);
4864 }
4865 
4866 #ifdef CONFIG_COMPAT
4867 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4868 {
4869 	sigset_t newset;
4870 
4871 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4872 	if (sigsetsize != sizeof(sigset_t))
4873 		return -EINVAL;
4874 
4875 	if (get_compat_sigset(&newset, unewset))
4876 		return -EFAULT;
4877 	return sigsuspend(&newset);
4878 }
4879 #endif
4880 
4881 #ifdef CONFIG_OLD_SIGSUSPEND
4882 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4883 {
4884 	sigset_t blocked;
4885 	siginitset(&blocked, mask);
4886 	return sigsuspend(&blocked);
4887 }
4888 #endif
4889 #ifdef CONFIG_OLD_SIGSUSPEND3
4890 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4891 {
4892 	sigset_t blocked;
4893 	siginitset(&blocked, mask);
4894 	return sigsuspend(&blocked);
4895 }
4896 #endif
4897 
4898 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4899 {
4900 	return NULL;
4901 }
4902 
4903 static inline void siginfo_buildtime_checks(void)
4904 {
4905 	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4906 
4907 	/* Verify the offsets in the two siginfos match */
4908 #define CHECK_OFFSET(field) \
4909 	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4910 
4911 	/* kill */
4912 	CHECK_OFFSET(si_pid);
4913 	CHECK_OFFSET(si_uid);
4914 
4915 	/* timer */
4916 	CHECK_OFFSET(si_tid);
4917 	CHECK_OFFSET(si_overrun);
4918 	CHECK_OFFSET(si_value);
4919 
4920 	/* rt */
4921 	CHECK_OFFSET(si_pid);
4922 	CHECK_OFFSET(si_uid);
4923 	CHECK_OFFSET(si_value);
4924 
4925 	/* sigchld */
4926 	CHECK_OFFSET(si_pid);
4927 	CHECK_OFFSET(si_uid);
4928 	CHECK_OFFSET(si_status);
4929 	CHECK_OFFSET(si_utime);
4930 	CHECK_OFFSET(si_stime);
4931 
4932 	/* sigfault */
4933 	CHECK_OFFSET(si_addr);
4934 	CHECK_OFFSET(si_trapno);
4935 	CHECK_OFFSET(si_addr_lsb);
4936 	CHECK_OFFSET(si_lower);
4937 	CHECK_OFFSET(si_upper);
4938 	CHECK_OFFSET(si_pkey);
4939 	CHECK_OFFSET(si_perf_data);
4940 	CHECK_OFFSET(si_perf_type);
4941 	CHECK_OFFSET(si_perf_flags);
4942 
4943 	/* sigpoll */
4944 	CHECK_OFFSET(si_band);
4945 	CHECK_OFFSET(si_fd);
4946 
4947 	/* sigsys */
4948 	CHECK_OFFSET(si_call_addr);
4949 	CHECK_OFFSET(si_syscall);
4950 	CHECK_OFFSET(si_arch);
4951 #undef CHECK_OFFSET
4952 
4953 	/* usb asyncio */
4954 	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4955 		     offsetof(struct siginfo, si_addr));
4956 	if (sizeof(int) == sizeof(void __user *)) {
4957 		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4958 			     sizeof(void __user *));
4959 	} else {
4960 		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4961 			      sizeof_field(struct siginfo, si_uid)) !=
4962 			     sizeof(void __user *));
4963 		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4964 			     offsetof(struct siginfo, si_uid));
4965 	}
4966 #ifdef CONFIG_COMPAT
4967 	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4968 		     offsetof(struct compat_siginfo, si_addr));
4969 	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4970 		     sizeof(compat_uptr_t));
4971 	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4972 		     sizeof_field(struct siginfo, si_pid));
4973 #endif
4974 }
4975 
4976 #if defined(CONFIG_SYSCTL)
4977 static const struct ctl_table signal_debug_table[] = {
4978 #ifdef CONFIG_SYSCTL_EXCEPTION_TRACE
4979 	{
4980 		.procname	= "exception-trace",
4981 		.data		= &show_unhandled_signals,
4982 		.maxlen		= sizeof(int),
4983 		.mode		= 0644,
4984 		.proc_handler	= proc_dointvec
4985 	},
4986 #endif
4987 };
4988 
4989 static const struct ctl_table signal_table[] = {
4990 	{
4991 		.procname	= "print-fatal-signals",
4992 		.data		= &print_fatal_signals,
4993 		.maxlen		= sizeof(int),
4994 		.mode		= 0644,
4995 		.proc_handler	= proc_dointvec,
4996 	},
4997 };
4998 
4999 static int __init init_signal_sysctls(void)
5000 {
5001 	register_sysctl_init("debug", signal_debug_table);
5002 	register_sysctl_init("kernel", signal_table);
5003 	return 0;
5004 }
5005 early_initcall(init_signal_sysctls);
5006 #endif /* CONFIG_SYSCTL */
5007 
5008 void __init signals_init(void)
5009 {
5010 	siginfo_buildtime_checks();
5011 
5012 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
5013 }
5014 
5015 #ifdef CONFIG_KGDB_KDB
5016 #include <linux/kdb.h>
5017 /*
5018  * kdb_send_sig - Allows kdb to send signals without exposing
5019  * signal internals.  This function checks if the required locks are
5020  * available before calling the main signal code, to avoid kdb
5021  * deadlocks.
5022  */
5023 void kdb_send_sig(struct task_struct *t, int sig)
5024 {
5025 	static struct task_struct *kdb_prev_t;
5026 	int new_t, ret;
5027 	if (!spin_trylock(&t->sighand->siglock)) {
5028 		kdb_printf("Can't do kill command now.\n"
5029 			   "The sigmask lock is held somewhere else in "
5030 			   "kernel, try again later\n");
5031 		return;
5032 	}
5033 	new_t = kdb_prev_t != t;
5034 	kdb_prev_t = t;
5035 	if (!task_is_running(t) && new_t) {
5036 		spin_unlock(&t->sighand->siglock);
5037 		kdb_printf("Process is not RUNNING, sending a signal from "
5038 			   "kdb risks deadlock\n"
5039 			   "on the run queue locks. "
5040 			   "The signal has _not_ been sent.\n"
5041 			   "Reissue the kill command if you want to risk "
5042 			   "the deadlock.\n");
5043 		return;
5044 	}
5045 	ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
5046 	spin_unlock(&t->sighand->siglock);
5047 	if (ret)
5048 		kdb_printf("Fail to deliver Signal %d to process %d.\n",
5049 			   sig, t->pid);
5050 	else
5051 		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
5052 }
5053 #endif	/* CONFIG_KGDB_KDB */
5054