xref: /linux/kernel/signal.c (revision e20706d5385b10a6f6a2fe5ad6b1333dad2d1416)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/kernel/signal.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  *
7  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
8  *
9  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
10  *		Changes to use preallocated sigqueue structures
11  *		to allow signals to be sent reliably.
12  */
13 
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/init.h>
17 #include <linux/sched/mm.h>
18 #include <linux/sched/user.h>
19 #include <linux/sched/debug.h>
20 #include <linux/sched/task.h>
21 #include <linux/sched/task_stack.h>
22 #include <linux/sched/cputime.h>
23 #include <linux/file.h>
24 #include <linux/fs.h>
25 #include <linux/mm.h>
26 #include <linux/proc_fs.h>
27 #include <linux/tty.h>
28 #include <linux/binfmts.h>
29 #include <linux/coredump.h>
30 #include <linux/security.h>
31 #include <linux/syscalls.h>
32 #include <linux/ptrace.h>
33 #include <linux/signal.h>
34 #include <linux/signalfd.h>
35 #include <linux/ratelimit.h>
36 #include <linux/task_work.h>
37 #include <linux/capability.h>
38 #include <linux/freezer.h>
39 #include <linux/pid_namespace.h>
40 #include <linux/nsproxy.h>
41 #include <linux/user_namespace.h>
42 #include <linux/uprobes.h>
43 #include <linux/compat.h>
44 #include <linux/cn_proc.h>
45 #include <linux/compiler.h>
46 #include <linux/posix-timers.h>
47 #include <linux/cgroup.h>
48 #include <linux/audit.h>
49 #include <linux/sysctl.h>
50 #include <uapi/linux/pidfd.h>
51 
52 #define CREATE_TRACE_POINTS
53 #include <trace/events/signal.h>
54 
55 #include <asm/param.h>
56 #include <linux/uaccess.h>
57 #include <asm/unistd.h>
58 #include <asm/siginfo.h>
59 #include <asm/cacheflush.h>
60 #include <asm/syscall.h>	/* for syscall_get_* */
61 
62 #include "time/posix-timers.h"
63 
64 /*
65  * SLAB caches for signal bits.
66  */
67 
68 static struct kmem_cache *sigqueue_cachep;
69 
70 int print_fatal_signals __read_mostly;
71 
72 static void __user *sig_handler(struct task_struct *t, int sig)
73 {
74 	return t->sighand->action[sig - 1].sa.sa_handler;
75 }
76 
77 static inline bool sig_handler_ignored(void __user *handler, int sig)
78 {
79 	/* Is it explicitly or implicitly ignored? */
80 	return handler == SIG_IGN ||
81 	       (handler == SIG_DFL && sig_kernel_ignore(sig));
82 }
83 
84 static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
85 {
86 	void __user *handler;
87 
88 	handler = sig_handler(t, sig);
89 
90 	/* SIGKILL and SIGSTOP may not be sent to the global init */
91 	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
92 		return true;
93 
94 	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
95 	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
96 		return true;
97 
98 	/* Only allow kernel generated signals to this kthread */
99 	if (unlikely((t->flags & PF_KTHREAD) &&
100 		     (handler == SIG_KTHREAD_KERNEL) && !force))
101 		return true;
102 
103 	return sig_handler_ignored(handler, sig);
104 }
105 
106 static bool sig_ignored(struct task_struct *t, int sig, bool force)
107 {
108 	/*
109 	 * Blocked signals are never ignored, since the
110 	 * signal handler may change by the time it is
111 	 * unblocked.
112 	 */
113 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
114 		return false;
115 
116 	/*
117 	 * Tracers may want to know about even ignored signal unless it
118 	 * is SIGKILL which can't be reported anyway but can be ignored
119 	 * by SIGNAL_UNKILLABLE task.
120 	 */
121 	if (t->ptrace && sig != SIGKILL)
122 		return false;
123 
124 	return sig_task_ignored(t, sig, force);
125 }
126 
127 /*
128  * Re-calculate pending state from the set of locally pending
129  * signals, globally pending signals, and blocked signals.
130  */
131 static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
132 {
133 	unsigned long ready;
134 	long i;
135 
136 	switch (_NSIG_WORDS) {
137 	default:
138 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
139 			ready |= signal->sig[i] &~ blocked->sig[i];
140 		break;
141 
142 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
143 		ready |= signal->sig[2] &~ blocked->sig[2];
144 		ready |= signal->sig[1] &~ blocked->sig[1];
145 		ready |= signal->sig[0] &~ blocked->sig[0];
146 		break;
147 
148 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
149 		ready |= signal->sig[0] &~ blocked->sig[0];
150 		break;
151 
152 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
153 	}
154 	return ready !=	0;
155 }
156 
157 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
158 
159 static bool recalc_sigpending_tsk(struct task_struct *t)
160 {
161 	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
162 	    PENDING(&t->pending, &t->blocked) ||
163 	    PENDING(&t->signal->shared_pending, &t->blocked) ||
164 	    cgroup_task_frozen(t)) {
165 		set_tsk_thread_flag(t, TIF_SIGPENDING);
166 		return true;
167 	}
168 
169 	/*
170 	 * We must never clear the flag in another thread, or in current
171 	 * when it's possible the current syscall is returning -ERESTART*.
172 	 * So we don't clear it here, and only callers who know they should do.
173 	 */
174 	return false;
175 }
176 
177 void recalc_sigpending(void)
178 {
179 	if (!recalc_sigpending_tsk(current) && !freezing(current)) {
180 		if (unlikely(test_thread_flag(TIF_SIGPENDING)))
181 			clear_thread_flag(TIF_SIGPENDING);
182 	}
183 }
184 EXPORT_SYMBOL(recalc_sigpending);
185 
186 void calculate_sigpending(void)
187 {
188 	/* Have any signals or users of TIF_SIGPENDING been delayed
189 	 * until after fork?
190 	 */
191 	spin_lock_irq(&current->sighand->siglock);
192 	set_tsk_thread_flag(current, TIF_SIGPENDING);
193 	recalc_sigpending();
194 	spin_unlock_irq(&current->sighand->siglock);
195 }
196 
197 /* Given the mask, find the first available signal that should be serviced. */
198 
199 #define SYNCHRONOUS_MASK \
200 	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
201 	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
202 
203 int next_signal(struct sigpending *pending, sigset_t *mask)
204 {
205 	unsigned long i, *s, *m, x;
206 	int sig = 0;
207 
208 	s = pending->signal.sig;
209 	m = mask->sig;
210 
211 	/*
212 	 * Handle the first word specially: it contains the
213 	 * synchronous signals that need to be dequeued first.
214 	 */
215 	x = *s &~ *m;
216 	if (x) {
217 		if (x & SYNCHRONOUS_MASK)
218 			x &= SYNCHRONOUS_MASK;
219 		sig = ffz(~x) + 1;
220 		return sig;
221 	}
222 
223 	switch (_NSIG_WORDS) {
224 	default:
225 		for (i = 1; i < _NSIG_WORDS; ++i) {
226 			x = *++s &~ *++m;
227 			if (!x)
228 				continue;
229 			sig = ffz(~x) + i*_NSIG_BPW + 1;
230 			break;
231 		}
232 		break;
233 
234 	case 2:
235 		x = s[1] &~ m[1];
236 		if (!x)
237 			break;
238 		sig = ffz(~x) + _NSIG_BPW + 1;
239 		break;
240 
241 	case 1:
242 		/* Nothing to do */
243 		break;
244 	}
245 
246 	return sig;
247 }
248 
249 static inline void print_dropped_signal(int sig)
250 {
251 	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
252 
253 	if (!print_fatal_signals)
254 		return;
255 
256 	if (!__ratelimit(&ratelimit_state))
257 		return;
258 
259 	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
260 				current->comm, current->pid, sig);
261 }
262 
263 /**
264  * task_set_jobctl_pending - set jobctl pending bits
265  * @task: target task
266  * @mask: pending bits to set
267  *
268  * Clear @mask from @task->jobctl.  @mask must be subset of
269  * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
270  * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
271  * cleared.  If @task is already being killed or exiting, this function
272  * becomes noop.
273  *
274  * CONTEXT:
275  * Must be called with @task->sighand->siglock held.
276  *
277  * RETURNS:
278  * %true if @mask is set, %false if made noop because @task was dying.
279  */
280 bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
281 {
282 	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
283 			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
284 	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
285 
286 	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
287 		return false;
288 
289 	if (mask & JOBCTL_STOP_SIGMASK)
290 		task->jobctl &= ~JOBCTL_STOP_SIGMASK;
291 
292 	task->jobctl |= mask;
293 	return true;
294 }
295 
296 /**
297  * task_clear_jobctl_trapping - clear jobctl trapping bit
298  * @task: target task
299  *
300  * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
301  * Clear it and wake up the ptracer.  Note that we don't need any further
302  * locking.  @task->siglock guarantees that @task->parent points to the
303  * ptracer.
304  *
305  * CONTEXT:
306  * Must be called with @task->sighand->siglock held.
307  */
308 void task_clear_jobctl_trapping(struct task_struct *task)
309 {
310 	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
311 		task->jobctl &= ~JOBCTL_TRAPPING;
312 		smp_mb();	/* advised by wake_up_bit() */
313 		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
314 	}
315 }
316 
317 /**
318  * task_clear_jobctl_pending - clear jobctl pending bits
319  * @task: target task
320  * @mask: pending bits to clear
321  *
322  * Clear @mask from @task->jobctl.  @mask must be subset of
323  * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
324  * STOP bits are cleared together.
325  *
326  * If clearing of @mask leaves no stop or trap pending, this function calls
327  * task_clear_jobctl_trapping().
328  *
329  * CONTEXT:
330  * Must be called with @task->sighand->siglock held.
331  */
332 void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
333 {
334 	BUG_ON(mask & ~JOBCTL_PENDING_MASK);
335 
336 	if (mask & JOBCTL_STOP_PENDING)
337 		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
338 
339 	task->jobctl &= ~mask;
340 
341 	if (!(task->jobctl & JOBCTL_PENDING_MASK))
342 		task_clear_jobctl_trapping(task);
343 }
344 
345 /**
346  * task_participate_group_stop - participate in a group stop
347  * @task: task participating in a group stop
348  *
349  * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
350  * Group stop states are cleared and the group stop count is consumed if
351  * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
352  * stop, the appropriate `SIGNAL_*` flags are set.
353  *
354  * CONTEXT:
355  * Must be called with @task->sighand->siglock held.
356  *
357  * RETURNS:
358  * %true if group stop completion should be notified to the parent, %false
359  * otherwise.
360  */
361 static bool task_participate_group_stop(struct task_struct *task)
362 {
363 	struct signal_struct *sig = task->signal;
364 	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
365 
366 	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
367 
368 	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
369 
370 	if (!consume)
371 		return false;
372 
373 	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
374 		sig->group_stop_count--;
375 
376 	/*
377 	 * Tell the caller to notify completion iff we are entering into a
378 	 * fresh group stop.  Read comment in do_signal_stop() for details.
379 	 */
380 	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
381 		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
382 		return true;
383 	}
384 	return false;
385 }
386 
387 void task_join_group_stop(struct task_struct *task)
388 {
389 	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
390 	struct signal_struct *sig = current->signal;
391 
392 	if (sig->group_stop_count) {
393 		sig->group_stop_count++;
394 		mask |= JOBCTL_STOP_CONSUME;
395 	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
396 		return;
397 
398 	/* Have the new thread join an on-going signal group stop */
399 	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
400 }
401 
402 static struct ucounts *sig_get_ucounts(struct task_struct *t, int sig,
403 				       int override_rlimit)
404 {
405 	struct ucounts *ucounts;
406 	long sigpending;
407 
408 	/*
409 	 * Protect access to @t credentials. This can go away when all
410 	 * callers hold rcu read lock.
411 	 *
412 	 * NOTE! A pending signal will hold on to the user refcount,
413 	 * and we get/put the refcount only when the sigpending count
414 	 * changes from/to zero.
415 	 */
416 	rcu_read_lock();
417 	ucounts = task_ucounts(t);
418 	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING,
419 					    override_rlimit);
420 	rcu_read_unlock();
421 	if (!sigpending)
422 		return NULL;
423 
424 	if (unlikely(!override_rlimit && sigpending > task_rlimit(t, RLIMIT_SIGPENDING))) {
425 		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
426 		print_dropped_signal(sig);
427 		return NULL;
428 	}
429 
430 	return ucounts;
431 }
432 
433 static void __sigqueue_init(struct sigqueue *q, struct ucounts *ucounts,
434 			    const unsigned int sigqueue_flags)
435 {
436 	INIT_LIST_HEAD(&q->list);
437 	q->flags = sigqueue_flags;
438 	q->ucounts = ucounts;
439 }
440 
441 /*
442  * allocate a new signal queue record
443  * - this may be called without locks if and only if t == current, otherwise an
444  *   appropriate lock must be held to stop the target task from exiting
445  */
446 static struct sigqueue *sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
447 				       int override_rlimit)
448 {
449 	struct ucounts *ucounts = sig_get_ucounts(t, sig, override_rlimit);
450 	struct sigqueue *q;
451 
452 	if (!ucounts)
453 		return NULL;
454 
455 	q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
456 	if (!q) {
457 		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
458 		return NULL;
459 	}
460 
461 	__sigqueue_init(q, ucounts, 0);
462 	return q;
463 }
464 
465 static void __sigqueue_free(struct sigqueue *q)
466 {
467 	if (q->flags & SIGQUEUE_PREALLOC) {
468 		posixtimer_sigqueue_putref(q);
469 		return;
470 	}
471 	if (q->ucounts) {
472 		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
473 		q->ucounts = NULL;
474 	}
475 	kmem_cache_free(sigqueue_cachep, q);
476 }
477 
478 void flush_sigqueue(struct sigpending *queue)
479 {
480 	struct sigqueue *q;
481 
482 	sigemptyset(&queue->signal);
483 	while (!list_empty(&queue->list)) {
484 		q = list_entry(queue->list.next, struct sigqueue , list);
485 		list_del_init(&q->list);
486 		__sigqueue_free(q);
487 	}
488 }
489 
490 /*
491  * Flush all pending signals for this kthread.
492  */
493 void flush_signals(struct task_struct *t)
494 {
495 	unsigned long flags;
496 
497 	spin_lock_irqsave(&t->sighand->siglock, flags);
498 	clear_tsk_thread_flag(t, TIF_SIGPENDING);
499 	flush_sigqueue(&t->pending);
500 	flush_sigqueue(&t->signal->shared_pending);
501 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
502 }
503 EXPORT_SYMBOL(flush_signals);
504 
505 void ignore_signals(struct task_struct *t)
506 {
507 	int i;
508 
509 	for (i = 0; i < _NSIG; ++i)
510 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
511 
512 	flush_signals(t);
513 }
514 
515 /*
516  * Flush all handlers for a task.
517  */
518 
519 void
520 flush_signal_handlers(struct task_struct *t, int force_default)
521 {
522 	int i;
523 	struct k_sigaction *ka = &t->sighand->action[0];
524 	for (i = _NSIG ; i != 0 ; i--) {
525 		if (force_default || ka->sa.sa_handler != SIG_IGN)
526 			ka->sa.sa_handler = SIG_DFL;
527 		ka->sa.sa_flags = 0;
528 #ifdef __ARCH_HAS_SA_RESTORER
529 		ka->sa.sa_restorer = NULL;
530 #endif
531 		sigemptyset(&ka->sa.sa_mask);
532 		ka++;
533 	}
534 }
535 
536 bool unhandled_signal(struct task_struct *tsk, int sig)
537 {
538 	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
539 	if (is_global_init(tsk))
540 		return true;
541 
542 	if (handler != SIG_IGN && handler != SIG_DFL)
543 		return false;
544 
545 	/* If dying, we handle all new signals by ignoring them */
546 	if (fatal_signal_pending(tsk))
547 		return false;
548 
549 	/* if ptraced, let the tracer determine */
550 	return !tsk->ptrace;
551 }
552 
553 static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
554 			   struct sigqueue **timer_sigq)
555 {
556 	struct sigqueue *q, *first = NULL;
557 
558 	/*
559 	 * Collect the siginfo appropriate to this signal.  Check if
560 	 * there is another siginfo for the same signal.
561 	*/
562 	list_for_each_entry(q, &list->list, list) {
563 		if (q->info.si_signo == sig) {
564 			if (first)
565 				goto still_pending;
566 			first = q;
567 		}
568 	}
569 
570 	sigdelset(&list->signal, sig);
571 
572 	if (first) {
573 still_pending:
574 		list_del_init(&first->list);
575 		copy_siginfo(info, &first->info);
576 
577 		/*
578 		 * posix-timer signals are preallocated and freed when the last
579 		 * reference count is dropped in posixtimer_deliver_signal() or
580 		 * immediately on timer deletion when the signal is not pending.
581 		 * Spare the extra round through __sigqueue_free() which is
582 		 * ignoring preallocated signals.
583 		 */
584 		if (unlikely((first->flags & SIGQUEUE_PREALLOC) && (info->si_code == SI_TIMER)))
585 			*timer_sigq = first;
586 		else
587 			__sigqueue_free(first);
588 	} else {
589 		/*
590 		 * Ok, it wasn't in the queue.  This must be
591 		 * a fast-pathed signal or we must have been
592 		 * out of queue space.  So zero out the info.
593 		 */
594 		clear_siginfo(info);
595 		info->si_signo = sig;
596 		info->si_errno = 0;
597 		info->si_code = SI_USER;
598 		info->si_pid = 0;
599 		info->si_uid = 0;
600 	}
601 }
602 
603 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
604 			    kernel_siginfo_t *info, struct sigqueue **timer_sigq)
605 {
606 	int sig = next_signal(pending, mask);
607 
608 	if (sig)
609 		collect_signal(sig, pending, info, timer_sigq);
610 	return sig;
611 }
612 
613 /*
614  * Try to dequeue a signal. If a deliverable signal is found fill in the
615  * caller provided siginfo and return the signal number. Otherwise return
616  * 0.
617  */
618 int dequeue_signal(sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type)
619 {
620 	struct task_struct *tsk = current;
621 	struct sigqueue *timer_sigq;
622 	int signr;
623 
624 	lockdep_assert_held(&tsk->sighand->siglock);
625 
626 again:
627 	*type = PIDTYPE_PID;
628 	timer_sigq = NULL;
629 	signr = __dequeue_signal(&tsk->pending, mask, info, &timer_sigq);
630 	if (!signr) {
631 		*type = PIDTYPE_TGID;
632 		signr = __dequeue_signal(&tsk->signal->shared_pending,
633 					 mask, info, &timer_sigq);
634 
635 		if (unlikely(signr == SIGALRM))
636 			posixtimer_rearm_itimer(tsk);
637 	}
638 
639 	recalc_sigpending();
640 	if (!signr)
641 		return 0;
642 
643 	if (unlikely(sig_kernel_stop(signr))) {
644 		/*
645 		 * Set a marker that we have dequeued a stop signal.  Our
646 		 * caller might release the siglock and then the pending
647 		 * stop signal it is about to process is no longer in the
648 		 * pending bitmasks, but must still be cleared by a SIGCONT
649 		 * (and overruled by a SIGKILL).  So those cases clear this
650 		 * shared flag after we've set it.  Note that this flag may
651 		 * remain set after the signal we return is ignored or
652 		 * handled.  That doesn't matter because its only purpose
653 		 * is to alert stop-signal processing code when another
654 		 * processor has come along and cleared the flag.
655 		 */
656 		current->jobctl |= JOBCTL_STOP_DEQUEUED;
657 	}
658 
659 	if (IS_ENABLED(CONFIG_POSIX_TIMERS) && unlikely(timer_sigq)) {
660 		if (!posixtimer_deliver_signal(info, timer_sigq))
661 			goto again;
662 	}
663 
664 	return signr;
665 }
666 EXPORT_SYMBOL_GPL(dequeue_signal);
667 
668 static int dequeue_synchronous_signal(kernel_siginfo_t *info)
669 {
670 	struct task_struct *tsk = current;
671 	struct sigpending *pending = &tsk->pending;
672 	struct sigqueue *q, *sync = NULL;
673 
674 	/*
675 	 * Might a synchronous signal be in the queue?
676 	 */
677 	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
678 		return 0;
679 
680 	/*
681 	 * Return the first synchronous signal in the queue.
682 	 */
683 	list_for_each_entry(q, &pending->list, list) {
684 		/* Synchronous signals have a positive si_code */
685 		if ((q->info.si_code > SI_USER) &&
686 		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
687 			sync = q;
688 			goto next;
689 		}
690 	}
691 	return 0;
692 next:
693 	/*
694 	 * Check if there is another siginfo for the same signal.
695 	 */
696 	list_for_each_entry_continue(q, &pending->list, list) {
697 		if (q->info.si_signo == sync->info.si_signo)
698 			goto still_pending;
699 	}
700 
701 	sigdelset(&pending->signal, sync->info.si_signo);
702 	recalc_sigpending();
703 still_pending:
704 	list_del_init(&sync->list);
705 	copy_siginfo(info, &sync->info);
706 	__sigqueue_free(sync);
707 	return info->si_signo;
708 }
709 
710 /*
711  * Tell a process that it has a new active signal..
712  *
713  * NOTE! we rely on the previous spin_lock to
714  * lock interrupts for us! We can only be called with
715  * "siglock" held, and the local interrupt must
716  * have been disabled when that got acquired!
717  *
718  * No need to set need_resched since signal event passing
719  * goes through ->blocked
720  */
721 void signal_wake_up_state(struct task_struct *t, unsigned int state)
722 {
723 	lockdep_assert_held(&t->sighand->siglock);
724 
725 	set_tsk_thread_flag(t, TIF_SIGPENDING);
726 
727 	/*
728 	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
729 	 * case. We don't check t->state here because there is a race with it
730 	 * executing another processor and just now entering stopped state.
731 	 * By using wake_up_state, we ensure the process will wake up and
732 	 * handle its death signal.
733 	 */
734 	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
735 		kick_process(t);
736 }
737 
738 static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q);
739 
740 static void sigqueue_free_ignored(struct task_struct *tsk, struct sigqueue *q)
741 {
742 	if (likely(!(q->flags & SIGQUEUE_PREALLOC) || q->info.si_code != SI_TIMER))
743 		__sigqueue_free(q);
744 	else
745 		posixtimer_sig_ignore(tsk, q);
746 }
747 
748 /* Remove signals in mask from the pending set and queue. */
749 static void flush_sigqueue_mask(struct task_struct *p, sigset_t *mask, struct sigpending *s)
750 {
751 	struct sigqueue *q, *n;
752 	sigset_t m;
753 
754 	lockdep_assert_held(&p->sighand->siglock);
755 
756 	sigandsets(&m, mask, &s->signal);
757 	if (sigisemptyset(&m))
758 		return;
759 
760 	sigandnsets(&s->signal, &s->signal, mask);
761 	list_for_each_entry_safe(q, n, &s->list, list) {
762 		if (sigismember(mask, q->info.si_signo)) {
763 			list_del_init(&q->list);
764 			sigqueue_free_ignored(p, q);
765 		}
766 	}
767 }
768 
769 static inline int is_si_special(const struct kernel_siginfo *info)
770 {
771 	return info <= SEND_SIG_PRIV;
772 }
773 
774 static inline bool si_fromuser(const struct kernel_siginfo *info)
775 {
776 	return info == SEND_SIG_NOINFO ||
777 		(!is_si_special(info) && SI_FROMUSER(info));
778 }
779 
780 /*
781  * called with RCU read lock from check_kill_permission()
782  */
783 static bool kill_ok_by_cred(struct task_struct *t)
784 {
785 	const struct cred *cred = current_cred();
786 	const struct cred *tcred = __task_cred(t);
787 
788 	return uid_eq(cred->euid, tcred->suid) ||
789 	       uid_eq(cred->euid, tcred->uid) ||
790 	       uid_eq(cred->uid, tcred->suid) ||
791 	       uid_eq(cred->uid, tcred->uid) ||
792 	       ns_capable(tcred->user_ns, CAP_KILL);
793 }
794 
795 /*
796  * Bad permissions for sending the signal
797  * - the caller must hold the RCU read lock
798  */
799 static int check_kill_permission(int sig, struct kernel_siginfo *info,
800 				 struct task_struct *t)
801 {
802 	struct pid *sid;
803 	int error;
804 
805 	if (!valid_signal(sig))
806 		return -EINVAL;
807 
808 	if (!si_fromuser(info))
809 		return 0;
810 
811 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
812 	if (error)
813 		return error;
814 
815 	if (!same_thread_group(current, t) &&
816 	    !kill_ok_by_cred(t)) {
817 		switch (sig) {
818 		case SIGCONT:
819 			sid = task_session(t);
820 			/*
821 			 * We don't return the error if sid == NULL. The
822 			 * task was unhashed, the caller must notice this.
823 			 */
824 			if (!sid || sid == task_session(current))
825 				break;
826 			fallthrough;
827 		default:
828 			return -EPERM;
829 		}
830 	}
831 
832 	return security_task_kill(t, info, sig, NULL);
833 }
834 
835 /**
836  * ptrace_trap_notify - schedule trap to notify ptracer
837  * @t: tracee wanting to notify tracer
838  *
839  * This function schedules sticky ptrace trap which is cleared on the next
840  * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
841  * ptracer.
842  *
843  * If @t is running, STOP trap will be taken.  If trapped for STOP and
844  * ptracer is listening for events, tracee is woken up so that it can
845  * re-trap for the new event.  If trapped otherwise, STOP trap will be
846  * eventually taken without returning to userland after the existing traps
847  * are finished by PTRACE_CONT.
848  *
849  * CONTEXT:
850  * Must be called with @task->sighand->siglock held.
851  */
852 static void ptrace_trap_notify(struct task_struct *t)
853 {
854 	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
855 	lockdep_assert_held(&t->sighand->siglock);
856 
857 	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
858 	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
859 }
860 
861 /*
862  * Handle magic process-wide effects of stop/continue signals. Unlike
863  * the signal actions, these happen immediately at signal-generation
864  * time regardless of blocking, ignoring, or handling.  This does the
865  * actual continuing for SIGCONT, but not the actual stopping for stop
866  * signals. The process stop is done as a signal action for SIG_DFL.
867  *
868  * Returns true if the signal should be actually delivered, otherwise
869  * it should be dropped.
870  */
871 static bool prepare_signal(int sig, struct task_struct *p, bool force)
872 {
873 	struct signal_struct *signal = p->signal;
874 	struct task_struct *t;
875 	sigset_t flush;
876 
877 	if (signal->flags & SIGNAL_GROUP_EXIT) {
878 		if (signal->core_state)
879 			return sig == SIGKILL;
880 		/*
881 		 * The process is in the middle of dying, drop the signal.
882 		 */
883 		return false;
884 	} else if (sig_kernel_stop(sig)) {
885 		/*
886 		 * This is a stop signal.  Remove SIGCONT from all queues.
887 		 */
888 		siginitset(&flush, sigmask(SIGCONT));
889 		flush_sigqueue_mask(p, &flush, &signal->shared_pending);
890 		for_each_thread(p, t)
891 			flush_sigqueue_mask(p, &flush, &t->pending);
892 	} else if (sig == SIGCONT) {
893 		unsigned int why;
894 		/*
895 		 * Remove all stop signals from all queues, wake all threads.
896 		 */
897 		siginitset(&flush, SIG_KERNEL_STOP_MASK);
898 		flush_sigqueue_mask(p, &flush, &signal->shared_pending);
899 		for_each_thread(p, t) {
900 			flush_sigqueue_mask(p, &flush, &t->pending);
901 			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
902 			if (likely(!(t->ptrace & PT_SEIZED))) {
903 				t->jobctl &= ~JOBCTL_STOPPED;
904 				wake_up_state(t, __TASK_STOPPED);
905 			} else
906 				ptrace_trap_notify(t);
907 		}
908 
909 		/*
910 		 * Notify the parent with CLD_CONTINUED if we were stopped.
911 		 *
912 		 * If we were in the middle of a group stop, we pretend it
913 		 * was already finished, and then continued. Since SIGCHLD
914 		 * doesn't queue we report only CLD_STOPPED, as if the next
915 		 * CLD_CONTINUED was dropped.
916 		 */
917 		why = 0;
918 		if (signal->flags & SIGNAL_STOP_STOPPED)
919 			why |= SIGNAL_CLD_CONTINUED;
920 		else if (signal->group_stop_count)
921 			why |= SIGNAL_CLD_STOPPED;
922 
923 		if (why) {
924 			/*
925 			 * The first thread which returns from do_signal_stop()
926 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
927 			 * notify its parent. See get_signal().
928 			 */
929 			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
930 			signal->group_stop_count = 0;
931 			signal->group_exit_code = 0;
932 		}
933 	}
934 
935 	return !sig_ignored(p, sig, force);
936 }
937 
938 /*
939  * Test if P wants to take SIG.  After we've checked all threads with this,
940  * it's equivalent to finding no threads not blocking SIG.  Any threads not
941  * blocking SIG were ruled out because they are not running and already
942  * have pending signals.  Such threads will dequeue from the shared queue
943  * as soon as they're available, so putting the signal on the shared queue
944  * will be equivalent to sending it to one such thread.
945  */
946 static inline bool wants_signal(int sig, struct task_struct *p)
947 {
948 	if (sigismember(&p->blocked, sig))
949 		return false;
950 
951 	if (p->flags & PF_EXITING)
952 		return false;
953 
954 	if (sig == SIGKILL)
955 		return true;
956 
957 	if (task_is_stopped_or_traced(p))
958 		return false;
959 
960 	return task_curr(p) || !task_sigpending(p);
961 }
962 
963 static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
964 {
965 	struct signal_struct *signal = p->signal;
966 	struct task_struct *t;
967 
968 	/*
969 	 * Now find a thread we can wake up to take the signal off the queue.
970 	 *
971 	 * Try the suggested task first (may or may not be the main thread).
972 	 */
973 	if (wants_signal(sig, p))
974 		t = p;
975 	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
976 		/*
977 		 * There is just one thread and it does not need to be woken.
978 		 * It will dequeue unblocked signals before it runs again.
979 		 */
980 		return;
981 	else {
982 		/*
983 		 * Otherwise try to find a suitable thread.
984 		 */
985 		t = signal->curr_target;
986 		while (!wants_signal(sig, t)) {
987 			t = next_thread(t);
988 			if (t == signal->curr_target)
989 				/*
990 				 * No thread needs to be woken.
991 				 * Any eligible threads will see
992 				 * the signal in the queue soon.
993 				 */
994 				return;
995 		}
996 		signal->curr_target = t;
997 	}
998 
999 	/*
1000 	 * Found a killable thread.  If the signal will be fatal,
1001 	 * then start taking the whole group down immediately.
1002 	 */
1003 	if (sig_fatal(p, sig) &&
1004 	    (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
1005 	    !sigismember(&t->real_blocked, sig) &&
1006 	    (sig == SIGKILL || !p->ptrace)) {
1007 		/*
1008 		 * This signal will be fatal to the whole group.
1009 		 */
1010 		if (!sig_kernel_coredump(sig)) {
1011 			/*
1012 			 * Start a group exit and wake everybody up.
1013 			 * This way we don't have other threads
1014 			 * running and doing things after a slower
1015 			 * thread has the fatal signal pending.
1016 			 */
1017 			signal->flags = SIGNAL_GROUP_EXIT;
1018 			signal->group_exit_code = sig;
1019 			signal->group_stop_count = 0;
1020 			__for_each_thread(signal, t) {
1021 				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1022 				sigaddset(&t->pending.signal, SIGKILL);
1023 				signal_wake_up(t, 1);
1024 			}
1025 			return;
1026 		}
1027 	}
1028 
1029 	/*
1030 	 * The signal is already in the shared-pending queue.
1031 	 * Tell the chosen thread to wake up and dequeue it.
1032 	 */
1033 	signal_wake_up(t, sig == SIGKILL);
1034 	return;
1035 }
1036 
1037 static inline bool legacy_queue(struct sigpending *signals, int sig)
1038 {
1039 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1040 }
1041 
1042 static int __send_signal_locked(int sig, struct kernel_siginfo *info,
1043 				struct task_struct *t, enum pid_type type, bool force)
1044 {
1045 	struct sigpending *pending;
1046 	struct sigqueue *q;
1047 	int override_rlimit;
1048 	int ret = 0, result;
1049 
1050 	lockdep_assert_held(&t->sighand->siglock);
1051 
1052 	result = TRACE_SIGNAL_IGNORED;
1053 	if (!prepare_signal(sig, t, force))
1054 		goto ret;
1055 
1056 	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1057 	/*
1058 	 * Short-circuit ignored signals and support queuing
1059 	 * exactly one non-rt signal, so that we can get more
1060 	 * detailed information about the cause of the signal.
1061 	 */
1062 	result = TRACE_SIGNAL_ALREADY_PENDING;
1063 	if (legacy_queue(pending, sig))
1064 		goto ret;
1065 
1066 	result = TRACE_SIGNAL_DELIVERED;
1067 	/*
1068 	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
1069 	 */
1070 	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1071 		goto out_set;
1072 
1073 	/*
1074 	 * Real-time signals must be queued if sent by sigqueue, or
1075 	 * some other real-time mechanism.  It is implementation
1076 	 * defined whether kill() does so.  We attempt to do so, on
1077 	 * the principle of least surprise, but since kill is not
1078 	 * allowed to fail with EAGAIN when low on memory we just
1079 	 * make sure at least one signal gets delivered and don't
1080 	 * pass on the info struct.
1081 	 */
1082 	if (sig < SIGRTMIN)
1083 		override_rlimit = (is_si_special(info) || info->si_code >= 0);
1084 	else
1085 		override_rlimit = 0;
1086 
1087 	q = sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
1088 
1089 	if (q) {
1090 		list_add_tail(&q->list, &pending->list);
1091 		switch ((unsigned long) info) {
1092 		case (unsigned long) SEND_SIG_NOINFO:
1093 			clear_siginfo(&q->info);
1094 			q->info.si_signo = sig;
1095 			q->info.si_errno = 0;
1096 			q->info.si_code = SI_USER;
1097 			q->info.si_pid = task_tgid_nr_ns(current,
1098 							task_active_pid_ns(t));
1099 			rcu_read_lock();
1100 			q->info.si_uid =
1101 				from_kuid_munged(task_cred_xxx(t, user_ns),
1102 						 current_uid());
1103 			rcu_read_unlock();
1104 			break;
1105 		case (unsigned long) SEND_SIG_PRIV:
1106 			clear_siginfo(&q->info);
1107 			q->info.si_signo = sig;
1108 			q->info.si_errno = 0;
1109 			q->info.si_code = SI_KERNEL;
1110 			q->info.si_pid = 0;
1111 			q->info.si_uid = 0;
1112 			break;
1113 		default:
1114 			copy_siginfo(&q->info, info);
1115 			break;
1116 		}
1117 	} else if (!is_si_special(info) &&
1118 		   sig >= SIGRTMIN && info->si_code != SI_USER) {
1119 		/*
1120 		 * Queue overflow, abort.  We may abort if the
1121 		 * signal was rt and sent by user using something
1122 		 * other than kill().
1123 		 */
1124 		result = TRACE_SIGNAL_OVERFLOW_FAIL;
1125 		ret = -EAGAIN;
1126 		goto ret;
1127 	} else {
1128 		/*
1129 		 * This is a silent loss of information.  We still
1130 		 * send the signal, but the *info bits are lost.
1131 		 */
1132 		result = TRACE_SIGNAL_LOSE_INFO;
1133 	}
1134 
1135 out_set:
1136 	signalfd_notify(t, sig);
1137 	sigaddset(&pending->signal, sig);
1138 
1139 	/* Let multiprocess signals appear after on-going forks */
1140 	if (type > PIDTYPE_TGID) {
1141 		struct multiprocess_signals *delayed;
1142 		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1143 			sigset_t *signal = &delayed->signal;
1144 			/* Can't queue both a stop and a continue signal */
1145 			if (sig == SIGCONT)
1146 				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1147 			else if (sig_kernel_stop(sig))
1148 				sigdelset(signal, SIGCONT);
1149 			sigaddset(signal, sig);
1150 		}
1151 	}
1152 
1153 	complete_signal(sig, t, type);
1154 ret:
1155 	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1156 	return ret;
1157 }
1158 
1159 static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1160 {
1161 	bool ret = false;
1162 	switch (siginfo_layout(info->si_signo, info->si_code)) {
1163 	case SIL_KILL:
1164 	case SIL_CHLD:
1165 	case SIL_RT:
1166 		ret = true;
1167 		break;
1168 	case SIL_TIMER:
1169 	case SIL_POLL:
1170 	case SIL_FAULT:
1171 	case SIL_FAULT_TRAPNO:
1172 	case SIL_FAULT_MCEERR:
1173 	case SIL_FAULT_BNDERR:
1174 	case SIL_FAULT_PKUERR:
1175 	case SIL_FAULT_PERF_EVENT:
1176 	case SIL_SYS:
1177 		ret = false;
1178 		break;
1179 	}
1180 	return ret;
1181 }
1182 
1183 int send_signal_locked(int sig, struct kernel_siginfo *info,
1184 		       struct task_struct *t, enum pid_type type)
1185 {
1186 	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1187 	bool force = false;
1188 
1189 	if (info == SEND_SIG_NOINFO) {
1190 		/* Force if sent from an ancestor pid namespace */
1191 		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1192 	} else if (info == SEND_SIG_PRIV) {
1193 		/* Don't ignore kernel generated signals */
1194 		force = true;
1195 	} else if (has_si_pid_and_uid(info)) {
1196 		/* SIGKILL and SIGSTOP is special or has ids */
1197 		struct user_namespace *t_user_ns;
1198 
1199 		rcu_read_lock();
1200 		t_user_ns = task_cred_xxx(t, user_ns);
1201 		if (current_user_ns() != t_user_ns) {
1202 			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1203 			info->si_uid = from_kuid_munged(t_user_ns, uid);
1204 		}
1205 		rcu_read_unlock();
1206 
1207 		/* A kernel generated signal? */
1208 		force = (info->si_code == SI_KERNEL);
1209 
1210 		/* From an ancestor pid namespace? */
1211 		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1212 			info->si_pid = 0;
1213 			force = true;
1214 		}
1215 	}
1216 	return __send_signal_locked(sig, info, t, type, force);
1217 }
1218 
1219 static void print_fatal_signal(int signr)
1220 {
1221 	struct pt_regs *regs = task_pt_regs(current);
1222 	struct file *exe_file;
1223 
1224 	exe_file = get_task_exe_file(current);
1225 	if (exe_file) {
1226 		pr_info("%pD: %s: potentially unexpected fatal signal %d.\n",
1227 			exe_file, current->comm, signr);
1228 		fput(exe_file);
1229 	} else {
1230 		pr_info("%s: potentially unexpected fatal signal %d.\n",
1231 			current->comm, signr);
1232 	}
1233 
1234 #if defined(__i386__) && !defined(__arch_um__)
1235 	pr_info("code at %08lx: ", regs->ip);
1236 	{
1237 		int i;
1238 		for (i = 0; i < 16; i++) {
1239 			unsigned char insn;
1240 
1241 			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1242 				break;
1243 			pr_cont("%02x ", insn);
1244 		}
1245 	}
1246 	pr_cont("\n");
1247 #endif
1248 	preempt_disable();
1249 	show_regs(regs);
1250 	preempt_enable();
1251 }
1252 
1253 static int __init setup_print_fatal_signals(char *str)
1254 {
1255 	get_option (&str, &print_fatal_signals);
1256 
1257 	return 1;
1258 }
1259 
1260 __setup("print-fatal-signals=", setup_print_fatal_signals);
1261 
1262 int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1263 			enum pid_type type)
1264 {
1265 	unsigned long flags;
1266 	int ret = -ESRCH;
1267 
1268 	if (lock_task_sighand(p, &flags)) {
1269 		ret = send_signal_locked(sig, info, p, type);
1270 		unlock_task_sighand(p, &flags);
1271 	}
1272 
1273 	return ret;
1274 }
1275 
1276 enum sig_handler {
1277 	HANDLER_CURRENT, /* If reachable use the current handler */
1278 	HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
1279 	HANDLER_EXIT,	 /* Only visible as the process exit code */
1280 };
1281 
1282 /*
1283  * Force a signal that the process can't ignore: if necessary
1284  * we unblock the signal and change any SIG_IGN to SIG_DFL.
1285  *
1286  * Note: If we unblock the signal, we always reset it to SIG_DFL,
1287  * since we do not want to have a signal handler that was blocked
1288  * be invoked when user space had explicitly blocked it.
1289  *
1290  * We don't want to have recursive SIGSEGV's etc, for example,
1291  * that is why we also clear SIGNAL_UNKILLABLE.
1292  */
1293 static int
1294 force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
1295 	enum sig_handler handler)
1296 {
1297 	unsigned long int flags;
1298 	int ret, blocked, ignored;
1299 	struct k_sigaction *action;
1300 	int sig = info->si_signo;
1301 
1302 	spin_lock_irqsave(&t->sighand->siglock, flags);
1303 	action = &t->sighand->action[sig-1];
1304 	ignored = action->sa.sa_handler == SIG_IGN;
1305 	blocked = sigismember(&t->blocked, sig);
1306 	if (blocked || ignored || (handler != HANDLER_CURRENT)) {
1307 		action->sa.sa_handler = SIG_DFL;
1308 		if (handler == HANDLER_EXIT)
1309 			action->sa.sa_flags |= SA_IMMUTABLE;
1310 		if (blocked)
1311 			sigdelset(&t->blocked, sig);
1312 	}
1313 	/*
1314 	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1315 	 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
1316 	 */
1317 	if (action->sa.sa_handler == SIG_DFL &&
1318 	    (!t->ptrace || (handler == HANDLER_EXIT)))
1319 		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1320 	ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
1321 	/* This can happen if the signal was already pending and blocked */
1322 	if (!task_sigpending(t))
1323 		signal_wake_up(t, 0);
1324 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1325 
1326 	return ret;
1327 }
1328 
1329 int force_sig_info(struct kernel_siginfo *info)
1330 {
1331 	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
1332 }
1333 
1334 /*
1335  * Nuke all other threads in the group.
1336  */
1337 int zap_other_threads(struct task_struct *p)
1338 {
1339 	struct task_struct *t;
1340 	int count = 0;
1341 
1342 	p->signal->group_stop_count = 0;
1343 
1344 	for_other_threads(p, t) {
1345 		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1346 		count++;
1347 
1348 		/* Don't bother with already dead threads */
1349 		if (t->exit_state)
1350 			continue;
1351 		sigaddset(&t->pending.signal, SIGKILL);
1352 		signal_wake_up(t, 1);
1353 	}
1354 
1355 	return count;
1356 }
1357 
1358 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1359 					   unsigned long *flags)
1360 {
1361 	struct sighand_struct *sighand;
1362 
1363 	rcu_read_lock();
1364 	for (;;) {
1365 		sighand = rcu_dereference(tsk->sighand);
1366 		if (unlikely(sighand == NULL))
1367 			break;
1368 
1369 		/*
1370 		 * This sighand can be already freed and even reused, but
1371 		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1372 		 * initializes ->siglock: this slab can't go away, it has
1373 		 * the same object type, ->siglock can't be reinitialized.
1374 		 *
1375 		 * We need to ensure that tsk->sighand is still the same
1376 		 * after we take the lock, we can race with de_thread() or
1377 		 * __exit_signal(). In the latter case the next iteration
1378 		 * must see ->sighand == NULL.
1379 		 */
1380 		spin_lock_irqsave(&sighand->siglock, *flags);
1381 		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
1382 			break;
1383 		spin_unlock_irqrestore(&sighand->siglock, *flags);
1384 	}
1385 	rcu_read_unlock();
1386 
1387 	return sighand;
1388 }
1389 
1390 #ifdef CONFIG_LOCKDEP
1391 void lockdep_assert_task_sighand_held(struct task_struct *task)
1392 {
1393 	struct sighand_struct *sighand;
1394 
1395 	rcu_read_lock();
1396 	sighand = rcu_dereference(task->sighand);
1397 	if (sighand)
1398 		lockdep_assert_held(&sighand->siglock);
1399 	else
1400 		WARN_ON_ONCE(1);
1401 	rcu_read_unlock();
1402 }
1403 #endif
1404 
1405 /*
1406  * send signal info to all the members of a thread group or to the
1407  * individual thread if type == PIDTYPE_PID.
1408  */
1409 int group_send_sig_info(int sig, struct kernel_siginfo *info,
1410 			struct task_struct *p, enum pid_type type)
1411 {
1412 	int ret;
1413 
1414 	rcu_read_lock();
1415 	ret = check_kill_permission(sig, info, p);
1416 	rcu_read_unlock();
1417 
1418 	if (!ret && sig)
1419 		ret = do_send_sig_info(sig, info, p, type);
1420 
1421 	return ret;
1422 }
1423 
1424 /*
1425  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1426  * control characters do (^C, ^Z etc)
1427  * - the caller must hold at least a readlock on tasklist_lock
1428  */
1429 int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1430 {
1431 	struct task_struct *p = NULL;
1432 	int ret = -ESRCH;
1433 
1434 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1435 		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1436 		/*
1437 		 * If group_send_sig_info() succeeds at least once ret
1438 		 * becomes 0 and after that the code below has no effect.
1439 		 * Otherwise we return the last err or -ESRCH if this
1440 		 * process group is empty.
1441 		 */
1442 		if (ret)
1443 			ret = err;
1444 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1445 
1446 	return ret;
1447 }
1448 
1449 static int kill_pid_info_type(int sig, struct kernel_siginfo *info,
1450 				struct pid *pid, enum pid_type type)
1451 {
1452 	int error = -ESRCH;
1453 	struct task_struct *p;
1454 
1455 	for (;;) {
1456 		rcu_read_lock();
1457 		p = pid_task(pid, PIDTYPE_PID);
1458 		if (p)
1459 			error = group_send_sig_info(sig, info, p, type);
1460 		rcu_read_unlock();
1461 		if (likely(!p || error != -ESRCH))
1462 			return error;
1463 		/*
1464 		 * The task was unhashed in between, try again.  If it
1465 		 * is dead, pid_task() will return NULL, if we race with
1466 		 * de_thread() it will find the new leader.
1467 		 */
1468 	}
1469 }
1470 
1471 int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1472 {
1473 	return kill_pid_info_type(sig, info, pid, PIDTYPE_TGID);
1474 }
1475 
1476 static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1477 {
1478 	int error;
1479 	rcu_read_lock();
1480 	error = kill_pid_info(sig, info, find_vpid(pid));
1481 	rcu_read_unlock();
1482 	return error;
1483 }
1484 
1485 static inline bool kill_as_cred_perm(const struct cred *cred,
1486 				     struct task_struct *target)
1487 {
1488 	const struct cred *pcred = __task_cred(target);
1489 
1490 	return uid_eq(cred->euid, pcred->suid) ||
1491 	       uid_eq(cred->euid, pcred->uid) ||
1492 	       uid_eq(cred->uid, pcred->suid) ||
1493 	       uid_eq(cred->uid, pcred->uid);
1494 }
1495 
1496 /*
1497  * The usb asyncio usage of siginfo is wrong.  The glibc support
1498  * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1499  * AKA after the generic fields:
1500  *	kernel_pid_t	si_pid;
1501  *	kernel_uid32_t	si_uid;
1502  *	sigval_t	si_value;
1503  *
1504  * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1505  * after the generic fields is:
1506  *	void __user 	*si_addr;
1507  *
1508  * This is a practical problem when there is a 64bit big endian kernel
1509  * and a 32bit userspace.  As the 32bit address will encoded in the low
1510  * 32bits of the pointer.  Those low 32bits will be stored at higher
1511  * address than appear in a 32 bit pointer.  So userspace will not
1512  * see the address it was expecting for it's completions.
1513  *
1514  * There is nothing in the encoding that can allow
1515  * copy_siginfo_to_user32 to detect this confusion of formats, so
1516  * handle this by requiring the caller of kill_pid_usb_asyncio to
1517  * notice when this situration takes place and to store the 32bit
1518  * pointer in sival_int, instead of sival_addr of the sigval_t addr
1519  * parameter.
1520  */
1521 int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1522 			 struct pid *pid, const struct cred *cred)
1523 {
1524 	struct kernel_siginfo info;
1525 	struct task_struct *p;
1526 	unsigned long flags;
1527 	int ret = -EINVAL;
1528 
1529 	if (!valid_signal(sig))
1530 		return ret;
1531 
1532 	clear_siginfo(&info);
1533 	info.si_signo = sig;
1534 	info.si_errno = errno;
1535 	info.si_code = SI_ASYNCIO;
1536 	*((sigval_t *)&info.si_pid) = addr;
1537 
1538 	rcu_read_lock();
1539 	p = pid_task(pid, PIDTYPE_PID);
1540 	if (!p) {
1541 		ret = -ESRCH;
1542 		goto out_unlock;
1543 	}
1544 	if (!kill_as_cred_perm(cred, p)) {
1545 		ret = -EPERM;
1546 		goto out_unlock;
1547 	}
1548 	ret = security_task_kill(p, &info, sig, cred);
1549 	if (ret)
1550 		goto out_unlock;
1551 
1552 	if (sig) {
1553 		if (lock_task_sighand(p, &flags)) {
1554 			ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
1555 			unlock_task_sighand(p, &flags);
1556 		} else
1557 			ret = -ESRCH;
1558 	}
1559 out_unlock:
1560 	rcu_read_unlock();
1561 	return ret;
1562 }
1563 EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
1564 
1565 /*
1566  * kill_something_info() interprets pid in interesting ways just like kill(2).
1567  *
1568  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1569  * is probably wrong.  Should make it like BSD or SYSV.
1570  */
1571 
1572 static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1573 {
1574 	int ret;
1575 
1576 	if (pid > 0)
1577 		return kill_proc_info(sig, info, pid);
1578 
1579 	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
1580 	if (pid == INT_MIN)
1581 		return -ESRCH;
1582 
1583 	read_lock(&tasklist_lock);
1584 	if (pid != -1) {
1585 		ret = __kill_pgrp_info(sig, info,
1586 				pid ? find_vpid(-pid) : task_pgrp(current));
1587 	} else {
1588 		int retval = 0, count = 0;
1589 		struct task_struct * p;
1590 
1591 		for_each_process(p) {
1592 			if (task_pid_vnr(p) > 1 &&
1593 					!same_thread_group(p, current)) {
1594 				int err = group_send_sig_info(sig, info, p,
1595 							      PIDTYPE_MAX);
1596 				++count;
1597 				if (err != -EPERM)
1598 					retval = err;
1599 			}
1600 		}
1601 		ret = count ? retval : -ESRCH;
1602 	}
1603 	read_unlock(&tasklist_lock);
1604 
1605 	return ret;
1606 }
1607 
1608 /*
1609  * These are for backward compatibility with the rest of the kernel source.
1610  */
1611 
1612 int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1613 {
1614 	/*
1615 	 * Make sure legacy kernel users don't send in bad values
1616 	 * (normal paths check this in check_kill_permission).
1617 	 */
1618 	if (!valid_signal(sig))
1619 		return -EINVAL;
1620 
1621 	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1622 }
1623 EXPORT_SYMBOL(send_sig_info);
1624 
1625 #define __si_special(priv) \
1626 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1627 
1628 int
1629 send_sig(int sig, struct task_struct *p, int priv)
1630 {
1631 	return send_sig_info(sig, __si_special(priv), p);
1632 }
1633 EXPORT_SYMBOL(send_sig);
1634 
1635 void force_sig(int sig)
1636 {
1637 	struct kernel_siginfo info;
1638 
1639 	clear_siginfo(&info);
1640 	info.si_signo = sig;
1641 	info.si_errno = 0;
1642 	info.si_code = SI_KERNEL;
1643 	info.si_pid = 0;
1644 	info.si_uid = 0;
1645 	force_sig_info(&info);
1646 }
1647 EXPORT_SYMBOL(force_sig);
1648 
1649 void force_fatal_sig(int sig)
1650 {
1651 	struct kernel_siginfo info;
1652 
1653 	clear_siginfo(&info);
1654 	info.si_signo = sig;
1655 	info.si_errno = 0;
1656 	info.si_code = SI_KERNEL;
1657 	info.si_pid = 0;
1658 	info.si_uid = 0;
1659 	force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
1660 }
1661 
1662 void force_exit_sig(int sig)
1663 {
1664 	struct kernel_siginfo info;
1665 
1666 	clear_siginfo(&info);
1667 	info.si_signo = sig;
1668 	info.si_errno = 0;
1669 	info.si_code = SI_KERNEL;
1670 	info.si_pid = 0;
1671 	info.si_uid = 0;
1672 	force_sig_info_to_task(&info, current, HANDLER_EXIT);
1673 }
1674 
1675 /*
1676  * When things go south during signal handling, we
1677  * will force a SIGSEGV. And if the signal that caused
1678  * the problem was already a SIGSEGV, we'll want to
1679  * make sure we don't even try to deliver the signal..
1680  */
1681 void force_sigsegv(int sig)
1682 {
1683 	if (sig == SIGSEGV)
1684 		force_fatal_sig(SIGSEGV);
1685 	else
1686 		force_sig(SIGSEGV);
1687 }
1688 
1689 int force_sig_fault_to_task(int sig, int code, void __user *addr,
1690 			    struct task_struct *t)
1691 {
1692 	struct kernel_siginfo info;
1693 
1694 	clear_siginfo(&info);
1695 	info.si_signo = sig;
1696 	info.si_errno = 0;
1697 	info.si_code  = code;
1698 	info.si_addr  = addr;
1699 	return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
1700 }
1701 
1702 int force_sig_fault(int sig, int code, void __user *addr)
1703 {
1704 	return force_sig_fault_to_task(sig, code, addr, current);
1705 }
1706 
1707 int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t)
1708 {
1709 	struct kernel_siginfo info;
1710 
1711 	clear_siginfo(&info);
1712 	info.si_signo = sig;
1713 	info.si_errno = 0;
1714 	info.si_code  = code;
1715 	info.si_addr  = addr;
1716 	return send_sig_info(info.si_signo, &info, t);
1717 }
1718 
1719 int force_sig_mceerr(int code, void __user *addr, short lsb)
1720 {
1721 	struct kernel_siginfo info;
1722 
1723 	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1724 	clear_siginfo(&info);
1725 	info.si_signo = SIGBUS;
1726 	info.si_errno = 0;
1727 	info.si_code = code;
1728 	info.si_addr = addr;
1729 	info.si_addr_lsb = lsb;
1730 	return force_sig_info(&info);
1731 }
1732 
1733 int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1734 {
1735 	struct kernel_siginfo info;
1736 
1737 	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1738 	clear_siginfo(&info);
1739 	info.si_signo = SIGBUS;
1740 	info.si_errno = 0;
1741 	info.si_code = code;
1742 	info.si_addr = addr;
1743 	info.si_addr_lsb = lsb;
1744 	return send_sig_info(info.si_signo, &info, t);
1745 }
1746 EXPORT_SYMBOL(send_sig_mceerr);
1747 
1748 int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1749 {
1750 	struct kernel_siginfo info;
1751 
1752 	clear_siginfo(&info);
1753 	info.si_signo = SIGSEGV;
1754 	info.si_errno = 0;
1755 	info.si_code  = SEGV_BNDERR;
1756 	info.si_addr  = addr;
1757 	info.si_lower = lower;
1758 	info.si_upper = upper;
1759 	return force_sig_info(&info);
1760 }
1761 
1762 #ifdef SEGV_PKUERR
1763 int force_sig_pkuerr(void __user *addr, u32 pkey)
1764 {
1765 	struct kernel_siginfo info;
1766 
1767 	clear_siginfo(&info);
1768 	info.si_signo = SIGSEGV;
1769 	info.si_errno = 0;
1770 	info.si_code  = SEGV_PKUERR;
1771 	info.si_addr  = addr;
1772 	info.si_pkey  = pkey;
1773 	return force_sig_info(&info);
1774 }
1775 #endif
1776 
1777 int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
1778 {
1779 	struct kernel_siginfo info;
1780 
1781 	clear_siginfo(&info);
1782 	info.si_signo     = SIGTRAP;
1783 	info.si_errno     = 0;
1784 	info.si_code      = TRAP_PERF;
1785 	info.si_addr      = addr;
1786 	info.si_perf_data = sig_data;
1787 	info.si_perf_type = type;
1788 
1789 	/*
1790 	 * Signals generated by perf events should not terminate the whole
1791 	 * process if SIGTRAP is blocked, however, delivering the signal
1792 	 * asynchronously is better than not delivering at all. But tell user
1793 	 * space if the signal was asynchronous, so it can clearly be
1794 	 * distinguished from normal synchronous ones.
1795 	 */
1796 	info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
1797 				     TRAP_PERF_FLAG_ASYNC :
1798 				     0;
1799 
1800 	return send_sig_info(info.si_signo, &info, current);
1801 }
1802 
1803 /**
1804  * force_sig_seccomp - signals the task to allow in-process syscall emulation
1805  * @syscall: syscall number to send to userland
1806  * @reason: filter-supplied reason code to send to userland (via si_errno)
1807  * @force_coredump: true to trigger a coredump
1808  *
1809  * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
1810  */
1811 int force_sig_seccomp(int syscall, int reason, bool force_coredump)
1812 {
1813 	struct kernel_siginfo info;
1814 
1815 	clear_siginfo(&info);
1816 	info.si_signo = SIGSYS;
1817 	info.si_code = SYS_SECCOMP;
1818 	info.si_call_addr = (void __user *)KSTK_EIP(current);
1819 	info.si_errno = reason;
1820 	info.si_arch = syscall_get_arch(current);
1821 	info.si_syscall = syscall;
1822 	return force_sig_info_to_task(&info, current,
1823 		force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
1824 }
1825 
1826 /* For the crazy architectures that include trap information in
1827  * the errno field, instead of an actual errno value.
1828  */
1829 int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1830 {
1831 	struct kernel_siginfo info;
1832 
1833 	clear_siginfo(&info);
1834 	info.si_signo = SIGTRAP;
1835 	info.si_errno = errno;
1836 	info.si_code  = TRAP_HWBKPT;
1837 	info.si_addr  = addr;
1838 	return force_sig_info(&info);
1839 }
1840 
1841 /* For the rare architectures that include trap information using
1842  * si_trapno.
1843  */
1844 int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
1845 {
1846 	struct kernel_siginfo info;
1847 
1848 	clear_siginfo(&info);
1849 	info.si_signo = sig;
1850 	info.si_errno = 0;
1851 	info.si_code  = code;
1852 	info.si_addr  = addr;
1853 	info.si_trapno = trapno;
1854 	return force_sig_info(&info);
1855 }
1856 
1857 /* For the rare architectures that include trap information using
1858  * si_trapno.
1859  */
1860 int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
1861 			  struct task_struct *t)
1862 {
1863 	struct kernel_siginfo info;
1864 
1865 	clear_siginfo(&info);
1866 	info.si_signo = sig;
1867 	info.si_errno = 0;
1868 	info.si_code  = code;
1869 	info.si_addr  = addr;
1870 	info.si_trapno = trapno;
1871 	return send_sig_info(info.si_signo, &info, t);
1872 }
1873 
1874 static int kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1875 {
1876 	int ret;
1877 	read_lock(&tasklist_lock);
1878 	ret = __kill_pgrp_info(sig, info, pgrp);
1879 	read_unlock(&tasklist_lock);
1880 	return ret;
1881 }
1882 
1883 int kill_pgrp(struct pid *pid, int sig, int priv)
1884 {
1885 	return kill_pgrp_info(sig, __si_special(priv), pid);
1886 }
1887 EXPORT_SYMBOL(kill_pgrp);
1888 
1889 int kill_pid(struct pid *pid, int sig, int priv)
1890 {
1891 	return kill_pid_info(sig, __si_special(priv), pid);
1892 }
1893 EXPORT_SYMBOL(kill_pid);
1894 
1895 #ifdef CONFIG_POSIX_TIMERS
1896 /*
1897  * These functions handle POSIX timer signals. POSIX timers use
1898  * preallocated sigqueue structs for sending signals.
1899  */
1900 static void __flush_itimer_signals(struct sigpending *pending)
1901 {
1902 	sigset_t signal, retain;
1903 	struct sigqueue *q, *n;
1904 
1905 	signal = pending->signal;
1906 	sigemptyset(&retain);
1907 
1908 	list_for_each_entry_safe(q, n, &pending->list, list) {
1909 		int sig = q->info.si_signo;
1910 
1911 		if (likely(q->info.si_code != SI_TIMER)) {
1912 			sigaddset(&retain, sig);
1913 		} else {
1914 			sigdelset(&signal, sig);
1915 			list_del_init(&q->list);
1916 			__sigqueue_free(q);
1917 		}
1918 	}
1919 
1920 	sigorsets(&pending->signal, &signal, &retain);
1921 }
1922 
1923 void flush_itimer_signals(void)
1924 {
1925 	struct task_struct *tsk = current;
1926 
1927 	guard(spinlock_irqsave)(&tsk->sighand->siglock);
1928 	__flush_itimer_signals(&tsk->pending);
1929 	__flush_itimer_signals(&tsk->signal->shared_pending);
1930 }
1931 
1932 bool posixtimer_init_sigqueue(struct sigqueue *q)
1933 {
1934 	struct ucounts *ucounts = sig_get_ucounts(current, -1, 0);
1935 
1936 	if (!ucounts)
1937 		return false;
1938 	clear_siginfo(&q->info);
1939 	__sigqueue_init(q, ucounts, SIGQUEUE_PREALLOC);
1940 	return true;
1941 }
1942 
1943 static void posixtimer_queue_sigqueue(struct sigqueue *q, struct task_struct *t, enum pid_type type)
1944 {
1945 	struct sigpending *pending;
1946 	int sig = q->info.si_signo;
1947 
1948 	signalfd_notify(t, sig);
1949 	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1950 	list_add_tail(&q->list, &pending->list);
1951 	sigaddset(&pending->signal, sig);
1952 	complete_signal(sig, t, type);
1953 }
1954 
1955 /*
1956  * This function is used by POSIX timers to deliver a timer signal.
1957  * Where type is PIDTYPE_PID (such as for timers with SIGEV_THREAD_ID
1958  * set), the signal must be delivered to the specific thread (queues
1959  * into t->pending).
1960  *
1961  * Where type is not PIDTYPE_PID, signals must be delivered to the
1962  * process. In this case, prefer to deliver to current if it is in
1963  * the same thread group as the target process and its sighand is
1964  * stable, which avoids unnecessarily waking up a potentially idle task.
1965  */
1966 static inline struct task_struct *posixtimer_get_target(struct k_itimer *tmr)
1967 {
1968 	struct task_struct *t = pid_task(tmr->it_pid, tmr->it_pid_type);
1969 
1970 	if (t && tmr->it_pid_type != PIDTYPE_PID &&
1971 	    same_thread_group(t, current) && !current->exit_state)
1972 		t = current;
1973 	return t;
1974 }
1975 
1976 void posixtimer_send_sigqueue(struct k_itimer *tmr)
1977 {
1978 	struct sigqueue *q = &tmr->sigq;
1979 	int sig = q->info.si_signo;
1980 	struct task_struct *t;
1981 	unsigned long flags;
1982 	int result;
1983 
1984 	guard(rcu)();
1985 
1986 	t = posixtimer_get_target(tmr);
1987 	if (!t)
1988 		return;
1989 
1990 	if (!likely(lock_task_sighand(t, &flags)))
1991 		return;
1992 
1993 	/*
1994 	 * Update @tmr::sigqueue_seq for posix timer signals with sighand
1995 	 * locked to prevent a race against dequeue_signal().
1996 	 */
1997 	tmr->it_sigqueue_seq = tmr->it_signal_seq;
1998 
1999 	/*
2000 	 * Set the signal delivery status under sighand lock, so that the
2001 	 * ignored signal handling can distinguish between a periodic and a
2002 	 * non-periodic timer.
2003 	 */
2004 	tmr->it_sig_periodic = tmr->it_status == POSIX_TIMER_REQUEUE_PENDING;
2005 
2006 	if (!prepare_signal(sig, t, false)) {
2007 		result = TRACE_SIGNAL_IGNORED;
2008 
2009 		if (!list_empty(&q->list)) {
2010 			/*
2011 			 * The signal was ignored and blocked. The timer
2012 			 * expiry queued it because blocked signals are
2013 			 * queued independent of the ignored state.
2014 			 *
2015 			 * The unblocking set SIGPENDING, but the signal
2016 			 * was not yet dequeued from the pending list.
2017 			 * So prepare_signal() sees unblocked and ignored,
2018 			 * which ends up here. Leave it queued like a
2019 			 * regular signal.
2020 			 *
2021 			 * The same happens when the task group is exiting
2022 			 * and the signal is already queued.
2023 			 * prepare_signal() treats SIGNAL_GROUP_EXIT as
2024 			 * ignored independent of its queued state. This
2025 			 * gets cleaned up in __exit_signal().
2026 			 */
2027 			goto out;
2028 		}
2029 
2030 		/* Periodic timers with SIG_IGN are queued on the ignored list */
2031 		if (tmr->it_sig_periodic) {
2032 			/*
2033 			 * Already queued means the timer was rearmed after
2034 			 * the previous expiry got it on the ignore list.
2035 			 * Nothing to do for that case.
2036 			 */
2037 			if (hlist_unhashed(&tmr->ignored_list)) {
2038 				/*
2039 				 * Take a signal reference and queue it on
2040 				 * the ignored list.
2041 				 */
2042 				posixtimer_sigqueue_getref(q);
2043 				posixtimer_sig_ignore(t, q);
2044 			}
2045 		} else if (!hlist_unhashed(&tmr->ignored_list)) {
2046 			/*
2047 			 * Covers the case where a timer was periodic and
2048 			 * then the signal was ignored. Later it was rearmed
2049 			 * as oneshot timer. The previous signal is invalid
2050 			 * now, and this oneshot signal has to be dropped.
2051 			 * Remove it from the ignored list and drop the
2052 			 * reference count as the signal is not longer
2053 			 * queued.
2054 			 */
2055 			hlist_del_init(&tmr->ignored_list);
2056 			posixtimer_putref(tmr);
2057 		}
2058 		goto out;
2059 	}
2060 
2061 	if (unlikely(!list_empty(&q->list))) {
2062 		/* This holds a reference count already */
2063 		result = TRACE_SIGNAL_ALREADY_PENDING;
2064 		goto out;
2065 	}
2066 
2067 	/*
2068 	 * If the signal is on the ignore list, it got blocked after it was
2069 	 * ignored earlier. But nothing lifted the ignore. Move it back to
2070 	 * the pending list to be consistent with the regular signal
2071 	 * handling. This already holds a reference count.
2072 	 *
2073 	 * If it's not on the ignore list acquire a reference count.
2074 	 */
2075 	if (likely(hlist_unhashed(&tmr->ignored_list)))
2076 		posixtimer_sigqueue_getref(q);
2077 	else
2078 		hlist_del_init(&tmr->ignored_list);
2079 
2080 	posixtimer_queue_sigqueue(q, t, tmr->it_pid_type);
2081 	result = TRACE_SIGNAL_DELIVERED;
2082 out:
2083 	trace_signal_generate(sig, &q->info, t, tmr->it_pid_type != PIDTYPE_PID, result);
2084 	unlock_task_sighand(t, &flags);
2085 }
2086 
2087 static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q)
2088 {
2089 	struct k_itimer *tmr = container_of(q, struct k_itimer, sigq);
2090 
2091 	/*
2092 	 * If the timer is marked deleted already or the signal originates
2093 	 * from a non-periodic timer, then just drop the reference
2094 	 * count. Otherwise queue it on the ignored list.
2095 	 */
2096 	if (posixtimer_valid(tmr) && tmr->it_sig_periodic)
2097 		hlist_add_head(&tmr->ignored_list, &tsk->signal->ignored_posix_timers);
2098 	else
2099 		posixtimer_putref(tmr);
2100 }
2101 
2102 static void posixtimer_sig_unignore(struct task_struct *tsk, int sig)
2103 {
2104 	struct hlist_head *head = &tsk->signal->ignored_posix_timers;
2105 	struct hlist_node *tmp;
2106 	struct k_itimer *tmr;
2107 
2108 	if (likely(hlist_empty(head)))
2109 		return;
2110 
2111 	/*
2112 	 * Rearming a timer with sighand lock held is not possible due to
2113 	 * lock ordering vs. tmr::it_lock. Just stick the sigqueue back and
2114 	 * let the signal delivery path deal with it whether it needs to be
2115 	 * rearmed or not. This cannot be decided here w/o dropping sighand
2116 	 * lock and creating a loop retry horror show.
2117 	 */
2118 	hlist_for_each_entry_safe(tmr, tmp , head, ignored_list) {
2119 		struct task_struct *target;
2120 
2121 		/*
2122 		 * tmr::sigq.info.si_signo is immutable, so accessing it
2123 		 * without holding tmr::it_lock is safe.
2124 		 */
2125 		if (tmr->sigq.info.si_signo != sig)
2126 			continue;
2127 
2128 		hlist_del_init(&tmr->ignored_list);
2129 
2130 		/* This should never happen and leaks a reference count */
2131 		if (WARN_ON_ONCE(!list_empty(&tmr->sigq.list)))
2132 			continue;
2133 
2134 		/*
2135 		 * Get the target for the signal. If target is a thread and
2136 		 * has exited by now, drop the reference count.
2137 		 */
2138 		guard(rcu)();
2139 		target = posixtimer_get_target(tmr);
2140 		if (target)
2141 			posixtimer_queue_sigqueue(&tmr->sigq, target, tmr->it_pid_type);
2142 		else
2143 			posixtimer_putref(tmr);
2144 	}
2145 }
2146 #else /* CONFIG_POSIX_TIMERS */
2147 static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q) { }
2148 static inline void posixtimer_sig_unignore(struct task_struct *tsk, int sig) { }
2149 #endif /* !CONFIG_POSIX_TIMERS */
2150 
2151 void do_notify_pidfd(struct task_struct *task)
2152 {
2153 	struct pid *pid = task_pid(task);
2154 
2155 	WARN_ON(task->exit_state == 0);
2156 
2157 	__wake_up(&pid->wait_pidfd, TASK_NORMAL, 0,
2158 			poll_to_key(EPOLLIN | EPOLLRDNORM));
2159 }
2160 
2161 /*
2162  * Let a parent know about the death of a child.
2163  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2164  *
2165  * Returns true if our parent ignored us and so we've switched to
2166  * self-reaping.
2167  */
2168 bool do_notify_parent(struct task_struct *tsk, int sig)
2169 {
2170 	struct kernel_siginfo info;
2171 	unsigned long flags;
2172 	struct sighand_struct *psig;
2173 	bool autoreap = false;
2174 	u64 utime, stime;
2175 
2176 	WARN_ON_ONCE(sig == -1);
2177 
2178 	/* do_notify_parent_cldstop should have been called instead.  */
2179 	WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
2180 
2181 	WARN_ON_ONCE(!tsk->ptrace &&
2182 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
2183 	/*
2184 	 * Notify for thread-group leaders without subthreads.
2185 	 */
2186 	if (thread_group_empty(tsk))
2187 		do_notify_pidfd(tsk);
2188 
2189 	if (sig != SIGCHLD) {
2190 		/*
2191 		 * This is only possible if parent == real_parent.
2192 		 * Check if it has changed security domain.
2193 		 */
2194 		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2195 			sig = SIGCHLD;
2196 	}
2197 
2198 	clear_siginfo(&info);
2199 	info.si_signo = sig;
2200 	info.si_errno = 0;
2201 	/*
2202 	 * We are under tasklist_lock here so our parent is tied to
2203 	 * us and cannot change.
2204 	 *
2205 	 * task_active_pid_ns will always return the same pid namespace
2206 	 * until a task passes through release_task.
2207 	 *
2208 	 * write_lock() currently calls preempt_disable() which is the
2209 	 * same as rcu_read_lock(), but according to Oleg, this is not
2210 	 * correct to rely on this
2211 	 */
2212 	rcu_read_lock();
2213 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2214 	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2215 				       task_uid(tsk));
2216 	rcu_read_unlock();
2217 
2218 	task_cputime(tsk, &utime, &stime);
2219 	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2220 	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2221 
2222 	info.si_status = tsk->exit_code & 0x7f;
2223 	if (tsk->exit_code & 0x80)
2224 		info.si_code = CLD_DUMPED;
2225 	else if (tsk->exit_code & 0x7f)
2226 		info.si_code = CLD_KILLED;
2227 	else {
2228 		info.si_code = CLD_EXITED;
2229 		info.si_status = tsk->exit_code >> 8;
2230 	}
2231 
2232 	psig = tsk->parent->sighand;
2233 	spin_lock_irqsave(&psig->siglock, flags);
2234 	if (!tsk->ptrace && sig == SIGCHLD &&
2235 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2236 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2237 		/*
2238 		 * We are exiting and our parent doesn't care.  POSIX.1
2239 		 * defines special semantics for setting SIGCHLD to SIG_IGN
2240 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
2241 		 * automatically and not left for our parent's wait4 call.
2242 		 * Rather than having the parent do it as a magic kind of
2243 		 * signal handler, we just set this to tell do_exit that we
2244 		 * can be cleaned up without becoming a zombie.  Note that
2245 		 * we still call __wake_up_parent in this case, because a
2246 		 * blocked sys_wait4 might now return -ECHILD.
2247 		 *
2248 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2249 		 * is implementation-defined: we do (if you don't want
2250 		 * it, just use SIG_IGN instead).
2251 		 */
2252 		autoreap = true;
2253 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2254 			sig = 0;
2255 	}
2256 	/*
2257 	 * Send with __send_signal as si_pid and si_uid are in the
2258 	 * parent's namespaces.
2259 	 */
2260 	if (valid_signal(sig) && sig)
2261 		__send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2262 	__wake_up_parent(tsk, tsk->parent);
2263 	spin_unlock_irqrestore(&psig->siglock, flags);
2264 
2265 	return autoreap;
2266 }
2267 
2268 /**
2269  * do_notify_parent_cldstop - notify parent of stopped/continued state change
2270  * @tsk: task reporting the state change
2271  * @for_ptracer: the notification is for ptracer
2272  * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2273  *
2274  * Notify @tsk's parent that the stopped/continued state has changed.  If
2275  * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
2276  * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2277  *
2278  * CONTEXT:
2279  * Must be called with tasklist_lock at least read locked.
2280  */
2281 static void do_notify_parent_cldstop(struct task_struct *tsk,
2282 				     bool for_ptracer, int why)
2283 {
2284 	struct kernel_siginfo info;
2285 	unsigned long flags;
2286 	struct task_struct *parent;
2287 	struct sighand_struct *sighand;
2288 	u64 utime, stime;
2289 
2290 	if (for_ptracer) {
2291 		parent = tsk->parent;
2292 	} else {
2293 		tsk = tsk->group_leader;
2294 		parent = tsk->real_parent;
2295 	}
2296 
2297 	clear_siginfo(&info);
2298 	info.si_signo = SIGCHLD;
2299 	info.si_errno = 0;
2300 	/*
2301 	 * see comment in do_notify_parent() about the following 4 lines
2302 	 */
2303 	rcu_read_lock();
2304 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2305 	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2306 	rcu_read_unlock();
2307 
2308 	task_cputime(tsk, &utime, &stime);
2309 	info.si_utime = nsec_to_clock_t(utime);
2310 	info.si_stime = nsec_to_clock_t(stime);
2311 
2312  	info.si_code = why;
2313  	switch (why) {
2314  	case CLD_CONTINUED:
2315  		info.si_status = SIGCONT;
2316  		break;
2317  	case CLD_STOPPED:
2318  		info.si_status = tsk->signal->group_exit_code & 0x7f;
2319  		break;
2320  	case CLD_TRAPPED:
2321  		info.si_status = tsk->exit_code & 0x7f;
2322  		break;
2323  	default:
2324  		BUG();
2325  	}
2326 
2327 	sighand = parent->sighand;
2328 	spin_lock_irqsave(&sighand->siglock, flags);
2329 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2330 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2331 		send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
2332 	/*
2333 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2334 	 */
2335 	__wake_up_parent(tsk, parent);
2336 	spin_unlock_irqrestore(&sighand->siglock, flags);
2337 }
2338 
2339 /*
2340  * This must be called with current->sighand->siglock held.
2341  *
2342  * This should be the path for all ptrace stops.
2343  * We always set current->last_siginfo while stopped here.
2344  * That makes it a way to test a stopped process for
2345  * being ptrace-stopped vs being job-control-stopped.
2346  *
2347  * Returns the signal the ptracer requested the code resume
2348  * with.  If the code did not stop because the tracer is gone,
2349  * the stop signal remains unchanged unless clear_code.
2350  */
2351 static int ptrace_stop(int exit_code, int why, unsigned long message,
2352 		       kernel_siginfo_t *info)
2353 	__releases(&current->sighand->siglock)
2354 	__acquires(&current->sighand->siglock)
2355 {
2356 	bool gstop_done = false;
2357 
2358 	if (arch_ptrace_stop_needed()) {
2359 		/*
2360 		 * The arch code has something special to do before a
2361 		 * ptrace stop.  This is allowed to block, e.g. for faults
2362 		 * on user stack pages.  We can't keep the siglock while
2363 		 * calling arch_ptrace_stop, so we must release it now.
2364 		 * To preserve proper semantics, we must do this before
2365 		 * any signal bookkeeping like checking group_stop_count.
2366 		 */
2367 		spin_unlock_irq(&current->sighand->siglock);
2368 		arch_ptrace_stop();
2369 		spin_lock_irq(&current->sighand->siglock);
2370 	}
2371 
2372 	/*
2373 	 * After this point ptrace_signal_wake_up or signal_wake_up
2374 	 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
2375 	 * signal comes in.  Handle previous ptrace_unlinks and fatal
2376 	 * signals here to prevent ptrace_stop sleeping in schedule.
2377 	 */
2378 	if (!current->ptrace || __fatal_signal_pending(current))
2379 		return exit_code;
2380 
2381 	set_special_state(TASK_TRACED);
2382 	current->jobctl |= JOBCTL_TRACED;
2383 
2384 	/*
2385 	 * We're committing to trapping.  TRACED should be visible before
2386 	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2387 	 * Also, transition to TRACED and updates to ->jobctl should be
2388 	 * atomic with respect to siglock and should be done after the arch
2389 	 * hook as siglock is released and regrabbed across it.
2390 	 *
2391 	 *     TRACER				    TRACEE
2392 	 *
2393 	 *     ptrace_attach()
2394 	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
2395 	 *     do_wait()
2396 	 *       set_current_state()                smp_wmb();
2397 	 *       ptrace_do_wait()
2398 	 *         wait_task_stopped()
2399 	 *           task_stopped_code()
2400 	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
2401 	 */
2402 	smp_wmb();
2403 
2404 	current->ptrace_message = message;
2405 	current->last_siginfo = info;
2406 	current->exit_code = exit_code;
2407 
2408 	/*
2409 	 * If @why is CLD_STOPPED, we're trapping to participate in a group
2410 	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delievered
2411 	 * across siglock relocks since INTERRUPT was scheduled, PENDING
2412 	 * could be clear now.  We act as if SIGCONT is received after
2413 	 * TASK_TRACED is entered - ignore it.
2414 	 */
2415 	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2416 		gstop_done = task_participate_group_stop(current);
2417 
2418 	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2419 	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2420 	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2421 		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2422 
2423 	/* entering a trap, clear TRAPPING */
2424 	task_clear_jobctl_trapping(current);
2425 
2426 	spin_unlock_irq(&current->sighand->siglock);
2427 	read_lock(&tasklist_lock);
2428 	/*
2429 	 * Notify parents of the stop.
2430 	 *
2431 	 * While ptraced, there are two parents - the ptracer and
2432 	 * the real_parent of the group_leader.  The ptracer should
2433 	 * know about every stop while the real parent is only
2434 	 * interested in the completion of group stop.  The states
2435 	 * for the two don't interact with each other.  Notify
2436 	 * separately unless they're gonna be duplicates.
2437 	 */
2438 	if (current->ptrace)
2439 		do_notify_parent_cldstop(current, true, why);
2440 	if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
2441 		do_notify_parent_cldstop(current, false, why);
2442 
2443 	/*
2444 	 * The previous do_notify_parent_cldstop() invocation woke ptracer.
2445 	 * One a PREEMPTION kernel this can result in preemption requirement
2446 	 * which will be fulfilled after read_unlock() and the ptracer will be
2447 	 * put on the CPU.
2448 	 * The ptracer is in wait_task_inactive(, __TASK_TRACED) waiting for
2449 	 * this task wait in schedule(). If this task gets preempted then it
2450 	 * remains enqueued on the runqueue. The ptracer will observe this and
2451 	 * then sleep for a delay of one HZ tick. In the meantime this task
2452 	 * gets scheduled, enters schedule() and will wait for the ptracer.
2453 	 *
2454 	 * This preemption point is not bad from a correctness point of
2455 	 * view but extends the runtime by one HZ tick time due to the
2456 	 * ptracer's sleep.  The preempt-disable section ensures that there
2457 	 * will be no preemption between unlock and schedule() and so
2458 	 * improving the performance since the ptracer will observe that
2459 	 * the tracee is scheduled out once it gets on the CPU.
2460 	 *
2461 	 * On PREEMPT_RT locking tasklist_lock does not disable preemption.
2462 	 * Therefore the task can be preempted after do_notify_parent_cldstop()
2463 	 * before unlocking tasklist_lock so there is no benefit in doing this.
2464 	 *
2465 	 * In fact disabling preemption is harmful on PREEMPT_RT because
2466 	 * the spinlock_t in cgroup_enter_frozen() must not be acquired
2467 	 * with preemption disabled due to the 'sleeping' spinlock
2468 	 * substitution of RT.
2469 	 */
2470 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2471 		preempt_disable();
2472 	read_unlock(&tasklist_lock);
2473 	cgroup_enter_frozen();
2474 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2475 		preempt_enable_no_resched();
2476 	schedule();
2477 	cgroup_leave_frozen(true);
2478 
2479 	/*
2480 	 * We are back.  Now reacquire the siglock before touching
2481 	 * last_siginfo, so that we are sure to have synchronized with
2482 	 * any signal-sending on another CPU that wants to examine it.
2483 	 */
2484 	spin_lock_irq(&current->sighand->siglock);
2485 	exit_code = current->exit_code;
2486 	current->last_siginfo = NULL;
2487 	current->ptrace_message = 0;
2488 	current->exit_code = 0;
2489 
2490 	/* LISTENING can be set only during STOP traps, clear it */
2491 	current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);
2492 
2493 	/*
2494 	 * Queued signals ignored us while we were stopped for tracing.
2495 	 * So check for any that we should take before resuming user mode.
2496 	 * This sets TIF_SIGPENDING, but never clears it.
2497 	 */
2498 	recalc_sigpending_tsk(current);
2499 	return exit_code;
2500 }
2501 
2502 static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
2503 {
2504 	kernel_siginfo_t info;
2505 
2506 	clear_siginfo(&info);
2507 	info.si_signo = signr;
2508 	info.si_code = exit_code;
2509 	info.si_pid = task_pid_vnr(current);
2510 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2511 
2512 	/* Let the debugger run.  */
2513 	return ptrace_stop(exit_code, why, message, &info);
2514 }
2515 
2516 int ptrace_notify(int exit_code, unsigned long message)
2517 {
2518 	int signr;
2519 
2520 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2521 	if (unlikely(task_work_pending(current)))
2522 		task_work_run();
2523 
2524 	spin_lock_irq(&current->sighand->siglock);
2525 	signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
2526 	spin_unlock_irq(&current->sighand->siglock);
2527 	return signr;
2528 }
2529 
2530 /**
2531  * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2532  * @signr: signr causing group stop if initiating
2533  *
2534  * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2535  * and participate in it.  If already set, participate in the existing
2536  * group stop.  If participated in a group stop (and thus slept), %true is
2537  * returned with siglock released.
2538  *
2539  * If ptraced, this function doesn't handle stop itself.  Instead,
2540  * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2541  * untouched.  The caller must ensure that INTERRUPT trap handling takes
2542  * places afterwards.
2543  *
2544  * CONTEXT:
2545  * Must be called with @current->sighand->siglock held, which is released
2546  * on %true return.
2547  *
2548  * RETURNS:
2549  * %false if group stop is already cancelled or ptrace trap is scheduled.
2550  * %true if participated in group stop.
2551  */
2552 static bool do_signal_stop(int signr)
2553 	__releases(&current->sighand->siglock)
2554 {
2555 	struct signal_struct *sig = current->signal;
2556 
2557 	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2558 		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2559 		struct task_struct *t;
2560 
2561 		/* signr will be recorded in task->jobctl for retries */
2562 		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2563 
2564 		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2565 		    unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
2566 		    unlikely(sig->group_exec_task))
2567 			return false;
2568 		/*
2569 		 * There is no group stop already in progress.  We must
2570 		 * initiate one now.
2571 		 *
2572 		 * While ptraced, a task may be resumed while group stop is
2573 		 * still in effect and then receive a stop signal and
2574 		 * initiate another group stop.  This deviates from the
2575 		 * usual behavior as two consecutive stop signals can't
2576 		 * cause two group stops when !ptraced.  That is why we
2577 		 * also check !task_is_stopped(t) below.
2578 		 *
2579 		 * The condition can be distinguished by testing whether
2580 		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
2581 		 * group_exit_code in such case.
2582 		 *
2583 		 * This is not necessary for SIGNAL_STOP_CONTINUED because
2584 		 * an intervening stop signal is required to cause two
2585 		 * continued events regardless of ptrace.
2586 		 */
2587 		if (!(sig->flags & SIGNAL_STOP_STOPPED))
2588 			sig->group_exit_code = signr;
2589 
2590 		sig->group_stop_count = 0;
2591 		if (task_set_jobctl_pending(current, signr | gstop))
2592 			sig->group_stop_count++;
2593 
2594 		for_other_threads(current, t) {
2595 			/*
2596 			 * Setting state to TASK_STOPPED for a group
2597 			 * stop is always done with the siglock held,
2598 			 * so this check has no races.
2599 			 */
2600 			if (!task_is_stopped(t) &&
2601 			    task_set_jobctl_pending(t, signr | gstop)) {
2602 				sig->group_stop_count++;
2603 				if (likely(!(t->ptrace & PT_SEIZED)))
2604 					signal_wake_up(t, 0);
2605 				else
2606 					ptrace_trap_notify(t);
2607 			}
2608 		}
2609 	}
2610 
2611 	if (likely(!current->ptrace)) {
2612 		int notify = 0;
2613 
2614 		/*
2615 		 * If there are no other threads in the group, or if there
2616 		 * is a group stop in progress and we are the last to stop,
2617 		 * report to the parent.
2618 		 */
2619 		if (task_participate_group_stop(current))
2620 			notify = CLD_STOPPED;
2621 
2622 		current->jobctl |= JOBCTL_STOPPED;
2623 		set_special_state(TASK_STOPPED);
2624 		spin_unlock_irq(&current->sighand->siglock);
2625 
2626 		/*
2627 		 * Notify the parent of the group stop completion.  Because
2628 		 * we're not holding either the siglock or tasklist_lock
2629 		 * here, ptracer may attach inbetween; however, this is for
2630 		 * group stop and should always be delivered to the real
2631 		 * parent of the group leader.  The new ptracer will get
2632 		 * its notification when this task transitions into
2633 		 * TASK_TRACED.
2634 		 */
2635 		if (notify) {
2636 			read_lock(&tasklist_lock);
2637 			do_notify_parent_cldstop(current, false, notify);
2638 			read_unlock(&tasklist_lock);
2639 		}
2640 
2641 		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2642 		cgroup_enter_frozen();
2643 		schedule();
2644 		return true;
2645 	} else {
2646 		/*
2647 		 * While ptraced, group stop is handled by STOP trap.
2648 		 * Schedule it and let the caller deal with it.
2649 		 */
2650 		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2651 		return false;
2652 	}
2653 }
2654 
2655 /**
2656  * do_jobctl_trap - take care of ptrace jobctl traps
2657  *
2658  * When PT_SEIZED, it's used for both group stop and explicit
2659  * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2660  * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2661  * the stop signal; otherwise, %SIGTRAP.
2662  *
2663  * When !PT_SEIZED, it's used only for group stop trap with stop signal
2664  * number as exit_code and no siginfo.
2665  *
2666  * CONTEXT:
2667  * Must be called with @current->sighand->siglock held, which may be
2668  * released and re-acquired before returning with intervening sleep.
2669  */
2670 static void do_jobctl_trap(void)
2671 {
2672 	struct signal_struct *signal = current->signal;
2673 	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2674 
2675 	if (current->ptrace & PT_SEIZED) {
2676 		if (!signal->group_stop_count &&
2677 		    !(signal->flags & SIGNAL_STOP_STOPPED))
2678 			signr = SIGTRAP;
2679 		WARN_ON_ONCE(!signr);
2680 		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2681 				 CLD_STOPPED, 0);
2682 	} else {
2683 		WARN_ON_ONCE(!signr);
2684 		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2685 	}
2686 }
2687 
2688 /**
2689  * do_freezer_trap - handle the freezer jobctl trap
2690  *
2691  * Puts the task into frozen state, if only the task is not about to quit.
2692  * In this case it drops JOBCTL_TRAP_FREEZE.
2693  *
2694  * CONTEXT:
2695  * Must be called with @current->sighand->siglock held,
2696  * which is always released before returning.
2697  */
2698 static void do_freezer_trap(void)
2699 	__releases(&current->sighand->siglock)
2700 {
2701 	/*
2702 	 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2703 	 * let's make another loop to give it a chance to be handled.
2704 	 * In any case, we'll return back.
2705 	 */
2706 	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2707 	     JOBCTL_TRAP_FREEZE) {
2708 		spin_unlock_irq(&current->sighand->siglock);
2709 		return;
2710 	}
2711 
2712 	/*
2713 	 * Now we're sure that there is no pending fatal signal and no
2714 	 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2715 	 * immediately (if there is a non-fatal signal pending), and
2716 	 * put the task into sleep.
2717 	 */
2718 	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
2719 	clear_thread_flag(TIF_SIGPENDING);
2720 	spin_unlock_irq(&current->sighand->siglock);
2721 	cgroup_enter_frozen();
2722 	schedule();
2723 
2724 	/*
2725 	 * We could've been woken by task_work, run it to clear
2726 	 * TIF_NOTIFY_SIGNAL. The caller will retry if necessary.
2727 	 */
2728 	clear_notify_signal();
2729 	if (unlikely(task_work_pending(current)))
2730 		task_work_run();
2731 }
2732 
2733 static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
2734 {
2735 	/*
2736 	 * We do not check sig_kernel_stop(signr) but set this marker
2737 	 * unconditionally because we do not know whether debugger will
2738 	 * change signr. This flag has no meaning unless we are going
2739 	 * to stop after return from ptrace_stop(). In this case it will
2740 	 * be checked in do_signal_stop(), we should only stop if it was
2741 	 * not cleared by SIGCONT while we were sleeping. See also the
2742 	 * comment in dequeue_signal().
2743 	 */
2744 	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2745 	signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);
2746 
2747 	/* We're back.  Did the debugger cancel the sig?  */
2748 	if (signr == 0)
2749 		return signr;
2750 
2751 	/*
2752 	 * Update the siginfo structure if the signal has
2753 	 * changed.  If the debugger wanted something
2754 	 * specific in the siginfo structure then it should
2755 	 * have updated *info via PTRACE_SETSIGINFO.
2756 	 */
2757 	if (signr != info->si_signo) {
2758 		clear_siginfo(info);
2759 		info->si_signo = signr;
2760 		info->si_errno = 0;
2761 		info->si_code = SI_USER;
2762 		rcu_read_lock();
2763 		info->si_pid = task_pid_vnr(current->parent);
2764 		info->si_uid = from_kuid_munged(current_user_ns(),
2765 						task_uid(current->parent));
2766 		rcu_read_unlock();
2767 	}
2768 
2769 	/* If the (new) signal is now blocked, requeue it.  */
2770 	if (sigismember(&current->blocked, signr) ||
2771 	    fatal_signal_pending(current)) {
2772 		send_signal_locked(signr, info, current, type);
2773 		signr = 0;
2774 	}
2775 
2776 	return signr;
2777 }
2778 
2779 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2780 {
2781 	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2782 	case SIL_FAULT:
2783 	case SIL_FAULT_TRAPNO:
2784 	case SIL_FAULT_MCEERR:
2785 	case SIL_FAULT_BNDERR:
2786 	case SIL_FAULT_PKUERR:
2787 	case SIL_FAULT_PERF_EVENT:
2788 		ksig->info.si_addr = arch_untagged_si_addr(
2789 			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2790 		break;
2791 	case SIL_KILL:
2792 	case SIL_TIMER:
2793 	case SIL_POLL:
2794 	case SIL_CHLD:
2795 	case SIL_RT:
2796 	case SIL_SYS:
2797 		break;
2798 	}
2799 }
2800 
2801 bool get_signal(struct ksignal *ksig)
2802 {
2803 	struct sighand_struct *sighand = current->sighand;
2804 	struct signal_struct *signal = current->signal;
2805 	int signr;
2806 
2807 	clear_notify_signal();
2808 	if (unlikely(task_work_pending(current)))
2809 		task_work_run();
2810 
2811 	if (!task_sigpending(current))
2812 		return false;
2813 
2814 	if (unlikely(uprobe_deny_signal()))
2815 		return false;
2816 
2817 	/*
2818 	 * Do this once, we can't return to user-mode if freezing() == T.
2819 	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2820 	 * thus do not need another check after return.
2821 	 */
2822 	try_to_freeze();
2823 
2824 relock:
2825 	spin_lock_irq(&sighand->siglock);
2826 
2827 	/*
2828 	 * Every stopped thread goes here after wakeup. Check to see if
2829 	 * we should notify the parent, prepare_signal(SIGCONT) encodes
2830 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2831 	 */
2832 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2833 		int why;
2834 
2835 		if (signal->flags & SIGNAL_CLD_CONTINUED)
2836 			why = CLD_CONTINUED;
2837 		else
2838 			why = CLD_STOPPED;
2839 
2840 		signal->flags &= ~SIGNAL_CLD_MASK;
2841 
2842 		spin_unlock_irq(&sighand->siglock);
2843 
2844 		/*
2845 		 * Notify the parent that we're continuing.  This event is
2846 		 * always per-process and doesn't make whole lot of sense
2847 		 * for ptracers, who shouldn't consume the state via
2848 		 * wait(2) either, but, for backward compatibility, notify
2849 		 * the ptracer of the group leader too unless it's gonna be
2850 		 * a duplicate.
2851 		 */
2852 		read_lock(&tasklist_lock);
2853 		do_notify_parent_cldstop(current, false, why);
2854 
2855 		if (ptrace_reparented(current->group_leader))
2856 			do_notify_parent_cldstop(current->group_leader,
2857 						true, why);
2858 		read_unlock(&tasklist_lock);
2859 
2860 		goto relock;
2861 	}
2862 
2863 	for (;;) {
2864 		struct k_sigaction *ka;
2865 		enum pid_type type;
2866 
2867 		/* Has this task already been marked for death? */
2868 		if ((signal->flags & SIGNAL_GROUP_EXIT) ||
2869 		     signal->group_exec_task) {
2870 			signr = SIGKILL;
2871 			sigdelset(&current->pending.signal, SIGKILL);
2872 			trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2873 					     &sighand->action[SIGKILL-1]);
2874 			recalc_sigpending();
2875 			/*
2876 			 * implies do_group_exit() or return to PF_USER_WORKER,
2877 			 * no need to initialize ksig->info/etc.
2878 			 */
2879 			goto fatal;
2880 		}
2881 
2882 		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2883 		    do_signal_stop(0))
2884 			goto relock;
2885 
2886 		if (unlikely(current->jobctl &
2887 			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2888 			if (current->jobctl & JOBCTL_TRAP_MASK) {
2889 				do_jobctl_trap();
2890 				spin_unlock_irq(&sighand->siglock);
2891 			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2892 				do_freezer_trap();
2893 
2894 			goto relock;
2895 		}
2896 
2897 		/*
2898 		 * If the task is leaving the frozen state, let's update
2899 		 * cgroup counters and reset the frozen bit.
2900 		 */
2901 		if (unlikely(cgroup_task_frozen(current))) {
2902 			spin_unlock_irq(&sighand->siglock);
2903 			cgroup_leave_frozen(false);
2904 			goto relock;
2905 		}
2906 
2907 		/*
2908 		 * Signals generated by the execution of an instruction
2909 		 * need to be delivered before any other pending signals
2910 		 * so that the instruction pointer in the signal stack
2911 		 * frame points to the faulting instruction.
2912 		 */
2913 		type = PIDTYPE_PID;
2914 		signr = dequeue_synchronous_signal(&ksig->info);
2915 		if (!signr)
2916 			signr = dequeue_signal(&current->blocked, &ksig->info, &type);
2917 
2918 		if (!signr)
2919 			break; /* will return 0 */
2920 
2921 		if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2922 		    !(sighand->action[signr -1].sa.sa_flags & SA_IMMUTABLE)) {
2923 			signr = ptrace_signal(signr, &ksig->info, type);
2924 			if (!signr)
2925 				continue;
2926 		}
2927 
2928 		ka = &sighand->action[signr-1];
2929 
2930 		/* Trace actually delivered signals. */
2931 		trace_signal_deliver(signr, &ksig->info, ka);
2932 
2933 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2934 			continue;
2935 		if (ka->sa.sa_handler != SIG_DFL) {
2936 			/* Run the handler.  */
2937 			ksig->ka = *ka;
2938 
2939 			if (ka->sa.sa_flags & SA_ONESHOT)
2940 				ka->sa.sa_handler = SIG_DFL;
2941 
2942 			break; /* will return non-zero "signr" value */
2943 		}
2944 
2945 		/*
2946 		 * Now we are doing the default action for this signal.
2947 		 */
2948 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2949 			continue;
2950 
2951 		/*
2952 		 * Global init gets no signals it doesn't want.
2953 		 * Container-init gets no signals it doesn't want from same
2954 		 * container.
2955 		 *
2956 		 * Note that if global/container-init sees a sig_kernel_only()
2957 		 * signal here, the signal must have been generated internally
2958 		 * or must have come from an ancestor namespace. In either
2959 		 * case, the signal cannot be dropped.
2960 		 */
2961 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2962 				!sig_kernel_only(signr))
2963 			continue;
2964 
2965 		if (sig_kernel_stop(signr)) {
2966 			/*
2967 			 * The default action is to stop all threads in
2968 			 * the thread group.  The job control signals
2969 			 * do nothing in an orphaned pgrp, but SIGSTOP
2970 			 * always works.  Note that siglock needs to be
2971 			 * dropped during the call to is_orphaned_pgrp()
2972 			 * because of lock ordering with tasklist_lock.
2973 			 * This allows an intervening SIGCONT to be posted.
2974 			 * We need to check for that and bail out if necessary.
2975 			 */
2976 			if (signr != SIGSTOP) {
2977 				spin_unlock_irq(&sighand->siglock);
2978 
2979 				/* signals can be posted during this window */
2980 
2981 				if (is_current_pgrp_orphaned())
2982 					goto relock;
2983 
2984 				spin_lock_irq(&sighand->siglock);
2985 			}
2986 
2987 			if (likely(do_signal_stop(signr))) {
2988 				/* It released the siglock.  */
2989 				goto relock;
2990 			}
2991 
2992 			/*
2993 			 * We didn't actually stop, due to a race
2994 			 * with SIGCONT or something like that.
2995 			 */
2996 			continue;
2997 		}
2998 
2999 	fatal:
3000 		spin_unlock_irq(&sighand->siglock);
3001 		if (unlikely(cgroup_task_frozen(current)))
3002 			cgroup_leave_frozen(true);
3003 
3004 		/*
3005 		 * Anything else is fatal, maybe with a core dump.
3006 		 */
3007 		current->flags |= PF_SIGNALED;
3008 
3009 		if (sig_kernel_coredump(signr)) {
3010 			if (print_fatal_signals)
3011 				print_fatal_signal(signr);
3012 			proc_coredump_connector(current);
3013 			/*
3014 			 * If it was able to dump core, this kills all
3015 			 * other threads in the group and synchronizes with
3016 			 * their demise.  If we lost the race with another
3017 			 * thread getting here, it set group_exit_code
3018 			 * first and our do_group_exit call below will use
3019 			 * that value and ignore the one we pass it.
3020 			 */
3021 			do_coredump(&ksig->info);
3022 		}
3023 
3024 		/*
3025 		 * PF_USER_WORKER threads will catch and exit on fatal signals
3026 		 * themselves. They have cleanup that must be performed, so we
3027 		 * cannot call do_exit() on their behalf. Note that ksig won't
3028 		 * be properly initialized, PF_USER_WORKER's shouldn't use it.
3029 		 */
3030 		if (current->flags & PF_USER_WORKER)
3031 			goto out;
3032 
3033 		/*
3034 		 * Death signals, no core dump.
3035 		 */
3036 		do_group_exit(signr);
3037 		/* NOTREACHED */
3038 	}
3039 	spin_unlock_irq(&sighand->siglock);
3040 
3041 	ksig->sig = signr;
3042 
3043 	if (signr && !(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
3044 		hide_si_addr_tag_bits(ksig);
3045 out:
3046 	return signr > 0;
3047 }
3048 
3049 /**
3050  * signal_delivered - called after signal delivery to update blocked signals
3051  * @ksig:		kernel signal struct
3052  * @stepping:		nonzero if debugger single-step or block-step in use
3053  *
3054  * This function should be called when a signal has successfully been
3055  * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
3056  * is always blocked), and the signal itself is blocked unless %SA_NODEFER
3057  * is set in @ksig->ka.sa.sa_flags.  Tracing is notified.
3058  */
3059 static void signal_delivered(struct ksignal *ksig, int stepping)
3060 {
3061 	sigset_t blocked;
3062 
3063 	/* A signal was successfully delivered, and the
3064 	   saved sigmask was stored on the signal frame,
3065 	   and will be restored by sigreturn.  So we can
3066 	   simply clear the restore sigmask flag.  */
3067 	clear_restore_sigmask();
3068 
3069 	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
3070 	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
3071 		sigaddset(&blocked, ksig->sig);
3072 	set_current_blocked(&blocked);
3073 	if (current->sas_ss_flags & SS_AUTODISARM)
3074 		sas_ss_reset(current);
3075 	if (stepping)
3076 		ptrace_notify(SIGTRAP, 0);
3077 }
3078 
3079 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
3080 {
3081 	if (failed)
3082 		force_sigsegv(ksig->sig);
3083 	else
3084 		signal_delivered(ksig, stepping);
3085 }
3086 
3087 /*
3088  * It could be that complete_signal() picked us to notify about the
3089  * group-wide signal. Other threads should be notified now to take
3090  * the shared signals in @which since we will not.
3091  */
3092 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
3093 {
3094 	sigset_t retarget;
3095 	struct task_struct *t;
3096 
3097 	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
3098 	if (sigisemptyset(&retarget))
3099 		return;
3100 
3101 	for_other_threads(tsk, t) {
3102 		if (t->flags & PF_EXITING)
3103 			continue;
3104 
3105 		if (!has_pending_signals(&retarget, &t->blocked))
3106 			continue;
3107 		/* Remove the signals this thread can handle. */
3108 		sigandsets(&retarget, &retarget, &t->blocked);
3109 
3110 		if (!task_sigpending(t))
3111 			signal_wake_up(t, 0);
3112 
3113 		if (sigisemptyset(&retarget))
3114 			break;
3115 	}
3116 }
3117 
3118 void exit_signals(struct task_struct *tsk)
3119 {
3120 	int group_stop = 0;
3121 	sigset_t unblocked;
3122 
3123 	/*
3124 	 * @tsk is about to have PF_EXITING set - lock out users which
3125 	 * expect stable threadgroup.
3126 	 */
3127 	cgroup_threadgroup_change_begin(tsk);
3128 
3129 	if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
3130 		sched_mm_cid_exit_signals(tsk);
3131 		tsk->flags |= PF_EXITING;
3132 		cgroup_threadgroup_change_end(tsk);
3133 		return;
3134 	}
3135 
3136 	spin_lock_irq(&tsk->sighand->siglock);
3137 	/*
3138 	 * From now this task is not visible for group-wide signals,
3139 	 * see wants_signal(), do_signal_stop().
3140 	 */
3141 	sched_mm_cid_exit_signals(tsk);
3142 	tsk->flags |= PF_EXITING;
3143 
3144 	cgroup_threadgroup_change_end(tsk);
3145 
3146 	if (!task_sigpending(tsk))
3147 		goto out;
3148 
3149 	unblocked = tsk->blocked;
3150 	signotset(&unblocked);
3151 	retarget_shared_pending(tsk, &unblocked);
3152 
3153 	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
3154 	    task_participate_group_stop(tsk))
3155 		group_stop = CLD_STOPPED;
3156 out:
3157 	spin_unlock_irq(&tsk->sighand->siglock);
3158 
3159 	/*
3160 	 * If group stop has completed, deliver the notification.  This
3161 	 * should always go to the real parent of the group leader.
3162 	 */
3163 	if (unlikely(group_stop)) {
3164 		read_lock(&tasklist_lock);
3165 		do_notify_parent_cldstop(tsk, false, group_stop);
3166 		read_unlock(&tasklist_lock);
3167 	}
3168 }
3169 
3170 /*
3171  * System call entry points.
3172  */
3173 
3174 /**
3175  *  sys_restart_syscall - restart a system call
3176  */
3177 SYSCALL_DEFINE0(restart_syscall)
3178 {
3179 	struct restart_block *restart = &current->restart_block;
3180 	return restart->fn(restart);
3181 }
3182 
3183 long do_no_restart_syscall(struct restart_block *param)
3184 {
3185 	return -EINTR;
3186 }
3187 
3188 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3189 {
3190 	if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3191 		sigset_t newblocked;
3192 		/* A set of now blocked but previously unblocked signals. */
3193 		sigandnsets(&newblocked, newset, &current->blocked);
3194 		retarget_shared_pending(tsk, &newblocked);
3195 	}
3196 	tsk->blocked = *newset;
3197 	recalc_sigpending();
3198 }
3199 
3200 /**
3201  * set_current_blocked - change current->blocked mask
3202  * @newset: new mask
3203  *
3204  * It is wrong to change ->blocked directly, this helper should be used
3205  * to ensure the process can't miss a shared signal we are going to block.
3206  */
3207 void set_current_blocked(sigset_t *newset)
3208 {
3209 	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3210 	__set_current_blocked(newset);
3211 }
3212 
3213 void __set_current_blocked(const sigset_t *newset)
3214 {
3215 	struct task_struct *tsk = current;
3216 
3217 	/*
3218 	 * In case the signal mask hasn't changed, there is nothing we need
3219 	 * to do. The current->blocked shouldn't be modified by other task.
3220 	 */
3221 	if (sigequalsets(&tsk->blocked, newset))
3222 		return;
3223 
3224 	spin_lock_irq(&tsk->sighand->siglock);
3225 	__set_task_blocked(tsk, newset);
3226 	spin_unlock_irq(&tsk->sighand->siglock);
3227 }
3228 
3229 /*
3230  * This is also useful for kernel threads that want to temporarily
3231  * (or permanently) block certain signals.
3232  *
3233  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3234  * interface happily blocks "unblockable" signals like SIGKILL
3235  * and friends.
3236  */
3237 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3238 {
3239 	struct task_struct *tsk = current;
3240 	sigset_t newset;
3241 
3242 	/* Lockless, only current can change ->blocked, never from irq */
3243 	if (oldset)
3244 		*oldset = tsk->blocked;
3245 
3246 	switch (how) {
3247 	case SIG_BLOCK:
3248 		sigorsets(&newset, &tsk->blocked, set);
3249 		break;
3250 	case SIG_UNBLOCK:
3251 		sigandnsets(&newset, &tsk->blocked, set);
3252 		break;
3253 	case SIG_SETMASK:
3254 		newset = *set;
3255 		break;
3256 	default:
3257 		return -EINVAL;
3258 	}
3259 
3260 	__set_current_blocked(&newset);
3261 	return 0;
3262 }
3263 EXPORT_SYMBOL(sigprocmask);
3264 
3265 /*
3266  * The api helps set app-provided sigmasks.
3267  *
3268  * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3269  * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3270  *
3271  * Note that it does set_restore_sigmask() in advance, so it must be always
3272  * paired with restore_saved_sigmask_unless() before return from syscall.
3273  */
3274 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3275 {
3276 	sigset_t kmask;
3277 
3278 	if (!umask)
3279 		return 0;
3280 	if (sigsetsize != sizeof(sigset_t))
3281 		return -EINVAL;
3282 	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3283 		return -EFAULT;
3284 
3285 	set_restore_sigmask();
3286 	current->saved_sigmask = current->blocked;
3287 	set_current_blocked(&kmask);
3288 
3289 	return 0;
3290 }
3291 
3292 #ifdef CONFIG_COMPAT
3293 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3294 			    size_t sigsetsize)
3295 {
3296 	sigset_t kmask;
3297 
3298 	if (!umask)
3299 		return 0;
3300 	if (sigsetsize != sizeof(compat_sigset_t))
3301 		return -EINVAL;
3302 	if (get_compat_sigset(&kmask, umask))
3303 		return -EFAULT;
3304 
3305 	set_restore_sigmask();
3306 	current->saved_sigmask = current->blocked;
3307 	set_current_blocked(&kmask);
3308 
3309 	return 0;
3310 }
3311 #endif
3312 
3313 /**
3314  *  sys_rt_sigprocmask - change the list of currently blocked signals
3315  *  @how: whether to add, remove, or set signals
3316  *  @nset: stores pending signals
3317  *  @oset: previous value of signal mask if non-null
3318  *  @sigsetsize: size of sigset_t type
3319  */
3320 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3321 		sigset_t __user *, oset, size_t, sigsetsize)
3322 {
3323 	sigset_t old_set, new_set;
3324 	int error;
3325 
3326 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3327 	if (sigsetsize != sizeof(sigset_t))
3328 		return -EINVAL;
3329 
3330 	old_set = current->blocked;
3331 
3332 	if (nset) {
3333 		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3334 			return -EFAULT;
3335 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3336 
3337 		error = sigprocmask(how, &new_set, NULL);
3338 		if (error)
3339 			return error;
3340 	}
3341 
3342 	if (oset) {
3343 		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3344 			return -EFAULT;
3345 	}
3346 
3347 	return 0;
3348 }
3349 
3350 #ifdef CONFIG_COMPAT
3351 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3352 		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3353 {
3354 	sigset_t old_set = current->blocked;
3355 
3356 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3357 	if (sigsetsize != sizeof(sigset_t))
3358 		return -EINVAL;
3359 
3360 	if (nset) {
3361 		sigset_t new_set;
3362 		int error;
3363 		if (get_compat_sigset(&new_set, nset))
3364 			return -EFAULT;
3365 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3366 
3367 		error = sigprocmask(how, &new_set, NULL);
3368 		if (error)
3369 			return error;
3370 	}
3371 	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3372 }
3373 #endif
3374 
3375 static void do_sigpending(sigset_t *set)
3376 {
3377 	spin_lock_irq(&current->sighand->siglock);
3378 	sigorsets(set, &current->pending.signal,
3379 		  &current->signal->shared_pending.signal);
3380 	spin_unlock_irq(&current->sighand->siglock);
3381 
3382 	/* Outside the lock because only this thread touches it.  */
3383 	sigandsets(set, &current->blocked, set);
3384 }
3385 
3386 /**
3387  *  sys_rt_sigpending - examine a pending signal that has been raised
3388  *			while blocked
3389  *  @uset: stores pending signals
3390  *  @sigsetsize: size of sigset_t type or larger
3391  */
3392 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3393 {
3394 	sigset_t set;
3395 
3396 	if (sigsetsize > sizeof(*uset))
3397 		return -EINVAL;
3398 
3399 	do_sigpending(&set);
3400 
3401 	if (copy_to_user(uset, &set, sigsetsize))
3402 		return -EFAULT;
3403 
3404 	return 0;
3405 }
3406 
3407 #ifdef CONFIG_COMPAT
3408 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3409 		compat_size_t, sigsetsize)
3410 {
3411 	sigset_t set;
3412 
3413 	if (sigsetsize > sizeof(*uset))
3414 		return -EINVAL;
3415 
3416 	do_sigpending(&set);
3417 
3418 	return put_compat_sigset(uset, &set, sigsetsize);
3419 }
3420 #endif
3421 
3422 static const struct {
3423 	unsigned char limit, layout;
3424 } sig_sicodes[] = {
3425 	[SIGILL]  = { NSIGILL,  SIL_FAULT },
3426 	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
3427 	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3428 	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
3429 	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3430 #if defined(SIGEMT)
3431 	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
3432 #endif
3433 	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3434 	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
3435 	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
3436 };
3437 
3438 static bool known_siginfo_layout(unsigned sig, int si_code)
3439 {
3440 	if (si_code == SI_KERNEL)
3441 		return true;
3442 	else if ((si_code > SI_USER)) {
3443 		if (sig_specific_sicodes(sig)) {
3444 			if (si_code <= sig_sicodes[sig].limit)
3445 				return true;
3446 		}
3447 		else if (si_code <= NSIGPOLL)
3448 			return true;
3449 	}
3450 	else if (si_code >= SI_DETHREAD)
3451 		return true;
3452 	else if (si_code == SI_ASYNCNL)
3453 		return true;
3454 	return false;
3455 }
3456 
3457 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3458 {
3459 	enum siginfo_layout layout = SIL_KILL;
3460 	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3461 		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3462 		    (si_code <= sig_sicodes[sig].limit)) {
3463 			layout = sig_sicodes[sig].layout;
3464 			/* Handle the exceptions */
3465 			if ((sig == SIGBUS) &&
3466 			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3467 				layout = SIL_FAULT_MCEERR;
3468 			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3469 				layout = SIL_FAULT_BNDERR;
3470 #ifdef SEGV_PKUERR
3471 			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3472 				layout = SIL_FAULT_PKUERR;
3473 #endif
3474 			else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3475 				layout = SIL_FAULT_PERF_EVENT;
3476 			else if (IS_ENABLED(CONFIG_SPARC) &&
3477 				 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3478 				layout = SIL_FAULT_TRAPNO;
3479 			else if (IS_ENABLED(CONFIG_ALPHA) &&
3480 				 ((sig == SIGFPE) ||
3481 				  ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3482 				layout = SIL_FAULT_TRAPNO;
3483 		}
3484 		else if (si_code <= NSIGPOLL)
3485 			layout = SIL_POLL;
3486 	} else {
3487 		if (si_code == SI_TIMER)
3488 			layout = SIL_TIMER;
3489 		else if (si_code == SI_SIGIO)
3490 			layout = SIL_POLL;
3491 		else if (si_code < 0)
3492 			layout = SIL_RT;
3493 	}
3494 	return layout;
3495 }
3496 
3497 static inline char __user *si_expansion(const siginfo_t __user *info)
3498 {
3499 	return ((char __user *)info) + sizeof(struct kernel_siginfo);
3500 }
3501 
3502 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3503 {
3504 	char __user *expansion = si_expansion(to);
3505 	if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
3506 		return -EFAULT;
3507 	if (clear_user(expansion, SI_EXPANSION_SIZE))
3508 		return -EFAULT;
3509 	return 0;
3510 }
3511 
3512 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3513 				       const siginfo_t __user *from)
3514 {
3515 	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3516 		char __user *expansion = si_expansion(from);
3517 		char buf[SI_EXPANSION_SIZE];
3518 		int i;
3519 		/*
3520 		 * An unknown si_code might need more than
3521 		 * sizeof(struct kernel_siginfo) bytes.  Verify all of the
3522 		 * extra bytes are 0.  This guarantees copy_siginfo_to_user
3523 		 * will return this data to userspace exactly.
3524 		 */
3525 		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3526 			return -EFAULT;
3527 		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3528 			if (buf[i] != 0)
3529 				return -E2BIG;
3530 		}
3531 	}
3532 	return 0;
3533 }
3534 
3535 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3536 				    const siginfo_t __user *from)
3537 {
3538 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3539 		return -EFAULT;
3540 	to->si_signo = signo;
3541 	return post_copy_siginfo_from_user(to, from);
3542 }
3543 
3544 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3545 {
3546 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3547 		return -EFAULT;
3548 	return post_copy_siginfo_from_user(to, from);
3549 }
3550 
3551 #ifdef CONFIG_COMPAT
3552 /**
3553  * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3554  * @to: compat siginfo destination
3555  * @from: kernel siginfo source
3556  *
3557  * Note: This function does not work properly for the SIGCHLD on x32, but
3558  * fortunately it doesn't have to.  The only valid callers for this function are
3559  * copy_siginfo_to_user32, which is overriden for x32 and the coredump code.
3560  * The latter does not care because SIGCHLD will never cause a coredump.
3561  */
3562 void copy_siginfo_to_external32(struct compat_siginfo *to,
3563 		const struct kernel_siginfo *from)
3564 {
3565 	memset(to, 0, sizeof(*to));
3566 
3567 	to->si_signo = from->si_signo;
3568 	to->si_errno = from->si_errno;
3569 	to->si_code  = from->si_code;
3570 	switch(siginfo_layout(from->si_signo, from->si_code)) {
3571 	case SIL_KILL:
3572 		to->si_pid = from->si_pid;
3573 		to->si_uid = from->si_uid;
3574 		break;
3575 	case SIL_TIMER:
3576 		to->si_tid     = from->si_tid;
3577 		to->si_overrun = from->si_overrun;
3578 		to->si_int     = from->si_int;
3579 		break;
3580 	case SIL_POLL:
3581 		to->si_band = from->si_band;
3582 		to->si_fd   = from->si_fd;
3583 		break;
3584 	case SIL_FAULT:
3585 		to->si_addr = ptr_to_compat(from->si_addr);
3586 		break;
3587 	case SIL_FAULT_TRAPNO:
3588 		to->si_addr = ptr_to_compat(from->si_addr);
3589 		to->si_trapno = from->si_trapno;
3590 		break;
3591 	case SIL_FAULT_MCEERR:
3592 		to->si_addr = ptr_to_compat(from->si_addr);
3593 		to->si_addr_lsb = from->si_addr_lsb;
3594 		break;
3595 	case SIL_FAULT_BNDERR:
3596 		to->si_addr = ptr_to_compat(from->si_addr);
3597 		to->si_lower = ptr_to_compat(from->si_lower);
3598 		to->si_upper = ptr_to_compat(from->si_upper);
3599 		break;
3600 	case SIL_FAULT_PKUERR:
3601 		to->si_addr = ptr_to_compat(from->si_addr);
3602 		to->si_pkey = from->si_pkey;
3603 		break;
3604 	case SIL_FAULT_PERF_EVENT:
3605 		to->si_addr = ptr_to_compat(from->si_addr);
3606 		to->si_perf_data = from->si_perf_data;
3607 		to->si_perf_type = from->si_perf_type;
3608 		to->si_perf_flags = from->si_perf_flags;
3609 		break;
3610 	case SIL_CHLD:
3611 		to->si_pid = from->si_pid;
3612 		to->si_uid = from->si_uid;
3613 		to->si_status = from->si_status;
3614 		to->si_utime = from->si_utime;
3615 		to->si_stime = from->si_stime;
3616 		break;
3617 	case SIL_RT:
3618 		to->si_pid = from->si_pid;
3619 		to->si_uid = from->si_uid;
3620 		to->si_int = from->si_int;
3621 		break;
3622 	case SIL_SYS:
3623 		to->si_call_addr = ptr_to_compat(from->si_call_addr);
3624 		to->si_syscall   = from->si_syscall;
3625 		to->si_arch      = from->si_arch;
3626 		break;
3627 	}
3628 }
3629 
3630 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3631 			   const struct kernel_siginfo *from)
3632 {
3633 	struct compat_siginfo new;
3634 
3635 	copy_siginfo_to_external32(&new, from);
3636 	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3637 		return -EFAULT;
3638 	return 0;
3639 }
3640 
3641 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3642 					 const struct compat_siginfo *from)
3643 {
3644 	clear_siginfo(to);
3645 	to->si_signo = from->si_signo;
3646 	to->si_errno = from->si_errno;
3647 	to->si_code  = from->si_code;
3648 	switch(siginfo_layout(from->si_signo, from->si_code)) {
3649 	case SIL_KILL:
3650 		to->si_pid = from->si_pid;
3651 		to->si_uid = from->si_uid;
3652 		break;
3653 	case SIL_TIMER:
3654 		to->si_tid     = from->si_tid;
3655 		to->si_overrun = from->si_overrun;
3656 		to->si_int     = from->si_int;
3657 		break;
3658 	case SIL_POLL:
3659 		to->si_band = from->si_band;
3660 		to->si_fd   = from->si_fd;
3661 		break;
3662 	case SIL_FAULT:
3663 		to->si_addr = compat_ptr(from->si_addr);
3664 		break;
3665 	case SIL_FAULT_TRAPNO:
3666 		to->si_addr = compat_ptr(from->si_addr);
3667 		to->si_trapno = from->si_trapno;
3668 		break;
3669 	case SIL_FAULT_MCEERR:
3670 		to->si_addr = compat_ptr(from->si_addr);
3671 		to->si_addr_lsb = from->si_addr_lsb;
3672 		break;
3673 	case SIL_FAULT_BNDERR:
3674 		to->si_addr = compat_ptr(from->si_addr);
3675 		to->si_lower = compat_ptr(from->si_lower);
3676 		to->si_upper = compat_ptr(from->si_upper);
3677 		break;
3678 	case SIL_FAULT_PKUERR:
3679 		to->si_addr = compat_ptr(from->si_addr);
3680 		to->si_pkey = from->si_pkey;
3681 		break;
3682 	case SIL_FAULT_PERF_EVENT:
3683 		to->si_addr = compat_ptr(from->si_addr);
3684 		to->si_perf_data = from->si_perf_data;
3685 		to->si_perf_type = from->si_perf_type;
3686 		to->si_perf_flags = from->si_perf_flags;
3687 		break;
3688 	case SIL_CHLD:
3689 		to->si_pid    = from->si_pid;
3690 		to->si_uid    = from->si_uid;
3691 		to->si_status = from->si_status;
3692 #ifdef CONFIG_X86_X32_ABI
3693 		if (in_x32_syscall()) {
3694 			to->si_utime = from->_sifields._sigchld_x32._utime;
3695 			to->si_stime = from->_sifields._sigchld_x32._stime;
3696 		} else
3697 #endif
3698 		{
3699 			to->si_utime = from->si_utime;
3700 			to->si_stime = from->si_stime;
3701 		}
3702 		break;
3703 	case SIL_RT:
3704 		to->si_pid = from->si_pid;
3705 		to->si_uid = from->si_uid;
3706 		to->si_int = from->si_int;
3707 		break;
3708 	case SIL_SYS:
3709 		to->si_call_addr = compat_ptr(from->si_call_addr);
3710 		to->si_syscall   = from->si_syscall;
3711 		to->si_arch      = from->si_arch;
3712 		break;
3713 	}
3714 	return 0;
3715 }
3716 
3717 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3718 				      const struct compat_siginfo __user *ufrom)
3719 {
3720 	struct compat_siginfo from;
3721 
3722 	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3723 		return -EFAULT;
3724 
3725 	from.si_signo = signo;
3726 	return post_copy_siginfo_from_user32(to, &from);
3727 }
3728 
3729 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3730 			     const struct compat_siginfo __user *ufrom)
3731 {
3732 	struct compat_siginfo from;
3733 
3734 	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3735 		return -EFAULT;
3736 
3737 	return post_copy_siginfo_from_user32(to, &from);
3738 }
3739 #endif /* CONFIG_COMPAT */
3740 
3741 /**
3742  *  do_sigtimedwait - wait for queued signals specified in @which
3743  *  @which: queued signals to wait for
3744  *  @info: if non-null, the signal's siginfo is returned here
3745  *  @ts: upper bound on process time suspension
3746  */
3747 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3748 		    const struct timespec64 *ts)
3749 {
3750 	ktime_t *to = NULL, timeout = KTIME_MAX;
3751 	struct task_struct *tsk = current;
3752 	sigset_t mask = *which;
3753 	enum pid_type type;
3754 	int sig, ret = 0;
3755 
3756 	if (ts) {
3757 		if (!timespec64_valid(ts))
3758 			return -EINVAL;
3759 		timeout = timespec64_to_ktime(*ts);
3760 		to = &timeout;
3761 	}
3762 
3763 	/*
3764 	 * Invert the set of allowed signals to get those we want to block.
3765 	 */
3766 	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3767 	signotset(&mask);
3768 
3769 	spin_lock_irq(&tsk->sighand->siglock);
3770 	sig = dequeue_signal(&mask, info, &type);
3771 	if (!sig && timeout) {
3772 		/*
3773 		 * None ready, temporarily unblock those we're interested
3774 		 * while we are sleeping in so that we'll be awakened when
3775 		 * they arrive. Unblocking is always fine, we can avoid
3776 		 * set_current_blocked().
3777 		 */
3778 		tsk->real_blocked = tsk->blocked;
3779 		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3780 		recalc_sigpending();
3781 		spin_unlock_irq(&tsk->sighand->siglock);
3782 
3783 		__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
3784 		ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3785 					       HRTIMER_MODE_REL);
3786 		spin_lock_irq(&tsk->sighand->siglock);
3787 		__set_task_blocked(tsk, &tsk->real_blocked);
3788 		sigemptyset(&tsk->real_blocked);
3789 		sig = dequeue_signal(&mask, info, &type);
3790 	}
3791 	spin_unlock_irq(&tsk->sighand->siglock);
3792 
3793 	if (sig)
3794 		return sig;
3795 	return ret ? -EINTR : -EAGAIN;
3796 }
3797 
3798 /**
3799  *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
3800  *			in @uthese
3801  *  @uthese: queued signals to wait for
3802  *  @uinfo: if non-null, the signal's siginfo is returned here
3803  *  @uts: upper bound on process time suspension
3804  *  @sigsetsize: size of sigset_t type
3805  */
3806 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3807 		siginfo_t __user *, uinfo,
3808 		const struct __kernel_timespec __user *, uts,
3809 		size_t, sigsetsize)
3810 {
3811 	sigset_t these;
3812 	struct timespec64 ts;
3813 	kernel_siginfo_t info;
3814 	int ret;
3815 
3816 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3817 	if (sigsetsize != sizeof(sigset_t))
3818 		return -EINVAL;
3819 
3820 	if (copy_from_user(&these, uthese, sizeof(these)))
3821 		return -EFAULT;
3822 
3823 	if (uts) {
3824 		if (get_timespec64(&ts, uts))
3825 			return -EFAULT;
3826 	}
3827 
3828 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3829 
3830 	if (ret > 0 && uinfo) {
3831 		if (copy_siginfo_to_user(uinfo, &info))
3832 			ret = -EFAULT;
3833 	}
3834 
3835 	return ret;
3836 }
3837 
3838 #ifdef CONFIG_COMPAT_32BIT_TIME
3839 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3840 		siginfo_t __user *, uinfo,
3841 		const struct old_timespec32 __user *, uts,
3842 		size_t, sigsetsize)
3843 {
3844 	sigset_t these;
3845 	struct timespec64 ts;
3846 	kernel_siginfo_t info;
3847 	int ret;
3848 
3849 	if (sigsetsize != sizeof(sigset_t))
3850 		return -EINVAL;
3851 
3852 	if (copy_from_user(&these, uthese, sizeof(these)))
3853 		return -EFAULT;
3854 
3855 	if (uts) {
3856 		if (get_old_timespec32(&ts, uts))
3857 			return -EFAULT;
3858 	}
3859 
3860 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3861 
3862 	if (ret > 0 && uinfo) {
3863 		if (copy_siginfo_to_user(uinfo, &info))
3864 			ret = -EFAULT;
3865 	}
3866 
3867 	return ret;
3868 }
3869 #endif
3870 
3871 #ifdef CONFIG_COMPAT
3872 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3873 		struct compat_siginfo __user *, uinfo,
3874 		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3875 {
3876 	sigset_t s;
3877 	struct timespec64 t;
3878 	kernel_siginfo_t info;
3879 	long ret;
3880 
3881 	if (sigsetsize != sizeof(sigset_t))
3882 		return -EINVAL;
3883 
3884 	if (get_compat_sigset(&s, uthese))
3885 		return -EFAULT;
3886 
3887 	if (uts) {
3888 		if (get_timespec64(&t, uts))
3889 			return -EFAULT;
3890 	}
3891 
3892 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3893 
3894 	if (ret > 0 && uinfo) {
3895 		if (copy_siginfo_to_user32(uinfo, &info))
3896 			ret = -EFAULT;
3897 	}
3898 
3899 	return ret;
3900 }
3901 
3902 #ifdef CONFIG_COMPAT_32BIT_TIME
3903 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3904 		struct compat_siginfo __user *, uinfo,
3905 		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3906 {
3907 	sigset_t s;
3908 	struct timespec64 t;
3909 	kernel_siginfo_t info;
3910 	long ret;
3911 
3912 	if (sigsetsize != sizeof(sigset_t))
3913 		return -EINVAL;
3914 
3915 	if (get_compat_sigset(&s, uthese))
3916 		return -EFAULT;
3917 
3918 	if (uts) {
3919 		if (get_old_timespec32(&t, uts))
3920 			return -EFAULT;
3921 	}
3922 
3923 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3924 
3925 	if (ret > 0 && uinfo) {
3926 		if (copy_siginfo_to_user32(uinfo, &info))
3927 			ret = -EFAULT;
3928 	}
3929 
3930 	return ret;
3931 }
3932 #endif
3933 #endif
3934 
3935 static void prepare_kill_siginfo(int sig, struct kernel_siginfo *info,
3936 				 enum pid_type type)
3937 {
3938 	clear_siginfo(info);
3939 	info->si_signo = sig;
3940 	info->si_errno = 0;
3941 	info->si_code = (type == PIDTYPE_PID) ? SI_TKILL : SI_USER;
3942 	info->si_pid = task_tgid_vnr(current);
3943 	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3944 }
3945 
3946 /**
3947  *  sys_kill - send a signal to a process
3948  *  @pid: the PID of the process
3949  *  @sig: signal to be sent
3950  */
3951 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3952 {
3953 	struct kernel_siginfo info;
3954 
3955 	prepare_kill_siginfo(sig, &info, PIDTYPE_TGID);
3956 
3957 	return kill_something_info(sig, &info, pid);
3958 }
3959 
3960 /*
3961  * Verify that the signaler and signalee either are in the same pid namespace
3962  * or that the signaler's pid namespace is an ancestor of the signalee's pid
3963  * namespace.
3964  */
3965 static bool access_pidfd_pidns(struct pid *pid)
3966 {
3967 	struct pid_namespace *active = task_active_pid_ns(current);
3968 	struct pid_namespace *p = ns_of_pid(pid);
3969 
3970 	for (;;) {
3971 		if (!p)
3972 			return false;
3973 		if (p == active)
3974 			break;
3975 		p = p->parent;
3976 	}
3977 
3978 	return true;
3979 }
3980 
3981 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3982 		siginfo_t __user *info)
3983 {
3984 #ifdef CONFIG_COMPAT
3985 	/*
3986 	 * Avoid hooking up compat syscalls and instead handle necessary
3987 	 * conversions here. Note, this is a stop-gap measure and should not be
3988 	 * considered a generic solution.
3989 	 */
3990 	if (in_compat_syscall())
3991 		return copy_siginfo_from_user32(
3992 			kinfo, (struct compat_siginfo __user *)info);
3993 #endif
3994 	return copy_siginfo_from_user(kinfo, info);
3995 }
3996 
3997 static struct pid *pidfd_to_pid(const struct file *file)
3998 {
3999 	struct pid *pid;
4000 
4001 	pid = pidfd_pid(file);
4002 	if (!IS_ERR(pid))
4003 		return pid;
4004 
4005 	return tgid_pidfd_to_pid(file);
4006 }
4007 
4008 #define PIDFD_SEND_SIGNAL_FLAGS                            \
4009 	(PIDFD_SIGNAL_THREAD | PIDFD_SIGNAL_THREAD_GROUP | \
4010 	 PIDFD_SIGNAL_PROCESS_GROUP)
4011 
4012 static int do_pidfd_send_signal(struct pid *pid, int sig, enum pid_type type,
4013 				siginfo_t __user *info, unsigned int flags)
4014 {
4015 	kernel_siginfo_t kinfo;
4016 
4017 	switch (flags) {
4018 	case PIDFD_SIGNAL_THREAD:
4019 		type = PIDTYPE_PID;
4020 		break;
4021 	case PIDFD_SIGNAL_THREAD_GROUP:
4022 		type = PIDTYPE_TGID;
4023 		break;
4024 	case PIDFD_SIGNAL_PROCESS_GROUP:
4025 		type = PIDTYPE_PGID;
4026 		break;
4027 	}
4028 
4029 	if (info) {
4030 		int ret;
4031 
4032 		ret = copy_siginfo_from_user_any(&kinfo, info);
4033 		if (unlikely(ret))
4034 			return ret;
4035 
4036 		if (unlikely(sig != kinfo.si_signo))
4037 			return -EINVAL;
4038 
4039 		/* Only allow sending arbitrary signals to yourself. */
4040 		if ((task_pid(current) != pid || type > PIDTYPE_TGID) &&
4041 		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
4042 			return -EPERM;
4043 	} else {
4044 		prepare_kill_siginfo(sig, &kinfo, type);
4045 	}
4046 
4047 	if (type == PIDTYPE_PGID)
4048 		return kill_pgrp_info(sig, &kinfo, pid);
4049 
4050 	return kill_pid_info_type(sig, &kinfo, pid, type);
4051 }
4052 
4053 /**
4054  * sys_pidfd_send_signal - Signal a process through a pidfd
4055  * @pidfd:  file descriptor of the process
4056  * @sig:    signal to send
4057  * @info:   signal info
4058  * @flags:  future flags
4059  *
4060  * Send the signal to the thread group or to the individual thread depending
4061  * on PIDFD_THREAD.
4062  * In the future extension to @flags may be used to override the default scope
4063  * of @pidfd.
4064  *
4065  * Return: 0 on success, negative errno on failure
4066  */
4067 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
4068 		siginfo_t __user *, info, unsigned int, flags)
4069 {
4070 	struct pid *pid;
4071 	enum pid_type type;
4072 
4073 	/* Enforce flags be set to 0 until we add an extension. */
4074 	if (flags & ~PIDFD_SEND_SIGNAL_FLAGS)
4075 		return -EINVAL;
4076 
4077 	/* Ensure that only a single signal scope determining flag is set. */
4078 	if (hweight32(flags & PIDFD_SEND_SIGNAL_FLAGS) > 1)
4079 		return -EINVAL;
4080 
4081 	switch (pidfd) {
4082 	case PIDFD_SELF_THREAD:
4083 		pid = get_task_pid(current, PIDTYPE_PID);
4084 		type = PIDTYPE_PID;
4085 		break;
4086 	case PIDFD_SELF_THREAD_GROUP:
4087 		pid = get_task_pid(current, PIDTYPE_TGID);
4088 		type = PIDTYPE_TGID;
4089 		break;
4090 	default: {
4091 		CLASS(fd, f)(pidfd);
4092 		if (fd_empty(f))
4093 			return -EBADF;
4094 
4095 		/* Is this a pidfd? */
4096 		pid = pidfd_to_pid(fd_file(f));
4097 		if (IS_ERR(pid))
4098 			return PTR_ERR(pid);
4099 
4100 		if (!access_pidfd_pidns(pid))
4101 			return -EINVAL;
4102 
4103 		/* Infer scope from the type of pidfd. */
4104 		if (fd_file(f)->f_flags & PIDFD_THREAD)
4105 			type = PIDTYPE_PID;
4106 		else
4107 			type = PIDTYPE_TGID;
4108 
4109 		return do_pidfd_send_signal(pid, sig, type, info, flags);
4110 	}
4111 	}
4112 
4113 	return do_pidfd_send_signal(pid, sig, type, info, flags);
4114 }
4115 
4116 static int
4117 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
4118 {
4119 	struct task_struct *p;
4120 	int error = -ESRCH;
4121 
4122 	rcu_read_lock();
4123 	p = find_task_by_vpid(pid);
4124 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
4125 		error = check_kill_permission(sig, info, p);
4126 		/*
4127 		 * The null signal is a permissions and process existence
4128 		 * probe.  No signal is actually delivered.
4129 		 */
4130 		if (!error && sig) {
4131 			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
4132 			/*
4133 			 * If lock_task_sighand() failed we pretend the task
4134 			 * dies after receiving the signal. The window is tiny,
4135 			 * and the signal is private anyway.
4136 			 */
4137 			if (unlikely(error == -ESRCH))
4138 				error = 0;
4139 		}
4140 	}
4141 	rcu_read_unlock();
4142 
4143 	return error;
4144 }
4145 
4146 static int do_tkill(pid_t tgid, pid_t pid, int sig)
4147 {
4148 	struct kernel_siginfo info;
4149 
4150 	prepare_kill_siginfo(sig, &info, PIDTYPE_PID);
4151 
4152 	return do_send_specific(tgid, pid, sig, &info);
4153 }
4154 
4155 /**
4156  *  sys_tgkill - send signal to one specific thread
4157  *  @tgid: the thread group ID of the thread
4158  *  @pid: the PID of the thread
4159  *  @sig: signal to be sent
4160  *
4161  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
4162  *  exists but it's not belonging to the target process anymore. This
4163  *  method solves the problem of threads exiting and PIDs getting reused.
4164  */
4165 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
4166 {
4167 	/* This is only valid for single tasks */
4168 	if (pid <= 0 || tgid <= 0)
4169 		return -EINVAL;
4170 
4171 	return do_tkill(tgid, pid, sig);
4172 }
4173 
4174 /**
4175  *  sys_tkill - send signal to one specific task
4176  *  @pid: the PID of the task
4177  *  @sig: signal to be sent
4178  *
4179  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
4180  */
4181 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
4182 {
4183 	/* This is only valid for single tasks */
4184 	if (pid <= 0)
4185 		return -EINVAL;
4186 
4187 	return do_tkill(0, pid, sig);
4188 }
4189 
4190 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
4191 {
4192 	/* Not even root can pretend to send signals from the kernel.
4193 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4194 	 */
4195 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4196 	    (task_pid_vnr(current) != pid))
4197 		return -EPERM;
4198 
4199 	/* POSIX.1b doesn't mention process groups.  */
4200 	return kill_proc_info(sig, info, pid);
4201 }
4202 
4203 /**
4204  *  sys_rt_sigqueueinfo - send signal information to a signal
4205  *  @pid: the PID of the thread
4206  *  @sig: signal to be sent
4207  *  @uinfo: signal info to be sent
4208  */
4209 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
4210 		siginfo_t __user *, uinfo)
4211 {
4212 	kernel_siginfo_t info;
4213 	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4214 	if (unlikely(ret))
4215 		return ret;
4216 	return do_rt_sigqueueinfo(pid, sig, &info);
4217 }
4218 
4219 #ifdef CONFIG_COMPAT
4220 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4221 			compat_pid_t, pid,
4222 			int, sig,
4223 			struct compat_siginfo __user *, uinfo)
4224 {
4225 	kernel_siginfo_t info;
4226 	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4227 	if (unlikely(ret))
4228 		return ret;
4229 	return do_rt_sigqueueinfo(pid, sig, &info);
4230 }
4231 #endif
4232 
4233 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4234 {
4235 	/* This is only valid for single tasks */
4236 	if (pid <= 0 || tgid <= 0)
4237 		return -EINVAL;
4238 
4239 	/* Not even root can pretend to send signals from the kernel.
4240 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4241 	 */
4242 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4243 	    (task_pid_vnr(current) != pid))
4244 		return -EPERM;
4245 
4246 	return do_send_specific(tgid, pid, sig, info);
4247 }
4248 
4249 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4250 		siginfo_t __user *, uinfo)
4251 {
4252 	kernel_siginfo_t info;
4253 	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4254 	if (unlikely(ret))
4255 		return ret;
4256 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4257 }
4258 
4259 #ifdef CONFIG_COMPAT
4260 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4261 			compat_pid_t, tgid,
4262 			compat_pid_t, pid,
4263 			int, sig,
4264 			struct compat_siginfo __user *, uinfo)
4265 {
4266 	kernel_siginfo_t info;
4267 	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4268 	if (unlikely(ret))
4269 		return ret;
4270 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4271 }
4272 #endif
4273 
4274 /*
4275  * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4276  */
4277 void kernel_sigaction(int sig, __sighandler_t action)
4278 {
4279 	spin_lock_irq(&current->sighand->siglock);
4280 	current->sighand->action[sig - 1].sa.sa_handler = action;
4281 	if (action == SIG_IGN) {
4282 		sigset_t mask;
4283 
4284 		sigemptyset(&mask);
4285 		sigaddset(&mask, sig);
4286 
4287 		flush_sigqueue_mask(current, &mask, &current->signal->shared_pending);
4288 		flush_sigqueue_mask(current, &mask, &current->pending);
4289 		recalc_sigpending();
4290 	}
4291 	spin_unlock_irq(&current->sighand->siglock);
4292 }
4293 EXPORT_SYMBOL(kernel_sigaction);
4294 
4295 void __weak sigaction_compat_abi(struct k_sigaction *act,
4296 		struct k_sigaction *oact)
4297 {
4298 }
4299 
4300 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4301 {
4302 	struct task_struct *p = current, *t;
4303 	struct k_sigaction *k;
4304 	sigset_t mask;
4305 
4306 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4307 		return -EINVAL;
4308 
4309 	k = &p->sighand->action[sig-1];
4310 
4311 	spin_lock_irq(&p->sighand->siglock);
4312 	if (k->sa.sa_flags & SA_IMMUTABLE) {
4313 		spin_unlock_irq(&p->sighand->siglock);
4314 		return -EINVAL;
4315 	}
4316 	if (oact)
4317 		*oact = *k;
4318 
4319 	/*
4320 	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4321 	 * e.g. by having an architecture use the bit in their uapi.
4322 	 */
4323 	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4324 
4325 	/*
4326 	 * Clear unknown flag bits in order to allow userspace to detect missing
4327 	 * support for flag bits and to allow the kernel to use non-uapi bits
4328 	 * internally.
4329 	 */
4330 	if (act)
4331 		act->sa.sa_flags &= UAPI_SA_FLAGS;
4332 	if (oact)
4333 		oact->sa.sa_flags &= UAPI_SA_FLAGS;
4334 
4335 	sigaction_compat_abi(act, oact);
4336 
4337 	if (act) {
4338 		bool was_ignored = k->sa.sa_handler == SIG_IGN;
4339 
4340 		sigdelsetmask(&act->sa.sa_mask,
4341 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
4342 		*k = *act;
4343 		/*
4344 		 * POSIX 3.3.1.3:
4345 		 *  "Setting a signal action to SIG_IGN for a signal that is
4346 		 *   pending shall cause the pending signal to be discarded,
4347 		 *   whether or not it is blocked."
4348 		 *
4349 		 *  "Setting a signal action to SIG_DFL for a signal that is
4350 		 *   pending and whose default action is to ignore the signal
4351 		 *   (for example, SIGCHLD), shall cause the pending signal to
4352 		 *   be discarded, whether or not it is blocked"
4353 		 */
4354 		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4355 			sigemptyset(&mask);
4356 			sigaddset(&mask, sig);
4357 			flush_sigqueue_mask(p, &mask, &p->signal->shared_pending);
4358 			for_each_thread(p, t)
4359 				flush_sigqueue_mask(p, &mask, &t->pending);
4360 		} else if (was_ignored) {
4361 			posixtimer_sig_unignore(p, sig);
4362 		}
4363 	}
4364 
4365 	spin_unlock_irq(&p->sighand->siglock);
4366 	return 0;
4367 }
4368 
4369 #ifdef CONFIG_DYNAMIC_SIGFRAME
4370 static inline void sigaltstack_lock(void)
4371 	__acquires(&current->sighand->siglock)
4372 {
4373 	spin_lock_irq(&current->sighand->siglock);
4374 }
4375 
4376 static inline void sigaltstack_unlock(void)
4377 	__releases(&current->sighand->siglock)
4378 {
4379 	spin_unlock_irq(&current->sighand->siglock);
4380 }
4381 #else
4382 static inline void sigaltstack_lock(void) { }
4383 static inline void sigaltstack_unlock(void) { }
4384 #endif
4385 
4386 static int
4387 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4388 		size_t min_ss_size)
4389 {
4390 	struct task_struct *t = current;
4391 	int ret = 0;
4392 
4393 	if (oss) {
4394 		memset(oss, 0, sizeof(stack_t));
4395 		oss->ss_sp = (void __user *) t->sas_ss_sp;
4396 		oss->ss_size = t->sas_ss_size;
4397 		oss->ss_flags = sas_ss_flags(sp) |
4398 			(current->sas_ss_flags & SS_FLAG_BITS);
4399 	}
4400 
4401 	if (ss) {
4402 		void __user *ss_sp = ss->ss_sp;
4403 		size_t ss_size = ss->ss_size;
4404 		unsigned ss_flags = ss->ss_flags;
4405 		int ss_mode;
4406 
4407 		if (unlikely(on_sig_stack(sp)))
4408 			return -EPERM;
4409 
4410 		ss_mode = ss_flags & ~SS_FLAG_BITS;
4411 		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4412 				ss_mode != 0))
4413 			return -EINVAL;
4414 
4415 		/*
4416 		 * Return before taking any locks if no actual
4417 		 * sigaltstack changes were requested.
4418 		 */
4419 		if (t->sas_ss_sp == (unsigned long)ss_sp &&
4420 		    t->sas_ss_size == ss_size &&
4421 		    t->sas_ss_flags == ss_flags)
4422 			return 0;
4423 
4424 		sigaltstack_lock();
4425 		if (ss_mode == SS_DISABLE) {
4426 			ss_size = 0;
4427 			ss_sp = NULL;
4428 		} else {
4429 			if (unlikely(ss_size < min_ss_size))
4430 				ret = -ENOMEM;
4431 			if (!sigaltstack_size_valid(ss_size))
4432 				ret = -ENOMEM;
4433 		}
4434 		if (!ret) {
4435 			t->sas_ss_sp = (unsigned long) ss_sp;
4436 			t->sas_ss_size = ss_size;
4437 			t->sas_ss_flags = ss_flags;
4438 		}
4439 		sigaltstack_unlock();
4440 	}
4441 	return ret;
4442 }
4443 
4444 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4445 {
4446 	stack_t new, old;
4447 	int err;
4448 	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4449 		return -EFAULT;
4450 	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4451 			      current_user_stack_pointer(),
4452 			      MINSIGSTKSZ);
4453 	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4454 		err = -EFAULT;
4455 	return err;
4456 }
4457 
4458 int restore_altstack(const stack_t __user *uss)
4459 {
4460 	stack_t new;
4461 	if (copy_from_user(&new, uss, sizeof(stack_t)))
4462 		return -EFAULT;
4463 	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4464 			     MINSIGSTKSZ);
4465 	/* squash all but EFAULT for now */
4466 	return 0;
4467 }
4468 
4469 int __save_altstack(stack_t __user *uss, unsigned long sp)
4470 {
4471 	struct task_struct *t = current;
4472 	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4473 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4474 		__put_user(t->sas_ss_size, &uss->ss_size);
4475 	return err;
4476 }
4477 
4478 #ifdef CONFIG_COMPAT
4479 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4480 				 compat_stack_t __user *uoss_ptr)
4481 {
4482 	stack_t uss, uoss;
4483 	int ret;
4484 
4485 	if (uss_ptr) {
4486 		compat_stack_t uss32;
4487 		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4488 			return -EFAULT;
4489 		uss.ss_sp = compat_ptr(uss32.ss_sp);
4490 		uss.ss_flags = uss32.ss_flags;
4491 		uss.ss_size = uss32.ss_size;
4492 	}
4493 	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4494 			     compat_user_stack_pointer(),
4495 			     COMPAT_MINSIGSTKSZ);
4496 	if (ret >= 0 && uoss_ptr)  {
4497 		compat_stack_t old;
4498 		memset(&old, 0, sizeof(old));
4499 		old.ss_sp = ptr_to_compat(uoss.ss_sp);
4500 		old.ss_flags = uoss.ss_flags;
4501 		old.ss_size = uoss.ss_size;
4502 		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4503 			ret = -EFAULT;
4504 	}
4505 	return ret;
4506 }
4507 
4508 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4509 			const compat_stack_t __user *, uss_ptr,
4510 			compat_stack_t __user *, uoss_ptr)
4511 {
4512 	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4513 }
4514 
4515 int compat_restore_altstack(const compat_stack_t __user *uss)
4516 {
4517 	int err = do_compat_sigaltstack(uss, NULL);
4518 	/* squash all but -EFAULT for now */
4519 	return err == -EFAULT ? err : 0;
4520 }
4521 
4522 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4523 {
4524 	int err;
4525 	struct task_struct *t = current;
4526 	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4527 			 &uss->ss_sp) |
4528 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4529 		__put_user(t->sas_ss_size, &uss->ss_size);
4530 	return err;
4531 }
4532 #endif
4533 
4534 #ifdef __ARCH_WANT_SYS_SIGPENDING
4535 
4536 /**
4537  *  sys_sigpending - examine pending signals
4538  *  @uset: where mask of pending signal is returned
4539  */
4540 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4541 {
4542 	sigset_t set;
4543 
4544 	if (sizeof(old_sigset_t) > sizeof(*uset))
4545 		return -EINVAL;
4546 
4547 	do_sigpending(&set);
4548 
4549 	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4550 		return -EFAULT;
4551 
4552 	return 0;
4553 }
4554 
4555 #ifdef CONFIG_COMPAT
4556 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4557 {
4558 	sigset_t set;
4559 
4560 	do_sigpending(&set);
4561 
4562 	return put_user(set.sig[0], set32);
4563 }
4564 #endif
4565 
4566 #endif
4567 
4568 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4569 /**
4570  *  sys_sigprocmask - examine and change blocked signals
4571  *  @how: whether to add, remove, or set signals
4572  *  @nset: signals to add or remove (if non-null)
4573  *  @oset: previous value of signal mask if non-null
4574  *
4575  * Some platforms have their own version with special arguments;
4576  * others support only sys_rt_sigprocmask.
4577  */
4578 
4579 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4580 		old_sigset_t __user *, oset)
4581 {
4582 	old_sigset_t old_set, new_set;
4583 	sigset_t new_blocked;
4584 
4585 	old_set = current->blocked.sig[0];
4586 
4587 	if (nset) {
4588 		if (copy_from_user(&new_set, nset, sizeof(*nset)))
4589 			return -EFAULT;
4590 
4591 		new_blocked = current->blocked;
4592 
4593 		switch (how) {
4594 		case SIG_BLOCK:
4595 			sigaddsetmask(&new_blocked, new_set);
4596 			break;
4597 		case SIG_UNBLOCK:
4598 			sigdelsetmask(&new_blocked, new_set);
4599 			break;
4600 		case SIG_SETMASK:
4601 			new_blocked.sig[0] = new_set;
4602 			break;
4603 		default:
4604 			return -EINVAL;
4605 		}
4606 
4607 		set_current_blocked(&new_blocked);
4608 	}
4609 
4610 	if (oset) {
4611 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
4612 			return -EFAULT;
4613 	}
4614 
4615 	return 0;
4616 }
4617 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4618 
4619 #ifndef CONFIG_ODD_RT_SIGACTION
4620 /**
4621  *  sys_rt_sigaction - alter an action taken by a process
4622  *  @sig: signal to be sent
4623  *  @act: new sigaction
4624  *  @oact: used to save the previous sigaction
4625  *  @sigsetsize: size of sigset_t type
4626  */
4627 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4628 		const struct sigaction __user *, act,
4629 		struct sigaction __user *, oact,
4630 		size_t, sigsetsize)
4631 {
4632 	struct k_sigaction new_sa, old_sa;
4633 	int ret;
4634 
4635 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4636 	if (sigsetsize != sizeof(sigset_t))
4637 		return -EINVAL;
4638 
4639 	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4640 		return -EFAULT;
4641 
4642 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4643 	if (ret)
4644 		return ret;
4645 
4646 	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4647 		return -EFAULT;
4648 
4649 	return 0;
4650 }
4651 #ifdef CONFIG_COMPAT
4652 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4653 		const struct compat_sigaction __user *, act,
4654 		struct compat_sigaction __user *, oact,
4655 		compat_size_t, sigsetsize)
4656 {
4657 	struct k_sigaction new_ka, old_ka;
4658 #ifdef __ARCH_HAS_SA_RESTORER
4659 	compat_uptr_t restorer;
4660 #endif
4661 	int ret;
4662 
4663 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4664 	if (sigsetsize != sizeof(compat_sigset_t))
4665 		return -EINVAL;
4666 
4667 	if (act) {
4668 		compat_uptr_t handler;
4669 		ret = get_user(handler, &act->sa_handler);
4670 		new_ka.sa.sa_handler = compat_ptr(handler);
4671 #ifdef __ARCH_HAS_SA_RESTORER
4672 		ret |= get_user(restorer, &act->sa_restorer);
4673 		new_ka.sa.sa_restorer = compat_ptr(restorer);
4674 #endif
4675 		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4676 		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4677 		if (ret)
4678 			return -EFAULT;
4679 	}
4680 
4681 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4682 	if (!ret && oact) {
4683 		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4684 			       &oact->sa_handler);
4685 		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4686 					 sizeof(oact->sa_mask));
4687 		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4688 #ifdef __ARCH_HAS_SA_RESTORER
4689 		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4690 				&oact->sa_restorer);
4691 #endif
4692 	}
4693 	return ret;
4694 }
4695 #endif
4696 #endif /* !CONFIG_ODD_RT_SIGACTION */
4697 
4698 #ifdef CONFIG_OLD_SIGACTION
4699 SYSCALL_DEFINE3(sigaction, int, sig,
4700 		const struct old_sigaction __user *, act,
4701 	        struct old_sigaction __user *, oact)
4702 {
4703 	struct k_sigaction new_ka, old_ka;
4704 	int ret;
4705 
4706 	if (act) {
4707 		old_sigset_t mask;
4708 		if (!access_ok(act, sizeof(*act)) ||
4709 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4710 		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4711 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4712 		    __get_user(mask, &act->sa_mask))
4713 			return -EFAULT;
4714 #ifdef __ARCH_HAS_KA_RESTORER
4715 		new_ka.ka_restorer = NULL;
4716 #endif
4717 		siginitset(&new_ka.sa.sa_mask, mask);
4718 	}
4719 
4720 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4721 
4722 	if (!ret && oact) {
4723 		if (!access_ok(oact, sizeof(*oact)) ||
4724 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4725 		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4726 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4727 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4728 			return -EFAULT;
4729 	}
4730 
4731 	return ret;
4732 }
4733 #endif
4734 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4735 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4736 		const struct compat_old_sigaction __user *, act,
4737 	        struct compat_old_sigaction __user *, oact)
4738 {
4739 	struct k_sigaction new_ka, old_ka;
4740 	int ret;
4741 	compat_old_sigset_t mask;
4742 	compat_uptr_t handler, restorer;
4743 
4744 	if (act) {
4745 		if (!access_ok(act, sizeof(*act)) ||
4746 		    __get_user(handler, &act->sa_handler) ||
4747 		    __get_user(restorer, &act->sa_restorer) ||
4748 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4749 		    __get_user(mask, &act->sa_mask))
4750 			return -EFAULT;
4751 
4752 #ifdef __ARCH_HAS_KA_RESTORER
4753 		new_ka.ka_restorer = NULL;
4754 #endif
4755 		new_ka.sa.sa_handler = compat_ptr(handler);
4756 		new_ka.sa.sa_restorer = compat_ptr(restorer);
4757 		siginitset(&new_ka.sa.sa_mask, mask);
4758 	}
4759 
4760 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4761 
4762 	if (!ret && oact) {
4763 		if (!access_ok(oact, sizeof(*oact)) ||
4764 		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4765 			       &oact->sa_handler) ||
4766 		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4767 			       &oact->sa_restorer) ||
4768 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4769 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4770 			return -EFAULT;
4771 	}
4772 	return ret;
4773 }
4774 #endif
4775 
4776 #ifdef CONFIG_SGETMASK_SYSCALL
4777 
4778 /*
4779  * For backwards compatibility.  Functionality superseded by sigprocmask.
4780  */
4781 SYSCALL_DEFINE0(sgetmask)
4782 {
4783 	/* SMP safe */
4784 	return current->blocked.sig[0];
4785 }
4786 
4787 SYSCALL_DEFINE1(ssetmask, int, newmask)
4788 {
4789 	int old = current->blocked.sig[0];
4790 	sigset_t newset;
4791 
4792 	siginitset(&newset, newmask);
4793 	set_current_blocked(&newset);
4794 
4795 	return old;
4796 }
4797 #endif /* CONFIG_SGETMASK_SYSCALL */
4798 
4799 #ifdef __ARCH_WANT_SYS_SIGNAL
4800 /*
4801  * For backwards compatibility.  Functionality superseded by sigaction.
4802  */
4803 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4804 {
4805 	struct k_sigaction new_sa, old_sa;
4806 	int ret;
4807 
4808 	new_sa.sa.sa_handler = handler;
4809 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4810 	sigemptyset(&new_sa.sa.sa_mask);
4811 
4812 	ret = do_sigaction(sig, &new_sa, &old_sa);
4813 
4814 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4815 }
4816 #endif /* __ARCH_WANT_SYS_SIGNAL */
4817 
4818 #ifdef __ARCH_WANT_SYS_PAUSE
4819 
4820 SYSCALL_DEFINE0(pause)
4821 {
4822 	while (!signal_pending(current)) {
4823 		__set_current_state(TASK_INTERRUPTIBLE);
4824 		schedule();
4825 	}
4826 	return -ERESTARTNOHAND;
4827 }
4828 
4829 #endif
4830 
4831 static int sigsuspend(sigset_t *set)
4832 {
4833 	current->saved_sigmask = current->blocked;
4834 	set_current_blocked(set);
4835 
4836 	while (!signal_pending(current)) {
4837 		__set_current_state(TASK_INTERRUPTIBLE);
4838 		schedule();
4839 	}
4840 	set_restore_sigmask();
4841 	return -ERESTARTNOHAND;
4842 }
4843 
4844 /**
4845  *  sys_rt_sigsuspend - replace the signal mask for a value with the
4846  *	@unewset value until a signal is received
4847  *  @unewset: new signal mask value
4848  *  @sigsetsize: size of sigset_t type
4849  */
4850 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4851 {
4852 	sigset_t newset;
4853 
4854 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4855 	if (sigsetsize != sizeof(sigset_t))
4856 		return -EINVAL;
4857 
4858 	if (copy_from_user(&newset, unewset, sizeof(newset)))
4859 		return -EFAULT;
4860 	return sigsuspend(&newset);
4861 }
4862 
4863 #ifdef CONFIG_COMPAT
4864 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4865 {
4866 	sigset_t newset;
4867 
4868 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4869 	if (sigsetsize != sizeof(sigset_t))
4870 		return -EINVAL;
4871 
4872 	if (get_compat_sigset(&newset, unewset))
4873 		return -EFAULT;
4874 	return sigsuspend(&newset);
4875 }
4876 #endif
4877 
4878 #ifdef CONFIG_OLD_SIGSUSPEND
4879 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4880 {
4881 	sigset_t blocked;
4882 	siginitset(&blocked, mask);
4883 	return sigsuspend(&blocked);
4884 }
4885 #endif
4886 #ifdef CONFIG_OLD_SIGSUSPEND3
4887 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4888 {
4889 	sigset_t blocked;
4890 	siginitset(&blocked, mask);
4891 	return sigsuspend(&blocked);
4892 }
4893 #endif
4894 
4895 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4896 {
4897 	return NULL;
4898 }
4899 
4900 static inline void siginfo_buildtime_checks(void)
4901 {
4902 	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4903 
4904 	/* Verify the offsets in the two siginfos match */
4905 #define CHECK_OFFSET(field) \
4906 	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4907 
4908 	/* kill */
4909 	CHECK_OFFSET(si_pid);
4910 	CHECK_OFFSET(si_uid);
4911 
4912 	/* timer */
4913 	CHECK_OFFSET(si_tid);
4914 	CHECK_OFFSET(si_overrun);
4915 	CHECK_OFFSET(si_value);
4916 
4917 	/* rt */
4918 	CHECK_OFFSET(si_pid);
4919 	CHECK_OFFSET(si_uid);
4920 	CHECK_OFFSET(si_value);
4921 
4922 	/* sigchld */
4923 	CHECK_OFFSET(si_pid);
4924 	CHECK_OFFSET(si_uid);
4925 	CHECK_OFFSET(si_status);
4926 	CHECK_OFFSET(si_utime);
4927 	CHECK_OFFSET(si_stime);
4928 
4929 	/* sigfault */
4930 	CHECK_OFFSET(si_addr);
4931 	CHECK_OFFSET(si_trapno);
4932 	CHECK_OFFSET(si_addr_lsb);
4933 	CHECK_OFFSET(si_lower);
4934 	CHECK_OFFSET(si_upper);
4935 	CHECK_OFFSET(si_pkey);
4936 	CHECK_OFFSET(si_perf_data);
4937 	CHECK_OFFSET(si_perf_type);
4938 	CHECK_OFFSET(si_perf_flags);
4939 
4940 	/* sigpoll */
4941 	CHECK_OFFSET(si_band);
4942 	CHECK_OFFSET(si_fd);
4943 
4944 	/* sigsys */
4945 	CHECK_OFFSET(si_call_addr);
4946 	CHECK_OFFSET(si_syscall);
4947 	CHECK_OFFSET(si_arch);
4948 #undef CHECK_OFFSET
4949 
4950 	/* usb asyncio */
4951 	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4952 		     offsetof(struct siginfo, si_addr));
4953 	if (sizeof(int) == sizeof(void __user *)) {
4954 		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4955 			     sizeof(void __user *));
4956 	} else {
4957 		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4958 			      sizeof_field(struct siginfo, si_uid)) !=
4959 			     sizeof(void __user *));
4960 		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4961 			     offsetof(struct siginfo, si_uid));
4962 	}
4963 #ifdef CONFIG_COMPAT
4964 	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4965 		     offsetof(struct compat_siginfo, si_addr));
4966 	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4967 		     sizeof(compat_uptr_t));
4968 	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4969 		     sizeof_field(struct siginfo, si_pid));
4970 #endif
4971 }
4972 
4973 #if defined(CONFIG_SYSCTL)
4974 static const struct ctl_table signal_debug_table[] = {
4975 #ifdef CONFIG_SYSCTL_EXCEPTION_TRACE
4976 	{
4977 		.procname	= "exception-trace",
4978 		.data		= &show_unhandled_signals,
4979 		.maxlen		= sizeof(int),
4980 		.mode		= 0644,
4981 		.proc_handler	= proc_dointvec
4982 	},
4983 #endif
4984 };
4985 
4986 static int __init init_signal_sysctls(void)
4987 {
4988 	register_sysctl_init("debug", signal_debug_table);
4989 	return 0;
4990 }
4991 early_initcall(init_signal_sysctls);
4992 #endif /* CONFIG_SYSCTL */
4993 
4994 void __init signals_init(void)
4995 {
4996 	siginfo_buildtime_checks();
4997 
4998 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
4999 }
5000 
5001 #ifdef CONFIG_KGDB_KDB
5002 #include <linux/kdb.h>
5003 /*
5004  * kdb_send_sig - Allows kdb to send signals without exposing
5005  * signal internals.  This function checks if the required locks are
5006  * available before calling the main signal code, to avoid kdb
5007  * deadlocks.
5008  */
5009 void kdb_send_sig(struct task_struct *t, int sig)
5010 {
5011 	static struct task_struct *kdb_prev_t;
5012 	int new_t, ret;
5013 	if (!spin_trylock(&t->sighand->siglock)) {
5014 		kdb_printf("Can't do kill command now.\n"
5015 			   "The sigmask lock is held somewhere else in "
5016 			   "kernel, try again later\n");
5017 		return;
5018 	}
5019 	new_t = kdb_prev_t != t;
5020 	kdb_prev_t = t;
5021 	if (!task_is_running(t) && new_t) {
5022 		spin_unlock(&t->sighand->siglock);
5023 		kdb_printf("Process is not RUNNING, sending a signal from "
5024 			   "kdb risks deadlock\n"
5025 			   "on the run queue locks. "
5026 			   "The signal has _not_ been sent.\n"
5027 			   "Reissue the kill command if you want to risk "
5028 			   "the deadlock.\n");
5029 		return;
5030 	}
5031 	ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
5032 	spin_unlock(&t->sighand->siglock);
5033 	if (ret)
5034 		kdb_printf("Fail to deliver Signal %d to process %d.\n",
5035 			   sig, t->pid);
5036 	else
5037 		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
5038 }
5039 #endif	/* CONFIG_KGDB_KDB */
5040