xref: /linux/kernel/signal.c (revision e4aebf06695c32d49f1007f9d252f97b5b2998a7)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/kernel/signal.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  *
7  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
8  *
9  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
10  *		Changes to use preallocated sigqueue structures
11  *		to allow signals to be sent reliably.
12  */
13 
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/init.h>
17 #include <linux/sched/mm.h>
18 #include <linux/sched/user.h>
19 #include <linux/sched/debug.h>
20 #include <linux/sched/task.h>
21 #include <linux/sched/task_stack.h>
22 #include <linux/sched/cputime.h>
23 #include <linux/file.h>
24 #include <linux/fs.h>
25 #include <linux/proc_fs.h>
26 #include <linux/tty.h>
27 #include <linux/binfmts.h>
28 #include <linux/coredump.h>
29 #include <linux/security.h>
30 #include <linux/syscalls.h>
31 #include <linux/ptrace.h>
32 #include <linux/signal.h>
33 #include <linux/signalfd.h>
34 #include <linux/ratelimit.h>
35 #include <linux/tracehook.h>
36 #include <linux/capability.h>
37 #include <linux/freezer.h>
38 #include <linux/pid_namespace.h>
39 #include <linux/nsproxy.h>
40 #include <linux/user_namespace.h>
41 #include <linux/uprobes.h>
42 #include <linux/compat.h>
43 #include <linux/cn_proc.h>
44 #include <linux/compiler.h>
45 #include <linux/posix-timers.h>
46 #include <linux/livepatch.h>
47 #include <linux/cgroup.h>
48 #include <linux/audit.h>
49 
50 #define CREATE_TRACE_POINTS
51 #include <trace/events/signal.h>
52 
53 #include <asm/param.h>
54 #include <linux/uaccess.h>
55 #include <asm/unistd.h>
56 #include <asm/siginfo.h>
57 #include <asm/cacheflush.h>
58 
59 /*
60  * SLAB caches for signal bits.
61  */
62 
63 static struct kmem_cache *sigqueue_cachep;
64 
65 int print_fatal_signals __read_mostly;
66 
67 static void __user *sig_handler(struct task_struct *t, int sig)
68 {
69 	return t->sighand->action[sig - 1].sa.sa_handler;
70 }
71 
72 static inline bool sig_handler_ignored(void __user *handler, int sig)
73 {
74 	/* Is it explicitly or implicitly ignored? */
75 	return handler == SIG_IGN ||
76 	       (handler == SIG_DFL && sig_kernel_ignore(sig));
77 }
78 
79 static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
80 {
81 	void __user *handler;
82 
83 	handler = sig_handler(t, sig);
84 
85 	/* SIGKILL and SIGSTOP may not be sent to the global init */
86 	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
87 		return true;
88 
89 	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
90 	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
91 		return true;
92 
93 	/* Only allow kernel generated signals to this kthread */
94 	if (unlikely((t->flags & PF_KTHREAD) &&
95 		     (handler == SIG_KTHREAD_KERNEL) && !force))
96 		return true;
97 
98 	return sig_handler_ignored(handler, sig);
99 }
100 
101 static bool sig_ignored(struct task_struct *t, int sig, bool force)
102 {
103 	/*
104 	 * Blocked signals are never ignored, since the
105 	 * signal handler may change by the time it is
106 	 * unblocked.
107 	 */
108 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
109 		return false;
110 
111 	/*
112 	 * Tracers may want to know about even ignored signals, unless it
113 	 * is SIGKILL, which can't be reported anyway but can be ignored
114 	 * by a SIGNAL_UNKILLABLE task.
115 	 */
116 	if (t->ptrace && sig != SIGKILL)
117 		return false;
118 
119 	return sig_task_ignored(t, sig, force);
120 }
121 
122 /*
123  * Re-calculate pending state from the set of locally pending
124  * signals, globally pending signals, and blocked signals.
125  */
126 static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
127 {
128 	unsigned long ready;
129 	long i;
130 
131 	switch (_NSIG_WORDS) {
132 	default:
133 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
134 			ready |= signal->sig[i] &~ blocked->sig[i];
135 		break;
136 
137 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
138 		ready |= signal->sig[2] &~ blocked->sig[2];
139 		ready |= signal->sig[1] &~ blocked->sig[1];
140 		ready |= signal->sig[0] &~ blocked->sig[0];
141 		break;
142 
143 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
144 		ready |= signal->sig[0] &~ blocked->sig[0];
145 		break;
146 
147 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
148 	}
149 	return ready != 0;
150 }
151 
152 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
153 
154 static bool recalc_sigpending_tsk(struct task_struct *t)
155 {
156 	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
157 	    PENDING(&t->pending, &t->blocked) ||
158 	    PENDING(&t->signal->shared_pending, &t->blocked) ||
159 	    cgroup_task_frozen(t)) {
160 		set_tsk_thread_flag(t, TIF_SIGPENDING);
161 		return true;
162 	}
163 
164 	/*
165 	 * We must never clear the flag in another thread, or in current
166 	 * when it's possible the current syscall is returning -ERESTART*.
167 	 * So we don't clear it here; only callers who know what they are doing clear it.
168 	 */
169 	return false;
170 }
171 
172 /*
173  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
174  * This is superfluous when called on current, the wakeup is a harmless no-op.
175  */
176 void recalc_sigpending_and_wake(struct task_struct *t)
177 {
178 	if (recalc_sigpending_tsk(t))
179 		signal_wake_up(t, 0);
180 }
181 
182 void recalc_sigpending(void)
183 {
184 	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
185 	    !klp_patch_pending(current))
186 		clear_thread_flag(TIF_SIGPENDING);
187 
188 }
189 EXPORT_SYMBOL(recalc_sigpending);
190 
191 void calculate_sigpending(void)
192 {
193 	/* Have any signals or users of TIF_SIGPENDING been delayed
194 	 * until after fork?
195 	 */
196 	spin_lock_irq(&current->sighand->siglock);
197 	set_tsk_thread_flag(current, TIF_SIGPENDING);
198 	recalc_sigpending();
199 	spin_unlock_irq(&current->sighand->siglock);
200 }
201 
202 /* Given the mask, find the first available signal that should be serviced. */
203 
204 #define SYNCHRONOUS_MASK \
205 	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
206 	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
207 
208 int next_signal(struct sigpending *pending, sigset_t *mask)
209 {
210 	unsigned long i, *s, *m, x;
211 	int sig = 0;
212 
213 	s = pending->signal.sig;
214 	m = mask->sig;
215 
216 	/*
217 	 * Handle the first word specially: it contains the
218 	 * synchronous signals that need to be dequeued first.
219 	 */
220 	x = *s &~ *m;
221 	if (x) {
222 		if (x & SYNCHRONOUS_MASK)
223 			x &= SYNCHRONOUS_MASK;
224 		sig = ffz(~x) + 1;
225 		return sig;
226 	}
227 
228 	switch (_NSIG_WORDS) {
229 	default:
230 		for (i = 1; i < _NSIG_WORDS; ++i) {
231 			x = *++s &~ *++m;
232 			if (!x)
233 				continue;
234 			sig = ffz(~x) + i*_NSIG_BPW + 1;
235 			break;
236 		}
237 		break;
238 
239 	case 2:
240 		x = s[1] &~ m[1];
241 		if (!x)
242 			break;
243 		sig = ffz(~x) + _NSIG_BPW + 1;
244 		break;
245 
246 	case 1:
247 		/* Nothing to do */
248 		break;
249 	}
250 
251 	return sig;
252 }
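
/*
 * Illustrative example (editor's addition, not part of the original
 * source): with SIGUSR1 (10) and SIGSEGV (11) both pending and
 * unblocked in word 0, the SYNCHRONOUS_MASK handling above picks the
 * synchronous fault first even though SIGUSR1 has the lower number:
 *
 *	x = *s & ~*m;		// bits 9 and 10 set
 *	x &= SYNCHRONOUS_MASK;	// only bit 10 (SIGSEGV) survives
 *	sig = ffz(~x) + 1;	// 10 + 1 == SIGSEGV
 */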
253 
254 static inline void print_dropped_signal(int sig)
255 {
256 	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
257 
258 	if (!print_fatal_signals)
259 		return;
260 
261 	if (!__ratelimit(&ratelimit_state))
262 		return;
263 
264 	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
265 				current->comm, current->pid, sig);
266 }
267 
268 /**
269  * task_set_jobctl_pending - set jobctl pending bits
270  * @task: target task
271  * @mask: pending bits to set
272  *
273  * Set @mask in @task->jobctl.  @mask must be a subset of
274  * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
275  * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
276  * cleared.  If @task is already being killed or exiting, this function
277  * becomes noop.
278  * becomes a no-op.
279  * CONTEXT:
280  * Must be called with @task->sighand->siglock held.
281  *
282  * RETURNS:
283  * %true if @mask is set, %false if it became a no-op because @task was dying.
284  */
285 bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
286 {
287 	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
288 			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
289 	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
290 
291 	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
292 		return false;
293 
294 	if (mask & JOBCTL_STOP_SIGMASK)
295 		task->jobctl &= ~JOBCTL_STOP_SIGMASK;
296 
297 	task->jobctl |= mask;
298 	return true;
299 }
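
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): a caller such as do_signal_stop() holds siglock and encodes
 * the stop signal number in the low JOBCTL_STOP_SIGMASK bits it passes
 * in; signr and gstop here are hypothetical local variables:
 *
 *	gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
 *	task_set_jobctl_pending(t, signr | gstop);
 *
 * Any previously recorded stop signo is replaced, per the rule above.
 */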
300 
301 /**
302  * task_clear_jobctl_trapping - clear jobctl trapping bit
303  * @task: target task
304  *
305  * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
306  * Clear it and wake up the ptracer.  Note that we don't need any further
307  * locking.  @task->siglock guarantees that @task->parent points to the
308  * locking.  @task->sighand->siglock guarantees that @task->parent points to the
309  *
310  * CONTEXT:
311  * Must be called with @task->sighand->siglock held.
312  */
313 void task_clear_jobctl_trapping(struct task_struct *task)
314 {
315 	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
316 		task->jobctl &= ~JOBCTL_TRAPPING;
317 		smp_mb();	/* advised by wake_up_bit() */
318 		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
319 	}
320 }
321 
322 /**
323  * task_clear_jobctl_pending - clear jobctl pending bits
324  * @task: target task
325  * @mask: pending bits to clear
326  *
327  * Clear @mask from @task->jobctl.  @mask must be subset of
328  * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
329  * STOP bits are cleared together.
330  *
331  * If clearing of @mask leaves no stop or trap pending, this function calls
332  * task_clear_jobctl_trapping().
333  *
334  * CONTEXT:
335  * Must be called with @task->sighand->siglock held.
336  */
337 void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
338 {
339 	BUG_ON(mask & ~JOBCTL_PENDING_MASK);
340 
341 	if (mask & JOBCTL_STOP_PENDING)
342 		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
343 
344 	task->jobctl &= ~mask;
345 
346 	if (!(task->jobctl & JOBCTL_PENDING_MASK))
347 		task_clear_jobctl_trapping(task);
348 }
349 
350 /**
351  * task_participate_group_stop - participate in a group stop
352  * @task: task participating in a group stop
353  *
354  * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
355  * Group stop states are cleared and the group stop count is consumed if
356  * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
357  * stop, the appropriate `SIGNAL_*` flags are set.
358  *
359  * CONTEXT:
360  * Must be called with @task->sighand->siglock held.
361  *
362  * RETURNS:
363  * %true if group stop completion should be notified to the parent, %false
364  * otherwise.
365  */
366 static bool task_participate_group_stop(struct task_struct *task)
367 {
368 	struct signal_struct *sig = task->signal;
369 	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
370 
371 	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
372 
373 	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
374 
375 	if (!consume)
376 		return false;
377 
378 	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
379 		sig->group_stop_count--;
380 
381 	/*
382 	 * Tell the caller to notify completion iff we are entering into a
383 	 * fresh group stop.  Read comment in do_signal_stop() for details.
384 	 */
385 	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
386 		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
387 		return true;
388 	}
389 	return false;
390 }
391 
392 void task_join_group_stop(struct task_struct *task)
393 {
394 	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
395 	struct signal_struct *sig = current->signal;
396 
397 	if (sig->group_stop_count) {
398 		sig->group_stop_count++;
399 		mask |= JOBCTL_STOP_CONSUME;
400 	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
401 		return;
402 
403 	/* Have the new thread join an ongoing signal group stop */
404 	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
405 }
406 
407 /*
408  * allocate a new signal queue record
409  * - this may be called without locks if and only if t == current, otherwise an
410  *   appropriate lock must be held to stop the target task from exiting
411  */
412 static struct sigqueue *
413 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
414 {
415 	struct sigqueue *q = NULL;
416 	struct ucounts *ucounts = NULL;
417 	long sigpending;
418 
419 	/*
420 	 * Protect access to @t credentials. This can go away when all
421 	 * callers hold rcu read lock.
422 	 *
423 	 * NOTE! A pending signal will hold on to the user refcount,
424 	 * and we get/put the refcount only when the sigpending count
425 	 * changes from/to zero.
426 	 */
427 	rcu_read_lock();
428 	ucounts = task_ucounts(t);
429 	sigpending = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
430 	if (sigpending == 1)
431 		ucounts = get_ucounts(ucounts);
432 	rcu_read_unlock();
433 
434 	if (override_rlimit || (sigpending < LONG_MAX && sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
435 		q = kmem_cache_alloc(sigqueue_cachep, flags);
436 	} else {
437 		print_dropped_signal(sig);
438 	}
439 
440 	if (unlikely(q == NULL)) {
441 		if (ucounts && dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1))
442 			put_ucounts(ucounts);
443 	} else {
444 		INIT_LIST_HEAD(&q->list);
445 		q->flags = 0;
446 		q->ucounts = ucounts;
447 	}
448 	return q;
449 }
450 
451 static void __sigqueue_free(struct sigqueue *q)
452 {
453 	if (q->flags & SIGQUEUE_PREALLOC)
454 		return;
455 	if (q->ucounts && dec_rlimit_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING, 1)) {
456 		put_ucounts(q->ucounts);
457 		q->ucounts = NULL;
458 	}
459 	kmem_cache_free(sigqueue_cachep, q);
460 }
461 
462 void flush_sigqueue(struct sigpending *queue)
463 {
464 	struct sigqueue *q;
465 
466 	sigemptyset(&queue->signal);
467 	while (!list_empty(&queue->list)) {
468 		q = list_entry(queue->list.next, struct sigqueue, list);
469 		list_del_init(&q->list);
470 		__sigqueue_free(q);
471 	}
472 }
473 
474 /*
475  * Flush all pending signals for this kthread.
476  */
477 void flush_signals(struct task_struct *t)
478 {
479 	unsigned long flags;
480 
481 	spin_lock_irqsave(&t->sighand->siglock, flags);
482 	clear_tsk_thread_flag(t, TIF_SIGPENDING);
483 	flush_sigqueue(&t->pending);
484 	flush_sigqueue(&t->signal->shared_pending);
485 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
486 }
487 EXPORT_SYMBOL(flush_signals);
488 
489 #ifdef CONFIG_POSIX_TIMERS
490 static void __flush_itimer_signals(struct sigpending *pending)
491 {
492 	sigset_t signal, retain;
493 	struct sigqueue *q, *n;
494 
495 	signal = pending->signal;
496 	sigemptyset(&retain);
497 
498 	list_for_each_entry_safe(q, n, &pending->list, list) {
499 		int sig = q->info.si_signo;
500 
501 		if (likely(q->info.si_code != SI_TIMER)) {
502 			sigaddset(&retain, sig);
503 		} else {
504 			sigdelset(&signal, sig);
505 			list_del_init(&q->list);
506 			__sigqueue_free(q);
507 		}
508 	}
509 
510 	sigorsets(&pending->signal, &signal, &retain);
511 }
512 
513 void flush_itimer_signals(void)
514 {
515 	struct task_struct *tsk = current;
516 	unsigned long flags;
517 
518 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
519 	__flush_itimer_signals(&tsk->pending);
520 	__flush_itimer_signals(&tsk->signal->shared_pending);
521 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
522 }
523 #endif
524 
525 void ignore_signals(struct task_struct *t)
526 {
527 	int i;
528 
529 	for (i = 0; i < _NSIG; ++i)
530 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
531 
532 	flush_signals(t);
533 }
534 
535 /*
536  * Flush all handlers for a task.
537  */
538 
539 void
540 flush_signal_handlers(struct task_struct *t, int force_default)
541 {
542 	int i;
543 	struct k_sigaction *ka = &t->sighand->action[0];
544 	for (i = _NSIG ; i != 0 ; i--) {
545 		if (force_default || ka->sa.sa_handler != SIG_IGN)
546 			ka->sa.sa_handler = SIG_DFL;
547 		ka->sa.sa_flags = 0;
548 #ifdef __ARCH_HAS_SA_RESTORER
549 		ka->sa.sa_restorer = NULL;
550 #endif
551 		sigemptyset(&ka->sa.sa_mask);
552 		ka++;
553 	}
554 }
555 
556 bool unhandled_signal(struct task_struct *tsk, int sig)
557 {
558 	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
559 	if (is_global_init(tsk))
560 		return true;
561 
562 	if (handler != SIG_IGN && handler != SIG_DFL)
563 		return false;
564 
565 	/* if ptraced, let the tracer determine */
566 	return !tsk->ptrace;
567 }
568 
569 static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
570 			   bool *resched_timer)
571 {
572 	struct sigqueue *q, *first = NULL;
573 
574 	/*
575 	 * Collect the siginfo appropriate to this signal.  Check if
576 	 * there is another siginfo for the same signal.
577 	 */
578 	list_for_each_entry(q, &list->list, list) {
579 		if (q->info.si_signo == sig) {
580 			if (first)
581 				goto still_pending;
582 			first = q;
583 		}
584 	}
585 
586 	sigdelset(&list->signal, sig);
587 
588 	if (first) {
589 still_pending:
590 		list_del_init(&first->list);
591 		copy_siginfo(info, &first->info);
592 
593 		*resched_timer =
594 			(first->flags & SIGQUEUE_PREALLOC) &&
595 			(info->si_code == SI_TIMER) &&
596 			(info->si_sys_private);
597 
598 		__sigqueue_free(first);
599 	} else {
600 		/*
601 		 * Ok, it wasn't in the queue.  This must be
602 		 * a fast-pathed signal or we must have been
603 		 * out of queue space.  So zero out the info.
604 		 */
605 		clear_siginfo(info);
606 		info->si_signo = sig;
607 		info->si_errno = 0;
608 		info->si_code = SI_USER;
609 		info->si_pid = 0;
610 		info->si_uid = 0;
611 	}
612 }
613 
614 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
615 			kernel_siginfo_t *info, bool *resched_timer)
616 {
617 	int sig = next_signal(pending, mask);
618 
619 	if (sig)
620 		collect_signal(sig, pending, info, resched_timer);
621 	return sig;
622 }
623 
624 /*
625  * Dequeue a signal and return the element to the caller, which is
626  * expected to free it.
627  *
628  * All callers have to hold the siglock.
629  */
630 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
631 {
632 	bool resched_timer = false;
633 	int signr;
634 
635 	/* We only dequeue private signals from ourselves; we don't let
636 	 * signalfd steal them.
637 	 */
638 	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
639 	if (!signr) {
640 		signr = __dequeue_signal(&tsk->signal->shared_pending,
641 					 mask, info, &resched_timer);
642 #ifdef CONFIG_POSIX_TIMERS
643 		/*
644 		 * itimer signal?
645 		 *
646 		 * itimers are process shared and we restart periodic
647 		 * itimers in the signal delivery path to prevent DoS
648 		 * attacks in the high resolution timer case. This is
649 		 * compliant with the old way of self-restarting
650 		 * itimers, as the SIGALRM is a legacy signal and only
651 		 * queued once. Changing the restart behaviour to
652 		 * restart the timer in the signal dequeue path also
653 		 * reduces timer noise on heavily loaded !highres
654 		 * systems.
655 		 */
656 		if (unlikely(signr == SIGALRM)) {
657 			struct hrtimer *tmr = &tsk->signal->real_timer;
658 
659 			if (!hrtimer_is_queued(tmr) &&
660 			    tsk->signal->it_real_incr != 0) {
661 				hrtimer_forward(tmr, tmr->base->get_time(),
662 						tsk->signal->it_real_incr);
663 				hrtimer_restart(tmr);
664 			}
665 		}
666 #endif
667 	}
668 
669 	recalc_sigpending();
670 	if (!signr)
671 		return 0;
672 
673 	if (unlikely(sig_kernel_stop(signr))) {
674 		/*
675 		 * Set a marker that we have dequeued a stop signal.  Our
676 		 * caller might release the siglock and then the pending
677 		 * stop signal it is about to process is no longer in the
678 		 * pending bitmasks, but must still be cleared by a SIGCONT
679 		 * (and overruled by a SIGKILL).  So those cases clear this
680 		 * shared flag after we've set it.  Note that this flag may
681 		 * remain set after the signal we return is ignored or
682 		 * handled.  That doesn't matter because its only purpose
683 		 * is to alert stop-signal processing code when another
684 		 * processor has come along and cleared the flag.
685 		 */
686 		current->jobctl |= JOBCTL_STOP_DEQUEUED;
687 	}
688 #ifdef CONFIG_POSIX_TIMERS
689 	if (resched_timer) {
690 		/*
691 		 * Release the siglock to ensure proper locking order
692 		 * of timer locks outside of siglocks.  Note, we leave
693 		 * irqs disabled here, since the posix-timers code is
694 		 * about to disable them again anyway.
695 		 */
696 		spin_unlock(&tsk->sighand->siglock);
697 		posixtimer_rearm(info);
698 		spin_lock(&tsk->sighand->siglock);
699 
700 		/* Don't expose the si_sys_private value to userspace */
701 		info->si_sys_private = 0;
702 	}
703 #endif
704 	return signr;
705 }
706 EXPORT_SYMBOL_GPL(dequeue_signal);
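
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): dequeueing is always done under siglock, roughly as the
 * signal delivery loop does:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 *	if (!signr)
 *		;	// nothing deliverable was pending
 */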
707 
708 static int dequeue_synchronous_signal(kernel_siginfo_t *info)
709 {
710 	struct task_struct *tsk = current;
711 	struct sigpending *pending = &tsk->pending;
712 	struct sigqueue *q, *sync = NULL;
713 
714 	/*
715 	 * Might a synchronous signal be in the queue?
716 	 */
717 	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
718 		return 0;
719 
720 	/*
721 	 * Return the first synchronous signal in the queue.
722 	 */
723 	list_for_each_entry(q, &pending->list, list) {
724 		/* Synchronous signals have a positive si_code */
725 		if ((q->info.si_code > SI_USER) &&
726 		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
727 			sync = q;
728 			goto next;
729 		}
730 	}
731 	return 0;
732 next:
733 	/*
734 	 * Check if there is another siginfo for the same signal.
735 	 */
736 	list_for_each_entry_continue(q, &pending->list, list) {
737 		if (q->info.si_signo == sync->info.si_signo)
738 			goto still_pending;
739 	}
740 
741 	sigdelset(&pending->signal, sync->info.si_signo);
742 	recalc_sigpending();
743 still_pending:
744 	list_del_init(&sync->list);
745 	copy_siginfo(info, &sync->info);
746 	__sigqueue_free(sync);
747 	return info->si_signo;
748 }
749 
750 /*
751  * Tell a process that it has a new active signal.
752  *
753  * NOTE! We rely on the previous spin_lock to
754  * lock interrupts for us! We can only be called with
755  * "siglock" held, and local interrupts must
756  * have been disabled when that got acquired!
757  *
758  * No need to set need_resched since signal event passing
759  * goes through ->blocked
760  */
761 void signal_wake_up_state(struct task_struct *t, unsigned int state)
762 {
763 	set_tsk_thread_flag(t, TIF_SIGPENDING);
764 	/*
765 	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
766 	 * case. We don't check t->state here because there is a race with it
767 	 * executing on another processor and just now entering stopped state.
768 	 * By using wake_up_state, we ensure the process will wake up and
769 	 * handle its death signal.
770 	 */
771 	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
772 		kick_process(t);
773 }
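
/*
 * Editor's note (not part of the original source): callers normally use
 * the signal_wake_up() wrapper from <linux/sched/signal.h>, where the
 * second argument decides whether TASK_WAKEKILL sleepers are woken too:
 *
 *	signal_wake_up(t, 0);	// ordinary signal: interruptible sleep only
 *	signal_wake_up(t, 1);	// fatal signal: also wake killable sleepers
 */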
774 
775 /*
776  * Remove signals in mask from the pending set and queue.
778  *
779  * All callers must be holding the siglock.
780  */
781 static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
782 {
783 	struct sigqueue *q, *n;
784 	sigset_t m;
785 
786 	sigandsets(&m, mask, &s->signal);
787 	if (sigisemptyset(&m))
788 		return;
789 
790 	sigandnsets(&s->signal, &s->signal, mask);
791 	list_for_each_entry_safe(q, n, &s->list, list) {
792 		if (sigismember(mask, q->info.si_signo)) {
793 			list_del_init(&q->list);
794 			__sigqueue_free(q);
795 		}
796 	}
797 }
798 
799 static inline int is_si_special(const struct kernel_siginfo *info)
800 {
801 	return info <= SEND_SIG_PRIV;
802 }
803 
804 static inline bool si_fromuser(const struct kernel_siginfo *info)
805 {
806 	return info == SEND_SIG_NOINFO ||
807 		(!is_si_special(info) && SI_FROMUSER(info));
808 }
809 
810 /*
811  * called with RCU read lock from check_kill_permission()
812  */
813 static bool kill_ok_by_cred(struct task_struct *t)
814 {
815 	const struct cred *cred = current_cred();
816 	const struct cred *tcred = __task_cred(t);
817 
818 	return uid_eq(cred->euid, tcred->suid) ||
819 	       uid_eq(cred->euid, tcred->uid) ||
820 	       uid_eq(cred->uid, tcred->suid) ||
821 	       uid_eq(cred->uid, tcred->uid) ||
822 	       ns_capable(tcred->user_ns, CAP_KILL);
823 }
824 
825 /*
826  * Bad permissions for sending the signal
827  * - the caller must hold the RCU read lock
828  */
829 static int check_kill_permission(int sig, struct kernel_siginfo *info,
830 				 struct task_struct *t)
831 {
832 	struct pid *sid;
833 	int error;
834 
835 	if (!valid_signal(sig))
836 		return -EINVAL;
837 
838 	if (!si_fromuser(info))
839 		return 0;
840 
841 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
842 	if (error)
843 		return error;
844 
845 	if (!same_thread_group(current, t) &&
846 	    !kill_ok_by_cred(t)) {
847 		switch (sig) {
848 		case SIGCONT:
849 			sid = task_session(t);
850 			/*
851 			 * We don't return the error if sid == NULL. The
852 			 * task was unhashed, the caller must notice this.
853 			 */
854 			if (!sid || sid == task_session(current))
855 				break;
856 			fallthrough;
857 		default:
858 			return -EPERM;
859 		}
860 	}
861 
862 	return security_task_kill(t, info, sig, NULL);
863 }
864 
865 /**
866  * ptrace_trap_notify - schedule trap to notify ptracer
867  * @t: tracee wanting to notify tracer
868  *
869  * This function schedules sticky ptrace trap which is cleared on the next
870  * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
871  * ptracer.
872  *
873  * If @t is running, STOP trap will be taken.  If trapped for STOP and
874  * ptracer is listening for events, tracee is woken up so that it can
875  * re-trap for the new event.  If trapped otherwise, STOP trap will be
876  * eventually taken without returning to userland after the existing traps
877  * are finished by PTRACE_CONT.
878  *
879  * CONTEXT:
880  * Must be called with @task->sighand->siglock held.
881  */
882 static void ptrace_trap_notify(struct task_struct *t)
883 {
884 	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
885 	assert_spin_locked(&t->sighand->siglock);
886 
887 	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
888 	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
889 }
890 
891 /*
892  * Handle magic process-wide effects of stop/continue signals. Unlike
893  * the signal actions, these happen immediately at signal-generation
894  * time regardless of blocking, ignoring, or handling.  This does the
895  * actual continuing for SIGCONT, but not the actual stopping for stop
896  * signals. The process stop is done as a signal action for SIG_DFL.
897  *
898  * Returns true if the signal should be actually delivered, otherwise
899  * it should be dropped.
900  */
901 static bool prepare_signal(int sig, struct task_struct *p, bool force)
902 {
903 	struct signal_struct *signal = p->signal;
904 	struct task_struct *t;
905 	sigset_t flush;
906 
907 	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
908 		if (!(signal->flags & SIGNAL_GROUP_EXIT))
909 			return sig == SIGKILL;
910 		/*
911 		 * The process is in the middle of dying, nothing to do.
912 		 */
913 	} else if (sig_kernel_stop(sig)) {
914 		/*
915 		 * This is a stop signal.  Remove SIGCONT from all queues.
916 		 */
917 		siginitset(&flush, sigmask(SIGCONT));
918 		flush_sigqueue_mask(&flush, &signal->shared_pending);
919 		for_each_thread(p, t)
920 			flush_sigqueue_mask(&flush, &t->pending);
921 	} else if (sig == SIGCONT) {
922 		unsigned int why;
923 		/*
924 		 * Remove all stop signals from all queues, wake all threads.
925 		 */
926 		siginitset(&flush, SIG_KERNEL_STOP_MASK);
927 		flush_sigqueue_mask(&flush, &signal->shared_pending);
928 		for_each_thread(p, t) {
929 			flush_sigqueue_mask(&flush, &t->pending);
930 			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
931 			if (likely(!(t->ptrace & PT_SEIZED)))
932 				wake_up_state(t, __TASK_STOPPED);
933 			else
934 				ptrace_trap_notify(t);
935 		}
936 
937 		/*
938 		 * Notify the parent with CLD_CONTINUED if we were stopped.
939 		 *
940 		 * If we were in the middle of a group stop, we pretend it
941 		 * was already finished, and then continued. Since SIGCHLD
942 		 * doesn't queue we report only CLD_STOPPED, as if the next
943 		 * CLD_CONTINUED was dropped.
944 		 */
945 		why = 0;
946 		if (signal->flags & SIGNAL_STOP_STOPPED)
947 			why |= SIGNAL_CLD_CONTINUED;
948 		else if (signal->group_stop_count)
949 			why |= SIGNAL_CLD_STOPPED;
950 
951 		if (why) {
952 			/*
953 			 * The first thread which returns from do_signal_stop()
954 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
955 			 * notify its parent. See get_signal().
956 			 */
957 			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
958 			signal->group_stop_count = 0;
959 			signal->group_exit_code = 0;
960 		}
961 	}
962 
963 	return !sig_ignored(p, sig, force);
964 }
965 
966 /*
967  * Test if P wants to take SIG.  After we've checked all threads with this,
968  * it's equivalent to finding no threads not blocking SIG.  Any threads not
969  * blocking SIG were ruled out because they are not running and already
970  * have pending signals.  Such threads will dequeue from the shared queue
971  * as soon as they're available, so putting the signal on the shared queue
972  * will be equivalent to sending it to one such thread.
973  */
974 static inline bool wants_signal(int sig, struct task_struct *p)
975 {
976 	if (sigismember(&p->blocked, sig))
977 		return false;
978 
979 	if (p->flags & PF_EXITING)
980 		return false;
981 
982 	if (sig == SIGKILL)
983 		return true;
984 
985 	if (task_is_stopped_or_traced(p))
986 		return false;
987 
988 	return task_curr(p) || !task_sigpending(p);
989 }
990 
991 static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
992 {
993 	struct signal_struct *signal = p->signal;
994 	struct task_struct *t;
995 
996 	/*
997 	 * Now find a thread we can wake up to take the signal off the queue.
998 	 *
999 	 * If the main thread wants the signal, it gets first crack.
1000 	 * Probably the least surprising to the average bear.
1001 	 */
1002 	if (wants_signal(sig, p))
1003 		t = p;
1004 	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
1005 		/*
1006 		 * There is just one thread and it does not need to be woken.
1007 		 * It will dequeue unblocked signals before it runs again.
1008 		 */
1009 		return;
1010 	else {
1011 		/*
1012 		 * Otherwise try to find a suitable thread.
1013 		 */
1014 		t = signal->curr_target;
1015 		while (!wants_signal(sig, t)) {
1016 			t = next_thread(t);
1017 			if (t == signal->curr_target)
1018 				/*
1019 				 * No thread needs to be woken.
1020 				 * Any eligible threads will see
1021 				 * the signal in the queue soon.
1022 				 */
1023 				return;
1024 		}
1025 		signal->curr_target = t;
1026 	}
1027 
1028 	/*
1029 	 * Found a killable thread.  If the signal will be fatal,
1030 	 * then start taking the whole group down immediately.
1031 	 */
1032 	if (sig_fatal(p, sig) &&
1033 	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
1034 	    !sigismember(&t->real_blocked, sig) &&
1035 	    (sig == SIGKILL || !p->ptrace)) {
1036 		/*
1037 		 * This signal will be fatal to the whole group.
1038 		 */
1039 		if (!sig_kernel_coredump(sig)) {
1040 			/*
1041 			 * Start a group exit and wake everybody up.
1042 			 * This way we don't have other threads
1043 			 * running and doing things after a slower
1044 			 * thread has the fatal signal pending.
1045 			 */
1046 			signal->flags = SIGNAL_GROUP_EXIT;
1047 			signal->group_exit_code = sig;
1048 			signal->group_stop_count = 0;
1049 			t = p;
1050 			do {
1051 				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1052 				sigaddset(&t->pending.signal, SIGKILL);
1053 				signal_wake_up(t, 1);
1054 			} while_each_thread(p, t);
1055 			return;
1056 		}
1057 	}
1058 
1059 	/*
1060 	 * The signal is already in the shared-pending queue.
1061 	 * Tell the chosen thread to wake up and dequeue it.
1062 	 */
1063 	signal_wake_up(t, sig == SIGKILL);
1064 	return;
1065 }
1066 
1067 static inline bool legacy_queue(struct sigpending *signals, int sig)
1068 {
1069 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1070 }
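
/*
 * Illustrative example (editor's addition, not part of the original
 * source): legacy (< SIGRTMIN) signals coalesce while pending, rt
 * signals queue up, e.g. from userspace:
 *
 *	kill(pid, SIGUSR1);		// becomes pending
 *	kill(pid, SIGUSR1);		// merged, still one SIGUSR1
 *	sigqueue(pid, SIGRTMIN, v);	// queued
 *	sigqueue(pid, SIGRTMIN, v);	// queued again, two deliveries
 *
 * which is why __send_signal() below bails out early via legacy_queue().
 */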
1071 
1072 static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1073 			enum pid_type type, bool force)
1074 {
1075 	struct sigpending *pending;
1076 	struct sigqueue *q;
1077 	int override_rlimit;
1078 	int ret = 0, result;
1079 
1080 	assert_spin_locked(&t->sighand->siglock);
1081 
1082 	result = TRACE_SIGNAL_IGNORED;
1083 	if (!prepare_signal(sig, t, force))
1084 		goto ret;
1085 
1086 	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1087 	/*
1088 	 * Short-circuit ignored signals and support queuing
1089 	 * exactly one non-rt signal, so that we can get more
1090 	 * detailed information about the cause of the signal.
1091 	 */
1092 	result = TRACE_SIGNAL_ALREADY_PENDING;
1093 	if (legacy_queue(pending, sig))
1094 		goto ret;
1095 
1096 	result = TRACE_SIGNAL_DELIVERED;
1097 	/*
1098 	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
1099 	 */
1100 	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1101 		goto out_set;
1102 
1103 	/*
1104 	 * Real-time signals must be queued if sent by sigqueue, or
1105 	 * some other real-time mechanism.  It is implementation
1106 	 * defined whether kill() does so.  We attempt to do so, on
1107 	 * the principle of least surprise, but since kill is not
1108 	 * allowed to fail with EAGAIN when low on memory we just
1109 	 * make sure at least one signal gets delivered and don't
1110 	 * pass on the info struct.
1111 	 */
1112 	if (sig < SIGRTMIN)
1113 		override_rlimit = (is_si_special(info) || info->si_code >= 0);
1114 	else
1115 		override_rlimit = 0;
1116 
1117 	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
1118 	if (q) {
1119 		list_add_tail(&q->list, &pending->list);
1120 		switch ((unsigned long) info) {
1121 		case (unsigned long) SEND_SIG_NOINFO:
1122 			clear_siginfo(&q->info);
1123 			q->info.si_signo = sig;
1124 			q->info.si_errno = 0;
1125 			q->info.si_code = SI_USER;
1126 			q->info.si_pid = task_tgid_nr_ns(current,
1127 							task_active_pid_ns(t));
1128 			rcu_read_lock();
1129 			q->info.si_uid =
1130 				from_kuid_munged(task_cred_xxx(t, user_ns),
1131 						 current_uid());
1132 			rcu_read_unlock();
1133 			break;
1134 		case (unsigned long) SEND_SIG_PRIV:
1135 			clear_siginfo(&q->info);
1136 			q->info.si_signo = sig;
1137 			q->info.si_errno = 0;
1138 			q->info.si_code = SI_KERNEL;
1139 			q->info.si_pid = 0;
1140 			q->info.si_uid = 0;
1141 			break;
1142 		default:
1143 			copy_siginfo(&q->info, info);
1144 			break;
1145 		}
1146 	} else if (!is_si_special(info) &&
1147 		   sig >= SIGRTMIN && info->si_code != SI_USER) {
1148 		/*
1149 		 * Queue overflow, abort.  We only abort if the
1150 		 * signal was rt and was sent by a user using something
1151 		 * other than kill().
1152 		 */
1153 		result = TRACE_SIGNAL_OVERFLOW_FAIL;
1154 		ret = -EAGAIN;
1155 		goto ret;
1156 	} else {
1157 		/*
1158 		 * This is a silent loss of information.  We still
1159 		 * send the signal, but the *info bits are lost.
1160 		 */
1161 		result = TRACE_SIGNAL_LOSE_INFO;
1162 	}
1163 
1164 out_set:
1165 	signalfd_notify(t, sig);
1166 	sigaddset(&pending->signal, sig);
1167 
1168 	/* Let multiprocess signals appear after ongoing forks */
1169 	if (type > PIDTYPE_TGID) {
1170 		struct multiprocess_signals *delayed;
1171 		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1172 			sigset_t *signal = &delayed->signal;
1173 			/* Can't queue both a stop and a continue signal */
1174 			if (sig == SIGCONT)
1175 				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1176 			else if (sig_kernel_stop(sig))
1177 				sigdelset(signal, SIGCONT);
1178 			sigaddset(signal, sig);
1179 		}
1180 	}
1181 
1182 	complete_signal(sig, t, type);
1183 ret:
1184 	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1185 	return ret;
1186 }
1187 
1188 static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1189 {
1190 	bool ret = false;
1191 	switch (siginfo_layout(info->si_signo, info->si_code)) {
1192 	case SIL_KILL:
1193 	case SIL_CHLD:
1194 	case SIL_RT:
1195 		ret = true;
1196 		break;
1197 	case SIL_TIMER:
1198 	case SIL_POLL:
1199 	case SIL_FAULT:
1200 	case SIL_FAULT_MCEERR:
1201 	case SIL_FAULT_BNDERR:
1202 	case SIL_FAULT_PKUERR:
1203 	case SIL_SYS:
1204 		ret = false;
1205 		break;
1206 	}
1207 	return ret;
1208 }
1209 
1210 static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1211 			enum pid_type type)
1212 {
1213 	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1214 	bool force = false;
1215 
1216 	if (info == SEND_SIG_NOINFO) {
1217 		/* Force if sent from an ancestor pid namespace */
1218 		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1219 	} else if (info == SEND_SIG_PRIV) {
1220 		/* Don't ignore kernel generated signals */
1221 		force = true;
1222 	} else if (has_si_pid_and_uid(info)) {
1223 		/* SIGKILL and SIGSTOP are special, or the siginfo carries ids */
1224 		struct user_namespace *t_user_ns;
1225 
1226 		rcu_read_lock();
1227 		t_user_ns = task_cred_xxx(t, user_ns);
1228 		if (current_user_ns() != t_user_ns) {
1229 			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1230 			info->si_uid = from_kuid_munged(t_user_ns, uid);
1231 		}
1232 		rcu_read_unlock();
1233 
1234 		/* A kernel generated signal? */
1235 		force = (info->si_code == SI_KERNEL);
1236 
1237 		/* From an ancestor pid namespace? */
1238 		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1239 			info->si_pid = 0;
1240 			force = true;
1241 		}
1242 	}
1243 	return __send_signal(sig, info, t, type, force);
1244 }
1245 
1246 static void print_fatal_signal(int signr)
1247 {
1248 	struct pt_regs *regs = signal_pt_regs();
1249 	pr_info("potentially unexpected fatal signal %d.\n", signr);
1250 
1251 #if defined(__i386__) && !defined(__arch_um__)
1252 	pr_info("code at %08lx: ", regs->ip);
1253 	{
1254 		int i;
1255 		for (i = 0; i < 16; i++) {
1256 			unsigned char insn;
1257 
1258 			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1259 				break;
1260 			pr_cont("%02x ", insn);
1261 		}
1262 	}
1263 	pr_cont("\n");
1264 #endif
1265 	preempt_disable();
1266 	show_regs(regs);
1267 	preempt_enable();
1268 }
1269 
1270 static int __init setup_print_fatal_signals(char *str)
1271 {
1272 	get_option(&str, &print_fatal_signals);
1273 
1274 	return 1;
1275 }
1276 
1277 __setup("print-fatal-signals=", setup_print_fatal_signals);
1278 
1279 int
1280 __group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1281 {
1282 	return send_signal(sig, info, p, PIDTYPE_TGID);
1283 }
1284 
1285 int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1286 			enum pid_type type)
1287 {
1288 	unsigned long flags;
1289 	int ret = -ESRCH;
1290 
1291 	if (lock_task_sighand(p, &flags)) {
1292 		ret = send_signal(sig, info, p, type);
1293 		unlock_task_sighand(p, &flags);
1294 	}
1295 
1296 	return ret;
1297 }
1298 
1299 /*
1300  * Force a signal that the process can't ignore: if necessary
1301  * we unblock the signal and change any SIG_IGN to SIG_DFL.
1302  *
1303  * Note: If we unblock the signal, we always reset it to SIG_DFL,
1304  * since we do not want to have a signal handler that was blocked
1305  * be invoked when user space had explicitly blocked it.
1306  *
1307  * We don't want to have recursive SIGSEGV's etc, for example,
1308  * that is why we also clear SIGNAL_UNKILLABLE.
1309  */
1310 static int
1311 force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
1312 {
1313 	unsigned long int flags;
1314 	int ret, blocked, ignored;
1315 	struct k_sigaction *action;
1316 	int sig = info->si_signo;
1317 
1318 	spin_lock_irqsave(&t->sighand->siglock, flags);
1319 	action = &t->sighand->action[sig-1];
1320 	ignored = action->sa.sa_handler == SIG_IGN;
1321 	blocked = sigismember(&t->blocked, sig);
1322 	if (blocked || ignored) {
1323 		action->sa.sa_handler = SIG_DFL;
1324 		if (blocked) {
1325 			sigdelset(&t->blocked, sig);
1326 			recalc_sigpending_and_wake(t);
1327 		}
1328 	}
1329 	/*
1330 	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1331 	 * debugging to leave init killable.
1332 	 */
1333 	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
1334 		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1335 	ret = send_signal(sig, info, t, PIDTYPE_PID);
1336 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1337 
1338 	return ret;
1339 }
1340 
1341 int force_sig_info(struct kernel_siginfo *info)
1342 {
1343 	return force_sig_info_to_task(info, current);
1344 }
1345 
1346 /*
1347  * Nuke all other threads in the group.
1348  */
1349 int zap_other_threads(struct task_struct *p)
1350 {
1351 	struct task_struct *t = p;
1352 	int count = 0;
1353 
1354 	p->signal->group_stop_count = 0;
1355 
1356 	while_each_thread(p, t) {
1357 		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1358 		count++;
1359 
1360 		/* Don't bother with already dead threads */
1361 		if (t->exit_state)
1362 			continue;
1363 		sigaddset(&t->pending.signal, SIGKILL);
1364 		signal_wake_up(t, 1);
1365 	}
1366 
1367 	return count;
1368 }
1369 
1370 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1371 					   unsigned long *flags)
1372 {
1373 	struct sighand_struct *sighand;
1374 
1375 	rcu_read_lock();
1376 	for (;;) {
1377 		sighand = rcu_dereference(tsk->sighand);
1378 		if (unlikely(sighand == NULL))
1379 			break;
1380 
1381 		/*
1382 		 * This sighand may already have been freed and even reused, but
1383 		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1384 		 * initializes ->siglock: this slab can't go away, it has
1385 		 * the same object type, ->siglock can't be reinitialized.
1386 		 *
1387 		 * We need to ensure that tsk->sighand is still the same
1388 		 * after we take the lock, we can race with de_thread() or
1389 		 * __exit_signal(). In the latter case the next iteration
1390 		 * must see ->sighand == NULL.
1391 		 */
1392 		spin_lock_irqsave(&sighand->siglock, *flags);
1393 		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
1394 			break;
1395 		spin_unlock_irqrestore(&sighand->siglock, *flags);
1396 	}
1397 	rcu_read_unlock();
1398 
1399 	return sighand;
1400 }
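
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): users go through the lock_task_sighand() wrapper and must
 * handle the NULL (task has exited) case, as do_send_sig_info() above
 * does:
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		...	// p->sighand and its siglock are stable here
 *		unlock_task_sighand(p, &flags);
 *	} else
 *		ret = -ESRCH;
 */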
1401 
1402 /*
1403  * send signal info to all the members of a group
1404  */
1405 int group_send_sig_info(int sig, struct kernel_siginfo *info,
1406 			struct task_struct *p, enum pid_type type)
1407 {
1408 	int ret;
1409 
1410 	rcu_read_lock();
1411 	ret = check_kill_permission(sig, info, p);
1412 	rcu_read_unlock();
1413 
1414 	if (!ret && sig)
1415 		ret = do_send_sig_info(sig, info, p, type);
1416 
1417 	return ret;
1418 }
1419 
1420 /*
1421  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1422  * control characters do (^C, ^Z etc)
1423  * - the caller must hold at least a readlock on tasklist_lock
1424  */
1425 int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1426 {
1427 	struct task_struct *p = NULL;
1428 	int retval, success;
1429 
1430 	success = 0;
1431 	retval = -ESRCH;
1432 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1433 		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1434 		success |= !err;
1435 		retval = err;
1436 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1437 	return success ? 0 : retval;
1438 }
1439 
1440 int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1441 {
1442 	int error = -ESRCH;
1443 	struct task_struct *p;
1444 
1445 	for (;;) {
1446 		rcu_read_lock();
1447 		p = pid_task(pid, PIDTYPE_PID);
1448 		if (p)
1449 			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
1450 		rcu_read_unlock();
1451 		if (likely(!p || error != -ESRCH))
1452 			return error;
1453 
1454 		/*
1455 		 * The task was unhashed in between, try again.  If it
1456 		 * is dead, pid_task() will return NULL; if we race with
1457 		 * de_thread() it will find the new leader.
1458 		 */
1459 	}
1460 }
1461 
1462 static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1463 {
1464 	int error;
1465 	rcu_read_lock();
1466 	error = kill_pid_info(sig, info, find_vpid(pid));
1467 	rcu_read_unlock();
1468 	return error;
1469 }
1470 
1471 static inline bool kill_as_cred_perm(const struct cred *cred,
1472 				     struct task_struct *target)
1473 {
1474 	const struct cred *pcred = __task_cred(target);
1475 
1476 	return uid_eq(cred->euid, pcred->suid) ||
1477 	       uid_eq(cred->euid, pcred->uid) ||
1478 	       uid_eq(cred->uid, pcred->suid) ||
1479 	       uid_eq(cred->uid, pcred->uid);
1480 }
1481 
1482 /*
1483  * The usb asyncio usage of siginfo is wrong.  The glibc support
1484  * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1485  * AKA after the generic fields:
1486  *	kernel_pid_t	si_pid;
1487  *	kernel_uid32_t	si_uid;
1488  *	sigval_t	si_value;
1489  *
1490  * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1491  * after the generic fields is:
1492  *	void __user 	*si_addr;
1493  *
1494  * This is a practical problem when there is a 64bit big endian kernel
1495  * and a 32bit userspace.  The 32bit address will be encoded in the low
1496  * 32bits of the pointer.  Those low 32bits will be stored at a higher
1497  * address than they appear at in a 32bit pointer.  So userspace will
1498  * not see the address it was expecting for its completions.
1499  *
1500  * There is nothing in the encoding that can allow
1501  * copy_siginfo_to_user32 to detect this confusion of formats, so
1502  * handle this by requiring the caller of kill_pid_usb_asyncio to
1503  * notice when this situation takes place and to store the 32bit
1504  * pointer in sival_int, instead of sival_ptr, of the sigval_t addr
1505  * parameter.
1506  */
1507 int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1508 			 struct pid *pid, const struct cred *cred)
1509 {
1510 	struct kernel_siginfo info;
1511 	struct task_struct *p;
1512 	unsigned long flags;
1513 	int ret = -EINVAL;
1514 
1515 	if (!valid_signal(sig))
1516 		return ret;
1517 
1518 	clear_siginfo(&info);
1519 	info.si_signo = sig;
1520 	info.si_errno = errno;
1521 	info.si_code = SI_ASYNCIO;
1522 	*((sigval_t *)&info.si_pid) = addr;
1523 
1524 	rcu_read_lock();
1525 	p = pid_task(pid, PIDTYPE_PID);
1526 	if (!p) {
1527 		ret = -ESRCH;
1528 		goto out_unlock;
1529 	}
1530 	if (!kill_as_cred_perm(cred, p)) {
1531 		ret = -EPERM;
1532 		goto out_unlock;
1533 	}
1534 	ret = security_task_kill(p, &info, sig, cred);
1535 	if (ret)
1536 		goto out_unlock;
1537 
1538 	if (sig) {
1539 		if (lock_task_sighand(p, &flags)) {
1540 			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
1541 			unlock_task_sighand(p, &flags);
1542 		} else
1543 			ret = -ESRCH;
1544 	}
1545 out_unlock:
1546 	rcu_read_unlock();
1547 	return ret;
1548 }
1549 EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
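
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): per the comment above, a caller that knows the receiver is
 * 32bit on a 64bit big endian kernel packs the pointer itself;
 * user_ptr32 is a hypothetical compat pointer:
 *
 *	sigval_t addr;
 *
 *	addr.sival_int = (int)(long)user_ptr32;	// not addr.sival_ptr
 *	kill_pid_usb_asyncio(sig, errno, addr, pid, cred);
 */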
1550 
1551 /*
1552  * kill_something_info() interprets pid in interesting ways just like kill(2).
1553  *
1554  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1555  * is probably wrong.  Should make it like BSD or SYSV.
1556  */
1557 
1558 static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1559 {
1560 	int ret;
1561 
1562 	if (pid > 0)
1563 		return kill_proc_info(sig, info, pid);
1564 
1565 	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
1566 	if (pid == INT_MIN)
1567 		return -ESRCH;
1568 
1569 	read_lock(&tasklist_lock);
1570 	if (pid != -1) {
1571 		ret = __kill_pgrp_info(sig, info,
1572 				pid ? find_vpid(-pid) : task_pgrp(current));
1573 	} else {
1574 		int retval = 0, count = 0;
1575 		struct task_struct *p;
1576 
1577 		for_each_process(p) {
1578 			if (task_pid_vnr(p) > 1 &&
1579 					!same_thread_group(p, current)) {
1580 				int err = group_send_sig_info(sig, info, p,
1581 							      PIDTYPE_MAX);
1582 				++count;
1583 				if (err != -EPERM)
1584 					retval = err;
1585 			}
1586 		}
1587 		ret = count ? retval : -ESRCH;
1588 	}
1589 	read_unlock(&tasklist_lock);
1590 
1591 	return ret;
1592 }
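
/*
 * Illustrative example (editor's addition, not part of the original
 * source): the pid argument follows kill(2) semantics:
 *
 *	kill_something_info(sig, info, 1234);	// just process 1234
 *	kill_something_info(sig, info, 0);	// caller's process group
 *	kill_something_info(sig, info, -1234);	// process group 1234
 *	kill_something_info(sig, info, -1);	// everything except init
 *						// and ourselves
 */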
1593 
1594 /*
1595  * These are for backward compatibility with the rest of the kernel source.
1596  */
1597 
1598 int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1599 {
1600 	/*
1601 	 * Make sure legacy kernel users don't send in bad values
1602 	 * (normal paths check this in check_kill_permission).
1603 	 */
1604 	if (!valid_signal(sig))
1605 		return -EINVAL;
1606 
1607 	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1608 }
1609 EXPORT_SYMBOL(send_sig_info);
1610 
1611 #define __si_special(priv) \
1612 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1613 
1614 int
1615 send_sig(int sig, struct task_struct *p, int priv)
1616 {
1617 	return send_sig_info(sig, __si_special(priv), p);
1618 }
1619 EXPORT_SYMBOL(send_sig);
1620 
1621 void force_sig(int sig)
1622 {
1623 	struct kernel_siginfo info;
1624 
1625 	clear_siginfo(&info);
1626 	info.si_signo = sig;
1627 	info.si_errno = 0;
1628 	info.si_code = SI_KERNEL;
1629 	info.si_pid = 0;
1630 	info.si_uid = 0;
1631 	force_sig_info(&info);
1632 }
1633 EXPORT_SYMBOL(force_sig);
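
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): architecture fault handlers use force_sig() for the current
 * task when a user access cannot be fixed up, e.g.:
 *
 *	if (user_mode(regs))
 *		force_sig(SIGSEGV);	// cannot remain blocked or ignored
 */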
1634 
1635 /*
1636  * When things go south during signal handling, we
1637  * will force a SIGSEGV. And if the signal that caused
1638  * the problem was already a SIGSEGV, we'll want to
1639  * make sure we don't even try to deliver the signal.
1640  */
1641 void force_sigsegv(int sig)
1642 {
1643 	struct task_struct *p = current;
1644 
1645 	if (sig == SIGSEGV) {
1646 		unsigned long flags;
1647 		spin_lock_irqsave(&p->sighand->siglock, flags);
1648 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1649 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1650 	}
1651 	force_sig(SIGSEGV);
1652 }
1653 
1654 int force_sig_fault_to_task(int sig, int code, void __user *addr
1655 	___ARCH_SI_TRAPNO(int trapno)
1656 	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1657 	, struct task_struct *t)
1658 {
1659 	struct kernel_siginfo info;
1660 
1661 	clear_siginfo(&info);
1662 	info.si_signo = sig;
1663 	info.si_errno = 0;
1664 	info.si_code  = code;
1665 	info.si_addr  = addr;
1666 #ifdef __ARCH_SI_TRAPNO
1667 	info.si_trapno = trapno;
1668 #endif
1669 #ifdef __ia64__
1670 	info.si_imm = imm;
1671 	info.si_flags = flags;
1672 	info.si_isr = isr;
1673 #endif
1674 	return force_sig_info_to_task(&info, t);
1675 }
1676 
1677 int force_sig_fault(int sig, int code, void __user *addr
1678 	___ARCH_SI_TRAPNO(int trapno)
1679 	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
1680 {
1681 	return force_sig_fault_to_task(sig, code, addr
1682 				       ___ARCH_SI_TRAPNO(trapno)
1683 				       ___ARCH_SI_IA64(imm, flags, isr), current);
1684 }
1685 
1686 int send_sig_fault(int sig, int code, void __user *addr
1687 	___ARCH_SI_TRAPNO(int trapno)
1688 	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1689 	, struct task_struct *t)
1690 {
1691 	struct kernel_siginfo info;
1692 
1693 	clear_siginfo(&info);
1694 	info.si_signo = sig;
1695 	info.si_errno = 0;
1696 	info.si_code  = code;
1697 	info.si_addr  = addr;
1698 #ifdef __ARCH_SI_TRAPNO
1699 	info.si_trapno = trapno;
1700 #endif
1701 #ifdef __ia64__
1702 	info.si_imm = imm;
1703 	info.si_flags = flags;
1704 	info.si_isr = isr;
1705 #endif
1706 	return send_sig_info(info.si_signo, &info, t);
1707 }
1708 
1709 int force_sig_mceerr(int code, void __user *addr, short lsb)
1710 {
1711 	struct kernel_siginfo info;
1712 
1713 	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1714 	clear_siginfo(&info);
1715 	info.si_signo = SIGBUS;
1716 	info.si_errno = 0;
1717 	info.si_code = code;
1718 	info.si_addr = addr;
1719 	info.si_addr_lsb = lsb;
1720 	return force_sig_info(&info);
1721 }
1722 
1723 int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1724 {
1725 	struct kernel_siginfo info;
1726 
1727 	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1728 	clear_siginfo(&info);
1729 	info.si_signo = SIGBUS;
1730 	info.si_errno = 0;
1731 	info.si_code = code;
1732 	info.si_addr = addr;
1733 	info.si_addr_lsb = lsb;
1734 	return send_sig_info(info.si_signo, &info, t);
1735 }
1736 EXPORT_SYMBOL(send_sig_mceerr);
1737 
1738 int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1739 {
1740 	struct kernel_siginfo info;
1741 
1742 	clear_siginfo(&info);
1743 	info.si_signo = SIGSEGV;
1744 	info.si_errno = 0;
1745 	info.si_code  = SEGV_BNDERR;
1746 	info.si_addr  = addr;
1747 	info.si_lower = lower;
1748 	info.si_upper = upper;
1749 	return force_sig_info(&info);
1750 }
1751 
1752 #ifdef SEGV_PKUERR
1753 int force_sig_pkuerr(void __user *addr, u32 pkey)
1754 {
1755 	struct kernel_siginfo info;
1756 
1757 	clear_siginfo(&info);
1758 	info.si_signo = SIGSEGV;
1759 	info.si_errno = 0;
1760 	info.si_code  = SEGV_PKUERR;
1761 	info.si_addr  = addr;
1762 	info.si_pkey  = pkey;
1763 	return force_sig_info(&info);
1764 }
1765 #endif
1766 
1767 /* For the crazy architectures that include trap information in
1768  * the errno field, instead of an actual errno value.
1769  */
1770 int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1771 {
1772 	struct kernel_siginfo info;
1773 
1774 	clear_siginfo(&info);
1775 	info.si_signo = SIGTRAP;
1776 	info.si_errno = errno;
1777 	info.si_code  = TRAP_HWBKPT;
1778 	info.si_addr  = addr;
1779 	return force_sig_info(&info);
1780 }
1781 
1782 int kill_pgrp(struct pid *pid, int sig, int priv)
1783 {
1784 	int ret;
1785 
1786 	read_lock(&tasklist_lock);
1787 	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1788 	read_unlock(&tasklist_lock);
1789 
1790 	return ret;
1791 }
1792 EXPORT_SYMBOL(kill_pgrp);
1793 
1794 int kill_pid(struct pid *pid, int sig, int priv)
1795 {
1796 	return kill_pid_info(sig, __si_special(priv), pid);
1797 }
1798 EXPORT_SYMBOL(kill_pid);
1799 
1800 /*
1801  * These functions support sending signals using preallocated sigqueue
1802  * structures.  This is needed "because realtime applications cannot
1803  * afford to lose notifications of asynchronous events, like timer
1804  * expirations or I/O completions".  In the case of POSIX Timers
1805  * we allocate the sigqueue structure from the timer_create.  If this
1806  * allocation fails we are able to report the failure to the application
1807  * with an EAGAIN error.
1808  */
1809 struct sigqueue *sigqueue_alloc(void)
1810 {
1811 	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1812 
1813 	if (q)
1814 		q->flags |= SIGQUEUE_PREALLOC;
1815 
1816 	return q;
1817 }
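
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): the POSIX timer flow described above allocates once and
 * reuses the entry for every expiry:
 *
 *	q = sigqueue_alloc();		// at timer_create(); NULL => EAGAIN
 *	...
 *	send_sigqueue(q, pid, type);	// at each expiry, no allocation
 *	...
 *	sigqueue_free(q);		// at timer_delete()
 */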
1818 
1819 void sigqueue_free(struct sigqueue *q)
1820 {
1821 	unsigned long flags;
1822 	spinlock_t *lock = &current->sighand->siglock;
1823 
1824 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1825 	/*
1826 	 * We must hold ->siglock while testing q->list
1827 	 * to serialize with collect_signal() or with
1828 	 * __exit_signal()->flush_sigqueue().
1829 	 */
1830 	spin_lock_irqsave(lock, flags);
1831 	q->flags &= ~SIGQUEUE_PREALLOC;
1832 	/*
1833 	 * If it is queued it will be freed when dequeued,
1834 	 * like the "regular" sigqueue.
1835 	 */
1836 	if (!list_empty(&q->list))
1837 		q = NULL;
1838 	spin_unlock_irqrestore(lock, flags);
1839 
1840 	if (q)
1841 		__sigqueue_free(q);
1842 }
1843 
1844 int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1845 {
1846 	int sig = q->info.si_signo;
1847 	struct sigpending *pending;
1848 	struct task_struct *t;
1849 	unsigned long flags;
1850 	int ret, result;
1851 
1852 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1853 
1854 	ret = -1;
1855 	rcu_read_lock();
1856 	t = pid_task(pid, type);
1857 	if (!t || !likely(lock_task_sighand(t, &flags)))
1858 		goto ret;
1859 
1860 	ret = 1; /* the signal is ignored */
1861 	result = TRACE_SIGNAL_IGNORED;
1862 	if (!prepare_signal(sig, t, false))
1863 		goto out;
1864 
1865 	ret = 0;
1866 	if (unlikely(!list_empty(&q->list))) {
1867 		/*
1868 		 * If an SI_TIMER entry is already queued, just increment
1869 		 * the overrun count.
1870 		 */
1871 		BUG_ON(q->info.si_code != SI_TIMER);
1872 		q->info.si_overrun++;
1873 		result = TRACE_SIGNAL_ALREADY_PENDING;
1874 		goto out;
1875 	}
1876 	q->info.si_overrun = 0;
1877 
1878 	signalfd_notify(t, sig);
1879 	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1880 	list_add_tail(&q->list, &pending->list);
1881 	sigaddset(&pending->signal, sig);
1882 	complete_signal(sig, t, type);
1883 	result = TRACE_SIGNAL_DELIVERED;
1884 out:
1885 	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
1886 	unlock_task_sighand(t, &flags);
1887 ret:
1888 	rcu_read_unlock();
1889 	return ret;
1890 }
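
/*
 * Illustrative sketch (hypothetical names, not built): the intended
 * lifecycle of a preallocated sigqueue, as used by POSIX timers.  The
 * allocation failure is reported at creation time with -EAGAIN instead of
 * being discovered at expiry.
 */
#if 0
struct example_timer {
	struct sigqueue *sigq;
	struct pid *target;
};

static int example_timer_create(struct example_timer *tmr)
{
	tmr->sigq = sigqueue_alloc();
	if (!tmr->sigq)
		return -EAGAIN;			/* report the failure up front */
	tmr->sigq->info.si_signo = SIGALRM;
	tmr->sigq->info.si_code = SI_TIMER;
	return 0;
}

static void example_timer_expire(struct example_timer *tmr)
{
	/* Re-sending an entry that is still queued just bumps si_overrun. */
	send_sigqueue(tmr->sigq, tmr->target, PIDTYPE_TGID);
}

static void example_timer_delete(struct example_timer *tmr)
{
	sigqueue_free(tmr->sigq);		/* safe even if still queued */
}
#endif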
1891 
1892 static void do_notify_pidfd(struct task_struct *task)
1893 {
1894 	struct pid *pid;
1895 
1896 	WARN_ON(task->exit_state == 0);
1897 	pid = task_pid(task);
1898 	wake_up_all(&pid->wait_pidfd);
1899 }
1900 
1901 /*
1902  * Let a parent know about the death of a child.
1903  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1904  *
1905  * Returns true if our parent ignored us and so we've switched to
1906  * self-reaping.
1907  */
1908 bool do_notify_parent(struct task_struct *tsk, int sig)
1909 {
1910 	struct kernel_siginfo info;
1911 	unsigned long flags;
1912 	struct sighand_struct *psig;
1913 	bool autoreap = false;
1914 	u64 utime, stime;
1915 
1916 	BUG_ON(sig == -1);
1917 
1918 	/* do_notify_parent_cldstop should have been called instead.  */
1919 	BUG_ON(task_is_stopped_or_traced(tsk));
1920 
1921 	BUG_ON(!tsk->ptrace &&
1922 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1923 
1924 	/* Wake up all pidfd waiters */
1925 	do_notify_pidfd(tsk);
1926 
1927 	if (sig != SIGCHLD) {
1928 		/*
1929 		 * This is only possible if parent == real_parent.
1930 		 * Check if it has changed security domain.
1931 		 */
1932 		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
1933 			sig = SIGCHLD;
1934 	}
1935 
1936 	clear_siginfo(&info);
1937 	info.si_signo = sig;
1938 	info.si_errno = 0;
1939 	/*
1940 	 * We are under tasklist_lock here so our parent is tied to
1941 	 * us and cannot change.
1942 	 *
1943 	 * task_active_pid_ns will always return the same pid namespace
1944 	 * until a task passes through release_task.
1945 	 *
1946 	 * write_lock() currently calls preempt_disable() which is the
1947 	 * same as rcu_read_lock(), but according to Oleg it is not
1948 	 * correct to rely on this.
1949 	 */
1950 	rcu_read_lock();
1951 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1952 	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1953 				       task_uid(tsk));
1954 	rcu_read_unlock();
1955 
1956 	task_cputime(tsk, &utime, &stime);
1957 	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
1958 	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
1959 
1960 	info.si_status = tsk->exit_code & 0x7f;
1961 	if (tsk->exit_code & 0x80)
1962 		info.si_code = CLD_DUMPED;
1963 	else if (tsk->exit_code & 0x7f)
1964 		info.si_code = CLD_KILLED;
1965 	else {
1966 		info.si_code = CLD_EXITED;
1967 		info.si_status = tsk->exit_code >> 8;
1968 	}
1969 
1970 	psig = tsk->parent->sighand;
1971 	spin_lock_irqsave(&psig->siglock, flags);
1972 	if (!tsk->ptrace && sig == SIGCHLD &&
1973 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1974 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1975 		/*
1976 		 * We are exiting and our parent doesn't care.  POSIX.1
1977 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1978 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1979 		 * automatically and not left for our parent's wait4 call.
1980 		 * Rather than having the parent do it as a magic kind of
1981 		 * signal handler, we just set this to tell do_exit that we
1982 		 * can be cleaned up without becoming a zombie.  Note that
1983 		 * we still call __wake_up_parent in this case, because a
1984 		 * blocked sys_wait4 might now return -ECHILD.
1985 		 *
1986 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1987 		 * is implementation-defined: we do (if you don't want
1988 		 * it, just use SIG_IGN instead).
1989 		 */
1990 		autoreap = true;
1991 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1992 			sig = 0;
1993 	}
1994 	/*
1995 	 * Send with __send_signal as si_pid and si_uid are in the
1996 	 * parent's namespaces.
1997 	 */
1998 	if (valid_signal(sig) && sig)
1999 		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2000 	__wake_up_parent(tsk, tsk->parent);
2001 	spin_unlock_irqrestore(&psig->siglock, flags);
2002 
2003 	return autoreap;
2004 }
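
/*
 * Illustrative sketch: the exit_code decoding above mirrors the classic
 * wait(2) status encoding that userspace unpacks with WIFEXITED() and
 * friends.  Roughly:
 */
#if 0
static int example_decode_exit_code(int exit_code)
{
	if (exit_code & 0x80)
		return CLD_DUMPED;	/* killed by a signal, core dumped */
	if (exit_code & 0x7f)
		return CLD_KILLED;	/* killed by signal (exit_code & 0x7f) */
	return CLD_EXITED;		/* normal exit, status is exit_code >> 8 */
}
#endif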
2005 
2006 /**
2007  * do_notify_parent_cldstop - notify parent of stopped/continued state change
2008  * @tsk: task reporting the state change
2009  * @for_ptracer: the notification is for ptracer
2010  * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2011  *
2012  * Notify @tsk's parent that the stopped/continued state has changed.  If
2013  * @for_ptracer is %false, @tsk's group leader notifies its real parent.
2014  * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2015  *
2016  * CONTEXT:
2017  * Must be called with tasklist_lock at least read locked.
2018  */
2019 static void do_notify_parent_cldstop(struct task_struct *tsk,
2020 				     bool for_ptracer, int why)
2021 {
2022 	struct kernel_siginfo info;
2023 	unsigned long flags;
2024 	struct task_struct *parent;
2025 	struct sighand_struct *sighand;
2026 	u64 utime, stime;
2027 
2028 	if (for_ptracer) {
2029 		parent = tsk->parent;
2030 	} else {
2031 		tsk = tsk->group_leader;
2032 		parent = tsk->real_parent;
2033 	}
2034 
2035 	clear_siginfo(&info);
2036 	info.si_signo = SIGCHLD;
2037 	info.si_errno = 0;
2038 	/*
2039 	 * see comment in do_notify_parent() about the following 4 lines
2040 	 */
2041 	rcu_read_lock();
2042 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2043 	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2044 	rcu_read_unlock();
2045 
2046 	task_cputime(tsk, &utime, &stime);
2047 	info.si_utime = nsec_to_clock_t(utime);
2048 	info.si_stime = nsec_to_clock_t(stime);
2049 
2050 	info.si_code = why;
2051 	switch (why) {
2052 	case CLD_CONTINUED:
2053 		info.si_status = SIGCONT;
2054 		break;
2055 	case CLD_STOPPED:
2056 		info.si_status = tsk->signal->group_exit_code & 0x7f;
2057 		break;
2058 	case CLD_TRAPPED:
2059 		info.si_status = tsk->exit_code & 0x7f;
2060 		break;
2061 	default:
2062 		BUG();
2063 	}
2064 
2065 	sighand = parent->sighand;
2066 	spin_lock_irqsave(&sighand->siglock, flags);
2067 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2068 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2069 		__group_send_sig_info(SIGCHLD, &info, parent);
2070 	/*
2071 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2072 	 */
2073 	__wake_up_parent(tsk, parent);
2074 	spin_unlock_irqrestore(&sighand->siglock, flags);
2075 }
2076 
2077 static inline bool may_ptrace_stop(void)
2078 {
2079 	if (!likely(current->ptrace))
2080 		return false;
2081 	/*
2082 	 * Are we in the middle of do_coredump?
2083 	 * If so, and our tracer is also part of the coredump, stopping
2084 	 * is a deadlock situation, and pointless because our tracer
2085 	 * is dead, so don't allow us to stop.
2086 	 * If SIGKILL was already sent before the caller unlocked
2087 	 * ->siglock we must see ->core_state != NULL. Otherwise it
2088 	 * is safe to enter schedule().
2089 	 *
2090 	 * This is almost outdated: a task with a pending SIGKILL can't
2091 	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
2092 	 * after SIGKILL was already dequeued.
2093 	 */
2094 	if (unlikely(current->mm->core_state) &&
2095 	    unlikely(current->mm == current->parent->mm))
2096 		return false;
2097 
2098 	return true;
2099 }
2100 
2101 /*
2102  * Return true if there is a SIGKILL that should be waking us up.
2103  * Called with the siglock held.
2104  */
2105 static bool sigkill_pending(struct task_struct *tsk)
2106 {
2107 	return sigismember(&tsk->pending.signal, SIGKILL) ||
2108 	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
2109 }
2110 
2111 /*
2112  * This must be called with current->sighand->siglock held.
2113  *
2114  * This should be the path for all ptrace stops.
2115  * We always set current->last_siginfo while stopped here.
2116  * That makes it a way to test a stopped process for
2117  * being ptrace-stopped vs being job-control-stopped.
2118  *
2119  * If we actually decide not to stop at all because the tracer
2120  * is gone, we keep current->exit_code unless clear_code.
2121  */
2122 static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2123 	__releases(&current->sighand->siglock)
2124 	__acquires(&current->sighand->siglock)
2125 {
2126 	bool gstop_done = false;
2127 
2128 	if (arch_ptrace_stop_needed(exit_code, info)) {
2129 		/*
2130 		 * The arch code has something special to do before a
2131 		 * ptrace stop.  This is allowed to block, e.g. for faults
2132 		 * on user stack pages.  We can't keep the siglock while
2133 		 * calling arch_ptrace_stop, so we must release it now.
2134 		 * To preserve proper semantics, we must do this before
2135 		 * any signal bookkeeping like checking group_stop_count.
2136 		 * Meanwhile, a SIGKILL could come in before we retake the
2137 		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
2138 		 * So after regaining the lock, we must check for SIGKILL.
2139 		 */
2140 		spin_unlock_irq(&current->sighand->siglock);
2141 		arch_ptrace_stop(exit_code, info);
2142 		spin_lock_irq(&current->sighand->siglock);
2143 		if (sigkill_pending(current))
2144 			return;
2145 	}
2146 
2147 	set_special_state(TASK_TRACED);
2148 
2149 	/*
2150 	 * We're committing to trapping.  TRACED should be visible before
2151 	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2152 	 * Also, transition to TRACED and updates to ->jobctl should be
2153 	 * atomic with respect to siglock and should be done after the arch
2154 	 * hook as siglock is released and regrabbed across it.
2155 	 *
2156 	 *     TRACER				    TRACEE
2157 	 *
2158 	 *     ptrace_attach()
2159 	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
2160 	 *     do_wait()
2161 	 *       set_current_state()                smp_wmb();
2162 	 *       ptrace_do_wait()
2163 	 *         wait_task_stopped()
2164 	 *           task_stopped_code()
2165 	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
2166 	 */
2167 	smp_wmb();
2168 
2169 	current->last_siginfo = info;
2170 	current->exit_code = exit_code;
2171 
2172 	/*
2173 	 * If @why is CLD_STOPPED, we're trapping to participate in a group
2174 	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
2175 	 * across siglock relocks since INTERRUPT was scheduled, PENDING
2176 	 * could be clear now.  We act as if SIGCONT is received after
2177 	 * TASK_TRACED is entered - ignore it.
2178 	 */
2179 	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2180 		gstop_done = task_participate_group_stop(current);
2181 
2182 	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2183 	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2184 	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2185 		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2186 
2187 	/* entering a trap, clear TRAPPING */
2188 	task_clear_jobctl_trapping(current);
2189 
2190 	spin_unlock_irq(&current->sighand->siglock);
2191 	read_lock(&tasklist_lock);
2192 	if (may_ptrace_stop()) {
2193 		/*
2194 		 * Notify parents of the stop.
2195 		 *
2196 		 * While ptraced, there are two parents - the ptracer and
2197 		 * the real_parent of the group_leader.  The ptracer should
2198 		 * know about every stop while the real parent is only
2199 		 * interested in the completion of group stop.  The states
2200 		 * for the two don't interact with each other.  Notify
2201 		 * separately unless they're gonna be duplicates.
2202 		 */
2203 		do_notify_parent_cldstop(current, true, why);
2204 		if (gstop_done && ptrace_reparented(current))
2205 			do_notify_parent_cldstop(current, false, why);
2206 
2207 		/*
2208 		 * Don't want to allow preemption here, because
2209 		 * sys_ptrace() needs this task to be inactive.
2210 		 *
2211 		 * XXX: implement read_unlock_no_resched().
2212 		 */
2213 		preempt_disable();
2214 		read_unlock(&tasklist_lock);
2215 		cgroup_enter_frozen();
2216 		preempt_enable_no_resched();
2217 		freezable_schedule();
2218 		cgroup_leave_frozen(true);
2219 	} else {
2220 		/*
2221 		 * By the time we got the lock, our tracer went away.
2222 		 * Don't drop the lock yet, another tracer may come.
2223 		 *
2224 		 * If @gstop_done, the ptracer went away between group stop
2225 		 * completion and here.  During detach, it would have set
2226 		 * JOBCTL_STOP_PENDING on us and we'll re-enter
2227 		 * TASK_STOPPED in do_signal_stop() on return, so notifying
2228 		 * the real parent of the group stop completion is enough.
2229 		 */
2230 		if (gstop_done)
2231 			do_notify_parent_cldstop(current, false, why);
2232 
2233 		/* tasklist protects us from ptrace_freeze_traced() */
2234 		__set_current_state(TASK_RUNNING);
2235 		if (clear_code)
2236 			current->exit_code = 0;
2237 		read_unlock(&tasklist_lock);
2238 	}
2239 
2240 	/*
2241 	 * We are back.  Now reacquire the siglock before touching
2242 	 * last_siginfo, so that we are sure to have synchronized with
2243 	 * any signal-sending on another CPU that wants to examine it.
2244 	 */
2245 	spin_lock_irq(&current->sighand->siglock);
2246 	current->last_siginfo = NULL;
2247 
2248 	/* LISTENING can be set only during STOP traps, clear it */
2249 	current->jobctl &= ~JOBCTL_LISTENING;
2250 
2251 	/*
2252 	 * Queued signals ignored us while we were stopped for tracing.
2253 	 * So check for any that we should take before resuming user mode.
2254 	 * This sets TIF_SIGPENDING, but never clears it.
2255 	 */
2256 	recalc_sigpending_tsk(current);
2257 }
2258 
2259 static void ptrace_do_notify(int signr, int exit_code, int why)
2260 {
2261 	kernel_siginfo_t info;
2262 
2263 	clear_siginfo(&info);
2264 	info.si_signo = signr;
2265 	info.si_code = exit_code;
2266 	info.si_pid = task_pid_vnr(current);
2267 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2268 
2269 	/* Let the debugger run.  */
2270 	ptrace_stop(exit_code, why, 1, &info);
2271 }
2272 
2273 void ptrace_notify(int exit_code)
2274 {
2275 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2276 	if (unlikely(current->task_works))
2277 		task_work_run();
2278 
2279 	spin_lock_irq(&current->sighand->siglock);
2280 	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2281 	spin_unlock_irq(&current->sighand->siglock);
2282 }
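
/*
 * Illustrative sketch: callers encode a ptrace event in the second byte
 * of exit_code, keeping SIGTRAP in the low byte so the BUG_ON() above
 * holds.  This is the shape used by the ptrace_event() helper.
 */
#if 0
static void example_report_event(int event)	/* e.g. PTRACE_EVENT_EXEC */
{
	ptrace_notify((event << 8) | SIGTRAP);
}
#endif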
2283 
2284 /**
2285  * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2286  * @signr: signr causing group stop if initiating
2287  *
2288  * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2289  * and participate in it.  If already set, participate in the existing
2290  * group stop.  If participated in a group stop (and thus slept), %true is
2291  * returned with siglock released.
2292  *
2293  * If ptraced, this function doesn't handle stop itself.  Instead,
2294  * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2295  * untouched.  The caller must ensure that INTERRUPT trap handling takes
2296  * place afterwards.
2297  *
2298  * CONTEXT:
2299  * Must be called with @current->sighand->siglock held, which is released
2300  * on %true return.
2301  *
2302  * RETURNS:
2303  * %false if group stop is already cancelled or ptrace trap is scheduled.
2304  * %true if participated in group stop.
2305  */
2306 static bool do_signal_stop(int signr)
2307 	__releases(&current->sighand->siglock)
2308 {
2309 	struct signal_struct *sig = current->signal;
2310 
2311 	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2312 		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2313 		struct task_struct *t;
2314 
2315 		/* signr will be recorded in task->jobctl for retries */
2316 		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2317 
2318 		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2319 		    unlikely(signal_group_exit(sig)))
2320 			return false;
2321 		/*
2322 		 * There is no group stop already in progress.  We must
2323 		 * initiate one now.
2324 		 *
2325 		 * While ptraced, a task may be resumed while group stop is
2326 		 * still in effect and then receive a stop signal and
2327 		 * initiate another group stop.  This deviates from the
2328 		 * usual behavior as two consecutive stop signals can't
2329 		 * cause two group stops when !ptraced.  That is why we
2330 		 * also check !task_is_stopped(t) below.
2331 		 *
2332 		 * The condition can be distinguished by testing whether
2333 		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
2334 		 * group_exit_code in such case.
2335 		 *
2336 		 * This is not necessary for SIGNAL_STOP_CONTINUED because
2337 		 * an intervening stop signal is required to cause two
2338 		 * continued events regardless of ptrace.
2339 		 */
2340 		if (!(sig->flags & SIGNAL_STOP_STOPPED))
2341 			sig->group_exit_code = signr;
2342 
2343 		sig->group_stop_count = 0;
2344 
2345 		if (task_set_jobctl_pending(current, signr | gstop))
2346 			sig->group_stop_count++;
2347 
2348 		t = current;
2349 		while_each_thread(current, t) {
2350 			/*
2351 			 * Setting state to TASK_STOPPED for a group
2352 			 * stop is always done with the siglock held,
2353 			 * so this check has no races.
2354 			 */
2355 			if (!task_is_stopped(t) &&
2356 			    task_set_jobctl_pending(t, signr | gstop)) {
2357 				sig->group_stop_count++;
2358 				if (likely(!(t->ptrace & PT_SEIZED)))
2359 					signal_wake_up(t, 0);
2360 				else
2361 					ptrace_trap_notify(t);
2362 			}
2363 		}
2364 	}
2365 
2366 	if (likely(!current->ptrace)) {
2367 		int notify = 0;
2368 
2369 		/*
2370 		 * If there are no other threads in the group, or if there
2371 		 * is a group stop in progress and we are the last to stop,
2372 		 * report to the parent.
2373 		 */
2374 		if (task_participate_group_stop(current))
2375 			notify = CLD_STOPPED;
2376 
2377 		set_special_state(TASK_STOPPED);
2378 		spin_unlock_irq(&current->sighand->siglock);
2379 
2380 		/*
2381 		 * Notify the parent of the group stop completion.  Because
2382 		 * we're not holding either the siglock or tasklist_lock
2383 	 * here, a ptracer may attach in between; however, this is for
2384 		 * group stop and should always be delivered to the real
2385 		 * parent of the group leader.  The new ptracer will get
2386 		 * its notification when this task transitions into
2387 		 * TASK_TRACED.
2388 		 */
2389 		if (notify) {
2390 			read_lock(&tasklist_lock);
2391 			do_notify_parent_cldstop(current, false, notify);
2392 			read_unlock(&tasklist_lock);
2393 		}
2394 
2395 		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2396 		cgroup_enter_frozen();
2397 		freezable_schedule();
2398 		return true;
2399 	} else {
2400 		/*
2401 		 * While ptraced, group stop is handled by STOP trap.
2402 		 * Schedule it and let the caller deal with it.
2403 		 */
2404 		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2405 		return false;
2406 	}
2407 }
2408 
2409 /**
2410  * do_jobctl_trap - take care of ptrace jobctl traps
2411  *
2412  * When PT_SEIZED, it's used for both group stop and explicit
2413  * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2414  * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2415  * the stop signal; otherwise, %SIGTRAP.
2416  *
2417  * When !PT_SEIZED, it's used only for group stop trap with stop signal
2418  * number as exit_code and no siginfo.
2419  *
2420  * CONTEXT:
2421  * Must be called with @current->sighand->siglock held, which may be
2422  * released and re-acquired before returning with intervening sleep.
2423  */
2424 static void do_jobctl_trap(void)
2425 {
2426 	struct signal_struct *signal = current->signal;
2427 	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2428 
2429 	if (current->ptrace & PT_SEIZED) {
2430 		if (!signal->group_stop_count &&
2431 		    !(signal->flags & SIGNAL_STOP_STOPPED))
2432 			signr = SIGTRAP;
2433 		WARN_ON_ONCE(!signr);
2434 		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2435 				 CLD_STOPPED);
2436 	} else {
2437 		WARN_ON_ONCE(!signr);
2438 		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2439 		current->exit_code = 0;
2440 	}
2441 }
2442 
2443 /**
2444  * do_freezer_trap - handle the freezer jobctl trap
2445  *
2446  * Puts the task into the frozen state, but only if the task is not about
2447  * to quit.  In that case it drops JOBCTL_TRAP_FREEZE.
2448  *
2449  * CONTEXT:
2450  * Must be called with @current->sighand->siglock held,
2451  * which is always released before returning.
2452  */
2453 static void do_freezer_trap(void)
2454 	__releases(&current->sighand->siglock)
2455 {
2456 	/*
2457 	 * If there are trap bits pending other than JOBCTL_TRAP_FREEZE,
2458 	 * let's make another loop to give them a chance to be handled.
2459 	 * In any case, we'll come back here.
2460 	 */
2461 	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2462 	     JOBCTL_TRAP_FREEZE) {
2463 		spin_unlock_irq(&current->sighand->siglock);
2464 		return;
2465 	}
2466 
2467 	/*
2468 	 * Now we're sure that there is no pending fatal signal and no
2469 	 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2470 	 * immediately (if there is a non-fatal signal pending), and
2471 	 * put the task to sleep.
2472 	 */
2473 	__set_current_state(TASK_INTERRUPTIBLE);
2474 	clear_thread_flag(TIF_SIGPENDING);
2475 	spin_unlock_irq(&current->sighand->siglock);
2476 	cgroup_enter_frozen();
2477 	freezable_schedule();
2478 }
2479 
2480 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2481 {
2482 	/*
2483 	 * We do not check sig_kernel_stop(signr) but set this marker
2484 	 * unconditionally because we do not know whether debugger will
2485 	 * change signr. This flag has no meaning unless we are going
2486 	 * to stop after return from ptrace_stop(). In this case it will
2487 	 * be checked in do_signal_stop(), we should only stop if it was
2488 	 * not cleared by SIGCONT while we were sleeping. See also the
2489 	 * comment in dequeue_signal().
2490 	 */
2491 	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2492 	ptrace_stop(signr, CLD_TRAPPED, 0, info);
2493 
2494 	/* We're back.  Did the debugger cancel the sig?  */
2495 	signr = current->exit_code;
2496 	if (signr == 0)
2497 		return signr;
2498 
2499 	current->exit_code = 0;
2500 
2501 	/*
2502 	 * Update the siginfo structure if the signal has
2503 	 * changed.  If the debugger wanted something
2504 	 * specific in the siginfo structure then it should
2505 	 * have updated *info via PTRACE_SETSIGINFO.
2506 	 */
2507 	if (signr != info->si_signo) {
2508 		clear_siginfo(info);
2509 		info->si_signo = signr;
2510 		info->si_errno = 0;
2511 		info->si_code = SI_USER;
2512 		rcu_read_lock();
2513 		info->si_pid = task_pid_vnr(current->parent);
2514 		info->si_uid = from_kuid_munged(current_user_ns(),
2515 						task_uid(current->parent));
2516 		rcu_read_unlock();
2517 	}
2518 
2519 	/* If the (new) signal is now blocked, requeue it.  */
2520 	if (sigismember(&current->blocked, signr)) {
2521 		send_signal(signr, info, current, PIDTYPE_PID);
2522 		signr = 0;
2523 	}
2524 
2525 	return signr;
2526 }
2527 
2528 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2529 {
2530 	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2531 	case SIL_FAULT:
2532 	case SIL_FAULT_MCEERR:
2533 	case SIL_FAULT_BNDERR:
2534 	case SIL_FAULT_PKUERR:
2535 		ksig->info.si_addr = arch_untagged_si_addr(
2536 			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2537 		break;
2538 	case SIL_KILL:
2539 	case SIL_TIMER:
2540 	case SIL_POLL:
2541 	case SIL_CHLD:
2542 	case SIL_RT:
2543 	case SIL_SYS:
2544 		break;
2545 	}
2546 }
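
/*
 * Illustrative sketch (hypothetical, no particular architecture): on a
 * CPU with top-byte-ignore style pointer tagging, arch_untagged_si_addr()
 * could strip the tag bits roughly like this.
 */
#if 0
static void __user *example_untagged_si_addr(void __user *addr,
					     unsigned long sig,
					     unsigned long si_code)
{
	return (void __user *)((unsigned long)addr & ~(0xffUL << 56));
}
#endif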
2547 
2548 bool get_signal(struct ksignal *ksig)
2549 {
2550 	struct sighand_struct *sighand = current->sighand;
2551 	struct signal_struct *signal = current->signal;
2552 	int signr;
2553 
2554 	if (unlikely(current->task_works))
2555 		task_work_run();
2556 
2557 	/*
2558 	 * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2559 	 * that the arch handlers don't all have to do it. If we get here
2560 	 * without TIF_SIGPENDING, just exit after running signal work.
2561 	 */
2562 	if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2563 		if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2564 			tracehook_notify_signal();
2565 		if (!task_sigpending(current))
2566 			return false;
2567 	}
2568 
2569 	if (unlikely(uprobe_deny_signal()))
2570 		return false;
2571 
2572 	/*
2573 	 * Do this once, we can't return to user-mode if freezing() == T.
2574 	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2575 	 * thus do not need another check after return.
2576 	 */
2577 	try_to_freeze();
2578 
2579 relock:
2580 	spin_lock_irq(&sighand->siglock);
2581 
2582 	/*
2583 	 * Every stopped thread goes here after wakeup. Check to see if
2584 	 * we should notify the parent; prepare_signal(SIGCONT) encodes
2585 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2586 	 */
2587 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2588 		int why;
2589 
2590 		if (signal->flags & SIGNAL_CLD_CONTINUED)
2591 			why = CLD_CONTINUED;
2592 		else
2593 			why = CLD_STOPPED;
2594 
2595 		signal->flags &= ~SIGNAL_CLD_MASK;
2596 
2597 		spin_unlock_irq(&sighand->siglock);
2598 
2599 		/*
2600 		 * Notify the parent that we're continuing.  This event is
2601 		 * always per-process and doesn't make a whole lot of sense
2602 		 * for ptracers, who shouldn't consume the state via
2603 		 * wait(2) either, but, for backward compatibility, notify
2604 		 * the ptracer of the group leader too unless it's gonna be
2605 		 * a duplicate.
2606 		 */
2607 		read_lock(&tasklist_lock);
2608 		do_notify_parent_cldstop(current, false, why);
2609 
2610 		if (ptrace_reparented(current->group_leader))
2611 			do_notify_parent_cldstop(current->group_leader,
2612 						true, why);
2613 		read_unlock(&tasklist_lock);
2614 
2615 		goto relock;
2616 	}
2617 
2618 	/* Has this task already been marked for death? */
2619 	if (signal_group_exit(signal)) {
2620 		ksig->info.si_signo = signr = SIGKILL;
2621 		sigdelset(&current->pending.signal, SIGKILL);
2622 		trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2623 				&sighand->action[SIGKILL - 1]);
2624 		recalc_sigpending();
2625 		goto fatal;
2626 	}
2627 
2628 	for (;;) {
2629 		struct k_sigaction *ka;
2630 
2631 		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2632 		    do_signal_stop(0))
2633 			goto relock;
2634 
2635 		if (unlikely(current->jobctl &
2636 			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2637 			if (current->jobctl & JOBCTL_TRAP_MASK) {
2638 				do_jobctl_trap();
2639 				spin_unlock_irq(&sighand->siglock);
2640 			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2641 				do_freezer_trap();
2642 
2643 			goto relock;
2644 		}
2645 
2646 		/*
2647 		 * If the task is leaving the frozen state, let's update
2648 		 * cgroup counters and reset the frozen bit.
2649 		 */
2650 		if (unlikely(cgroup_task_frozen(current))) {
2651 			spin_unlock_irq(&sighand->siglock);
2652 			cgroup_leave_frozen(false);
2653 			goto relock;
2654 		}
2655 
2656 		/*
2657 		 * Signals generated by the execution of an instruction
2658 		 * need to be delivered before any other pending signals
2659 		 * so that the instruction pointer in the signal stack
2660 		 * frame points to the faulting instruction.
2661 		 */
2662 		signr = dequeue_synchronous_signal(&ksig->info);
2663 		if (!signr)
2664 			signr = dequeue_signal(current, &current->blocked, &ksig->info);
2665 
2666 		if (!signr)
2667 			break; /* will return 0 */
2668 
2669 		if (unlikely(current->ptrace) && signr != SIGKILL) {
2670 			signr = ptrace_signal(signr, &ksig->info);
2671 			if (!signr)
2672 				continue;
2673 		}
2674 
2675 		ka = &sighand->action[signr-1];
2676 
2677 		/* Trace actually delivered signals. */
2678 		trace_signal_deliver(signr, &ksig->info, ka);
2679 
2680 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2681 			continue;
2682 		if (ka->sa.sa_handler != SIG_DFL) {
2683 			/* Run the handler.  */
2684 			ksig->ka = *ka;
2685 
2686 			if (ka->sa.sa_flags & SA_ONESHOT)
2687 				ka->sa.sa_handler = SIG_DFL;
2688 
2689 			break; /* will return non-zero "signr" value */
2690 		}
2691 
2692 		/*
2693 		 * Now we are doing the default action for this signal.
2694 		 */
2695 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2696 			continue;
2697 
2698 		/*
2699 		 * Global init gets no signals it doesn't want.
2700 		 * Container-init gets no signals it doesn't want from same
2701 		 * container.
2702 		 *
2703 		 * Note that if global/container-init sees a sig_kernel_only()
2704 		 * signal here, the signal must have been generated internally
2705 		 * or must have come from an ancestor namespace. In either
2706 		 * case, the signal cannot be dropped.
2707 		 */
2708 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2709 				!sig_kernel_only(signr))
2710 			continue;
2711 
2712 		if (sig_kernel_stop(signr)) {
2713 			/*
2714 			 * The default action is to stop all threads in
2715 			 * the thread group.  The job control signals
2716 			 * do nothing in an orphaned pgrp, but SIGSTOP
2717 			 * always works.  Note that siglock needs to be
2718 			 * dropped during the call to is_orphaned_pgrp()
2719 			 * because of lock ordering with tasklist_lock.
2720 			 * This allows an intervening SIGCONT to be posted.
2721 			 * We need to check for that and bail out if necessary.
2722 			 */
2723 			if (signr != SIGSTOP) {
2724 				spin_unlock_irq(&sighand->siglock);
2725 
2726 				/* signals can be posted during this window */
2727 
2728 				if (is_current_pgrp_orphaned())
2729 					goto relock;
2730 
2731 				spin_lock_irq(&sighand->siglock);
2732 			}
2733 
2734 			if (likely(do_signal_stop(ksig->info.si_signo))) {
2735 				/* It released the siglock.  */
2736 				goto relock;
2737 			}
2738 
2739 			/*
2740 			 * We didn't actually stop, due to a race
2741 			 * with SIGCONT or something like that.
2742 			 */
2743 			continue;
2744 		}
2745 
2746 	fatal:
2747 		spin_unlock_irq(&sighand->siglock);
2748 		if (unlikely(cgroup_task_frozen(current)))
2749 			cgroup_leave_frozen(true);
2750 
2751 		/*
2752 		 * Anything else is fatal, maybe with a core dump.
2753 		 */
2754 		current->flags |= PF_SIGNALED;
2755 
2756 		if (sig_kernel_coredump(signr)) {
2757 			if (print_fatal_signals)
2758 				print_fatal_signal(ksig->info.si_signo);
2759 			proc_coredump_connector(current);
2760 			/*
2761 			 * If it was able to dump core, this kills all
2762 			 * other threads in the group and synchronizes with
2763 			 * their demise.  If we lost the race with another
2764 			 * thread getting here, it set group_exit_code
2765 			 * first and our do_group_exit call below will use
2766 			 * that value and ignore the one we pass it.
2767 			 */
2768 			do_coredump(&ksig->info);
2769 		}
2770 
2771 		/*
2772 		 * PF_IO_WORKER threads will catch and exit on fatal signals
2773 		 * themselves. They have cleanup that must be performed, so
2774 		 * we cannot call do_exit() on their behalf.
2775 		 */
2776 		if (current->flags & PF_IO_WORKER)
2777 			goto out;
2778 
2779 		/*
2780 		 * Death signals, no core dump.
2781 		 */
2782 		do_group_exit(ksig->info.si_signo);
2783 		/* NOTREACHED */
2784 	}
2785 	spin_unlock_irq(&sighand->siglock);
2786 out:
2787 	ksig->sig = signr;
2788 
2789 	if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2790 		hide_si_addr_tag_bits(ksig);
2791 
2792 	return ksig->sig > 0;
2793 }
2794 
2795 /**
2796  * signal_delivered - finish bookkeeping after a signal has been delivered
2797  * @ksig:		kernel signal struct
2798  * @stepping:		nonzero if debugger single-step or block-step in use
2799  *
2800  * This function should be called when a signal has successfully been
2801  * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2802  * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2803  * is set in @ksig->ka.sa.sa_flags).  Tracing is notified.
2804  */
2805 static void signal_delivered(struct ksignal *ksig, int stepping)
2806 {
2807 	sigset_t blocked;
2808 
2809 	/* A signal was successfully delivered, and the
2810 	   saved sigmask was stored on the signal frame,
2811 	   and will be restored by sigreturn.  So we can
2812 	   simply clear the restore sigmask flag.  */
2813 	clear_restore_sigmask();
2814 
2815 	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2816 	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2817 		sigaddset(&blocked, ksig->sig);
2818 	set_current_blocked(&blocked);
2819 	tracehook_signal_handler(stepping);
2820 }
2821 
2822 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2823 {
2824 	if (failed)
2825 		force_sigsegv(ksig->sig);
2826 	else
2827 		signal_delivered(ksig, stepping);
2828 }
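
/*
 * Illustrative sketch (the frame-setup helper is hypothetical): how an
 * architecture's signal delivery path typically pairs get_signal() with
 * signal_setup_done().
 */
#if 0
static void example_arch_do_signal(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		/* A handler will run: build the user-space signal frame. */
		int failed = example_setup_rt_frame(&ksig, regs);

		signal_setup_done(failed, &ksig, 0 /* stepping */);
		return;
	}

	/* No handler to run: put the saved sigmask back, if any. */
	restore_saved_sigmask();
}
#endif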
2829 
2830 /*
2831  * It could be that complete_signal() picked us to notify about the
2832  * group-wide signal. Other threads should be notified now to take
2833  * the shared signals in @which since we will not.
2834  */
2835 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2836 {
2837 	sigset_t retarget;
2838 	struct task_struct *t;
2839 
2840 	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2841 	if (sigisemptyset(&retarget))
2842 		return;
2843 
2844 	t = tsk;
2845 	while_each_thread(tsk, t) {
2846 		if (t->flags & PF_EXITING)
2847 			continue;
2848 
2849 		if (!has_pending_signals(&retarget, &t->blocked))
2850 			continue;
2851 		/* Remove the signals this thread can handle. */
2852 		sigandsets(&retarget, &retarget, &t->blocked);
2853 
2854 		if (!task_sigpending(t))
2855 			signal_wake_up(t, 0);
2856 
2857 		if (sigisemptyset(&retarget))
2858 			break;
2859 	}
2860 }
2861 
2862 void exit_signals(struct task_struct *tsk)
2863 {
2864 	int group_stop = 0;
2865 	sigset_t unblocked;
2866 
2867 	/*
2868 	 * @tsk is about to have PF_EXITING set - lock out users which
2869 	 * expect a stable threadgroup.
2870 	 */
2871 	cgroup_threadgroup_change_begin(tsk);
2872 
2873 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2874 		tsk->flags |= PF_EXITING;
2875 		cgroup_threadgroup_change_end(tsk);
2876 		return;
2877 	}
2878 
2879 	spin_lock_irq(&tsk->sighand->siglock);
2880 	/*
2881 	 * From now on, this task is not visible to group-wide signals;
2882 	 * see wants_signal(), do_signal_stop().
2883 	 */
2884 	tsk->flags |= PF_EXITING;
2885 
2886 	cgroup_threadgroup_change_end(tsk);
2887 
2888 	if (!task_sigpending(tsk))
2889 		goto out;
2890 
2891 	unblocked = tsk->blocked;
2892 	signotset(&unblocked);
2893 	retarget_shared_pending(tsk, &unblocked);
2894 
2895 	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2896 	    task_participate_group_stop(tsk))
2897 		group_stop = CLD_STOPPED;
2898 out:
2899 	spin_unlock_irq(&tsk->sighand->siglock);
2900 
2901 	/*
2902 	 * If group stop has completed, deliver the notification.  This
2903 	 * should always go to the real parent of the group leader.
2904 	 */
2905 	if (unlikely(group_stop)) {
2906 		read_lock(&tasklist_lock);
2907 		do_notify_parent_cldstop(tsk, false, group_stop);
2908 		read_unlock(&tasklist_lock);
2909 	}
2910 }
2911 
2912 /*
2913  * System call entry points.
2914  */
2915 
2916 /**
2917  *  sys_restart_syscall - restart a system call
2918  */
2919 SYSCALL_DEFINE0(restart_syscall)
2920 {
2921 	struct restart_block *restart = &current->restart_block;
2922 	return restart->fn(restart);
2923 }
2924 
2925 long do_no_restart_syscall(struct restart_block *param)
2926 {
2927 	return -EINTR;
2928 }
2929 
2930 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2931 {
2932 	if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
2933 		sigset_t newblocked;
2934 		/* A set of now blocked but previously unblocked signals. */
2935 		sigandnsets(&newblocked, newset, &current->blocked);
2936 		retarget_shared_pending(tsk, &newblocked);
2937 	}
2938 	tsk->blocked = *newset;
2939 	recalc_sigpending();
2940 }
2941 
2942 /**
2943  * set_current_blocked - change current->blocked mask
2944  * @newset: new mask
2945  *
2946  * It is wrong to change ->blocked directly; this helper should be used
2947  * to ensure the process can't miss a shared signal we are going to block.
2948  */
2949 void set_current_blocked(sigset_t *newset)
2950 {
2951 	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2952 	__set_current_blocked(newset);
2953 }
2954 
2955 void __set_current_blocked(const sigset_t *newset)
2956 {
2957 	struct task_struct *tsk = current;
2958 
2959 	/*
2960 	 * In case the signal mask hasn't changed, there is nothing we need
2961 	 * to do. The current->blocked shouldn't be modified by other task.
2962 	 */
2963 	if (sigequalsets(&tsk->blocked, newset))
2964 		return;
2965 
2966 	spin_lock_irq(&tsk->sighand->siglock);
2967 	__set_task_blocked(tsk, newset);
2968 	spin_unlock_irq(&tsk->sighand->siglock);
2969 }
2970 
2971 /*
2972  * This is also useful for kernel threads that want to temporarily
2973  * (or permanently) block certain signals.
2974  *
2975  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2976  * interface happily blocks "unblockable" signals like SIGKILL
2977  * and friends.
2978  */
2979 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2980 {
2981 	struct task_struct *tsk = current;
2982 	sigset_t newset;
2983 
2984 	/* Lockless, only current can change ->blocked, never from irq */
2985 	if (oldset)
2986 		*oldset = tsk->blocked;
2987 
2988 	switch (how) {
2989 	case SIG_BLOCK:
2990 		sigorsets(&newset, &tsk->blocked, set);
2991 		break;
2992 	case SIG_UNBLOCK:
2993 		sigandnsets(&newset, &tsk->blocked, set);
2994 		break;
2995 	case SIG_SETMASK:
2996 		newset = *set;
2997 		break;
2998 	default:
2999 		return -EINVAL;
3000 	}
3001 
3002 	__set_current_blocked(&newset);
3003 	return 0;
3004 }
3005 EXPORT_SYMBOL(sigprocmask);
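
/*
 * Illustrative sketch (hypothetical helper): kernel code temporarily
 * blocking a signal around a critical region, as described above.
 */
#if 0
static void example_block_sighup_while_working(void)
{
	sigset_t blocked, saved;

	siginitset(&blocked, sigmask(SIGHUP));
	sigprocmask(SIG_BLOCK, &blocked, &saved);	/* save the old mask */

	/* ... work that must not be interrupted by SIGHUP ... */

	sigprocmask(SIG_SETMASK, &saved, NULL);		/* restore it */
}
#endif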
3006 
3007 /*
3008  * The API helps set app-provided sigmasks.
3009  *
3010  * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3011  * epoll_pwait, where a new sigmask is passed in from userland.
3012  *
3013  * Note that it does set_restore_sigmask() in advance, so it must always be
3014  * paired with restore_saved_sigmask_unless() before returning from the syscall.
3015  */
3016 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3017 {
3018 	sigset_t kmask;
3019 
3020 	if (!umask)
3021 		return 0;
3022 	if (sigsetsize != sizeof(sigset_t))
3023 		return -EINVAL;
3024 	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3025 		return -EFAULT;
3026 
3027 	set_restore_sigmask();
3028 	current->saved_sigmask = current->blocked;
3029 	set_current_blocked(&kmask);
3030 
3031 	return 0;
3032 }
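
/*
 * Illustrative sketch (hypothetical syscall body): the pairing described
 * above, in the shape used by ppoll()-style syscalls.
 * example_wait_for_events() stands in for the syscall's real work.
 */
#if 0
static long example_psyscall(const sigset_t __user *sigmask, size_t sigsetsize)
{
	long ret;

	ret = set_user_sigmask(sigmask, sigsetsize);
	if (ret)
		return ret;

	ret = example_wait_for_events();

	/* Keep the temporary mask in place until the signal is delivered. */
	restore_saved_sigmask_unless(ret == -EINTR);
	return ret;
}
#endif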
3033 
3034 #ifdef CONFIG_COMPAT
3035 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3036 			    size_t sigsetsize)
3037 {
3038 	sigset_t kmask;
3039 
3040 	if (!umask)
3041 		return 0;
3042 	if (sigsetsize != sizeof(compat_sigset_t))
3043 		return -EINVAL;
3044 	if (get_compat_sigset(&kmask, umask))
3045 		return -EFAULT;
3046 
3047 	set_restore_sigmask();
3048 	current->saved_sigmask = current->blocked;
3049 	set_current_blocked(&kmask);
3050 
3051 	return 0;
3052 }
3053 #endif
3054 
3055 /**
3056  *  sys_rt_sigprocmask - change the list of currently blocked signals
3057  *  @how: whether to add, remove, or set signals
3058  *  @nset: the new signal mask to combine per @how, or NULL
3059  *  @oset: previous value of signal mask if non-null
3060  *  @sigsetsize: size of sigset_t type
3061  */
3062 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3063 		sigset_t __user *, oset, size_t, sigsetsize)
3064 {
3065 	sigset_t old_set, new_set;
3066 	int error;
3067 
3068 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3069 	if (sigsetsize != sizeof(sigset_t))
3070 		return -EINVAL;
3071 
3072 	old_set = current->blocked;
3073 
3074 	if (nset) {
3075 		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3076 			return -EFAULT;
3077 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3078 
3079 		error = sigprocmask(how, &new_set, NULL);
3080 		if (error)
3081 			return error;
3082 	}
3083 
3084 	if (oset) {
3085 		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3086 			return -EFAULT;
3087 	}
3088 
3089 	return 0;
3090 }
3091 
3092 #ifdef CONFIG_COMPAT
3093 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3094 		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3095 {
3096 	sigset_t old_set = current->blocked;
3097 
3098 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3099 	if (sigsetsize != sizeof(sigset_t))
3100 		return -EINVAL;
3101 
3102 	if (nset) {
3103 		sigset_t new_set;
3104 		int error;
3105 		if (get_compat_sigset(&new_set, nset))
3106 			return -EFAULT;
3107 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3108 
3109 		error = sigprocmask(how, &new_set, NULL);
3110 		if (error)
3111 			return error;
3112 	}
3113 	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3114 }
3115 #endif
3116 
3117 static void do_sigpending(sigset_t *set)
3118 {
3119 	spin_lock_irq(&current->sighand->siglock);
3120 	sigorsets(set, &current->pending.signal,
3121 		  &current->signal->shared_pending.signal);
3122 	spin_unlock_irq(&current->sighand->siglock);
3123 
3124 	/* Outside the lock because only this thread touches it.  */
3125 	sigandsets(set, &current->blocked, set);
3126 }
3127 
3128 /**
3129  *  sys_rt_sigpending - examine a pending signal that has been raised
3130  *			while blocked
3131  *  @uset: stores pending signals
3132  *  @sigsetsize: size of sigset_t type or smaller
3133  */
3134 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3135 {
3136 	sigset_t set;
3137 
3138 	if (sigsetsize > sizeof(*uset))
3139 		return -EINVAL;
3140 
3141 	do_sigpending(&set);
3142 
3143 	if (copy_to_user(uset, &set, sigsetsize))
3144 		return -EFAULT;
3145 
3146 	return 0;
3147 }
3148 
3149 #ifdef CONFIG_COMPAT
3150 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3151 		compat_size_t, sigsetsize)
3152 {
3153 	sigset_t set;
3154 
3155 	if (sigsetsize > sizeof(*uset))
3156 		return -EINVAL;
3157 
3158 	do_sigpending(&set);
3159 
3160 	return put_compat_sigset(uset, &set, sigsetsize);
3161 }
3162 #endif
3163 
3164 static const struct {
3165 	unsigned char limit, layout;
3166 } sig_sicodes[] = {
3167 	[SIGILL]  = { NSIGILL,  SIL_FAULT },
3168 	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
3169 	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3170 	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
3171 	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3172 #if defined(SIGEMT)
3173 	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
3174 #endif
3175 	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3176 	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
3177 	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
3178 };
3179 
3180 static bool known_siginfo_layout(unsigned sig, int si_code)
3181 {
3182 	if (si_code == SI_KERNEL)
3183 		return true;
3184 	else if (si_code > SI_USER) {
3185 		if (sig_specific_sicodes(sig)) {
3186 			if (si_code <= sig_sicodes[sig].limit)
3187 				return true;
3188 		}
3189 		else if (si_code <= NSIGPOLL)
3190 			return true;
3191 	}
3192 	else if (si_code >= SI_DETHREAD)
3193 		return true;
3194 	else if (si_code == SI_ASYNCNL)
3195 		return true;
3196 	return false;
3197 }
3198 
3199 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3200 {
3201 	enum siginfo_layout layout = SIL_KILL;
3202 	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3203 		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3204 		    (si_code <= sig_sicodes[sig].limit)) {
3205 			layout = sig_sicodes[sig].layout;
3206 			/* Handle the exceptions */
3207 			if ((sig == SIGBUS) &&
3208 			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3209 				layout = SIL_FAULT_MCEERR;
3210 			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3211 				layout = SIL_FAULT_BNDERR;
3212 #ifdef SEGV_PKUERR
3213 			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3214 				layout = SIL_FAULT_PKUERR;
3215 #endif
3216 		}
3217 		else if (si_code <= NSIGPOLL)
3218 			layout = SIL_POLL;
3219 	} else {
3220 		if (si_code == SI_TIMER)
3221 			layout = SIL_TIMER;
3222 		else if (si_code == SI_SIGIO)
3223 			layout = SIL_POLL;
3224 		else if (si_code < 0)
3225 			layout = SIL_RT;
3226 	}
3227 	return layout;
3228 }
3229 
3230 static inline char __user *si_expansion(const siginfo_t __user *info)
3231 {
3232 	return ((char __user *)info) + sizeof(struct kernel_siginfo);
3233 }
3234 
3235 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3236 {
3237 	char __user *expansion = si_expansion(to);
3238 	if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3239 		return -EFAULT;
3240 	if (clear_user(expansion, SI_EXPANSION_SIZE))
3241 		return -EFAULT;
3242 	return 0;
3243 }
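
/*
 * Illustrative sketch: the layout si_expansion() relies on.  Userspace's
 * siginfo_t is struct kernel_siginfo plus SI_EXPANSION_SIZE trailing
 * bytes, which copy_siginfo_to_user() zeroes.
 */
#if 0
static void example_check_siginfo_layout(void)
{
	BUILD_BUG_ON(sizeof(siginfo_t) !=
		     sizeof(struct kernel_siginfo) + SI_EXPANSION_SIZE);
}
#endif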
3244 
3245 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3246 				       const siginfo_t __user *from)
3247 {
3248 	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3249 		char __user *expansion = si_expansion(from);
3250 		char buf[SI_EXPANSION_SIZE];
3251 		int i;
3252 		/*
3253 		 * An unknown si_code might need more than
3254 		 * sizeof(struct kernel_siginfo) bytes.  Verify all of the
3255 		 * extra bytes are 0.  This guarantees copy_siginfo_to_user
3256 		 * will return this data to userspace exactly.
3257 		 */
3258 		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3259 			return -EFAULT;
3260 		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3261 			if (buf[i] != 0)
3262 				return -E2BIG;
3263 		}
3264 	}
3265 	return 0;
3266 }
3267 
3268 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3269 				    const siginfo_t __user *from)
3270 {
3271 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3272 		return -EFAULT;
3273 	to->si_signo = signo;
3274 	return post_copy_siginfo_from_user(to, from);
3275 }
3276 
3277 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3278 {
3279 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3280 		return -EFAULT;
3281 	return post_copy_siginfo_from_user(to, from);
3282 }
3283 
3284 #ifdef CONFIG_COMPAT
3285 /**
3286  * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3287  * @to: compat siginfo destination
3288  * @from: kernel siginfo source
3289  *
3290  * Note: This function does not work properly for SIGCHLD on x32, but
3291  * fortunately it doesn't have to.  The only valid callers of this function are
3292  * copy_siginfo_to_user32, which is overridden for x32, and the coredump code.
3293  * The latter does not care because SIGCHLD will never cause a coredump.
3294  */
3295 void copy_siginfo_to_external32(struct compat_siginfo *to,
3296 		const struct kernel_siginfo *from)
3297 {
3298 	memset(to, 0, sizeof(*to));
3299 
3300 	to->si_signo = from->si_signo;
3301 	to->si_errno = from->si_errno;
3302 	to->si_code  = from->si_code;
3303 	switch (siginfo_layout(from->si_signo, from->si_code)) {
3304 	case SIL_KILL:
3305 		to->si_pid = from->si_pid;
3306 		to->si_uid = from->si_uid;
3307 		break;
3308 	case SIL_TIMER:
3309 		to->si_tid     = from->si_tid;
3310 		to->si_overrun = from->si_overrun;
3311 		to->si_int     = from->si_int;
3312 		break;
3313 	case SIL_POLL:
3314 		to->si_band = from->si_band;
3315 		to->si_fd   = from->si_fd;
3316 		break;
3317 	case SIL_FAULT:
3318 		to->si_addr = ptr_to_compat(from->si_addr);
3319 #ifdef __ARCH_SI_TRAPNO
3320 		to->si_trapno = from->si_trapno;
3321 #endif
3322 		break;
3323 	case SIL_FAULT_MCEERR:
3324 		to->si_addr = ptr_to_compat(from->si_addr);
3325 #ifdef __ARCH_SI_TRAPNO
3326 		to->si_trapno = from->si_trapno;
3327 #endif
3328 		to->si_addr_lsb = from->si_addr_lsb;
3329 		break;
3330 	case SIL_FAULT_BNDERR:
3331 		to->si_addr = ptr_to_compat(from->si_addr);
3332 #ifdef __ARCH_SI_TRAPNO
3333 		to->si_trapno = from->si_trapno;
3334 #endif
3335 		to->si_lower = ptr_to_compat(from->si_lower);
3336 		to->si_upper = ptr_to_compat(from->si_upper);
3337 		break;
3338 	case SIL_FAULT_PKUERR:
3339 		to->si_addr = ptr_to_compat(from->si_addr);
3340 #ifdef __ARCH_SI_TRAPNO
3341 		to->si_trapno = from->si_trapno;
3342 #endif
3343 		to->si_pkey = from->si_pkey;
3344 		break;
3345 	case SIL_CHLD:
3346 		to->si_pid = from->si_pid;
3347 		to->si_uid = from->si_uid;
3348 		to->si_status = from->si_status;
3349 		to->si_utime = from->si_utime;
3350 		to->si_stime = from->si_stime;
3351 		break;
3352 	case SIL_RT:
3353 		to->si_pid = from->si_pid;
3354 		to->si_uid = from->si_uid;
3355 		to->si_int = from->si_int;
3356 		break;
3357 	case SIL_SYS:
3358 		to->si_call_addr = ptr_to_compat(from->si_call_addr);
3359 		to->si_syscall   = from->si_syscall;
3360 		to->si_arch      = from->si_arch;
3361 		break;
3362 	}
3363 }
3364 
3365 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3366 			   const struct kernel_siginfo *from)
3367 {
3368 	struct compat_siginfo new;
3369 
3370 	copy_siginfo_to_external32(&new, from);
3371 	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3372 		return -EFAULT;
3373 	return 0;
3374 }
3375 
3376 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3377 					 const struct compat_siginfo *from)
3378 {
3379 	clear_siginfo(to);
3380 	to->si_signo = from->si_signo;
3381 	to->si_errno = from->si_errno;
3382 	to->si_code  = from->si_code;
3383 	switch (siginfo_layout(from->si_signo, from->si_code)) {
3384 	case SIL_KILL:
3385 		to->si_pid = from->si_pid;
3386 		to->si_uid = from->si_uid;
3387 		break;
3388 	case SIL_TIMER:
3389 		to->si_tid     = from->si_tid;
3390 		to->si_overrun = from->si_overrun;
3391 		to->si_int     = from->si_int;
3392 		break;
3393 	case SIL_POLL:
3394 		to->si_band = from->si_band;
3395 		to->si_fd   = from->si_fd;
3396 		break;
3397 	case SIL_FAULT:
3398 		to->si_addr = compat_ptr(from->si_addr);
3399 #ifdef __ARCH_SI_TRAPNO
3400 		to->si_trapno = from->si_trapno;
3401 #endif
3402 		break;
3403 	case SIL_FAULT_MCEERR:
3404 		to->si_addr = compat_ptr(from->si_addr);
3405 #ifdef __ARCH_SI_TRAPNO
3406 		to->si_trapno = from->si_trapno;
3407 #endif
3408 		to->si_addr_lsb = from->si_addr_lsb;
3409 		break;
3410 	case SIL_FAULT_BNDERR:
3411 		to->si_addr = compat_ptr(from->si_addr);
3412 #ifdef __ARCH_SI_TRAPNO
3413 		to->si_trapno = from->si_trapno;
3414 #endif
3415 		to->si_lower = compat_ptr(from->si_lower);
3416 		to->si_upper = compat_ptr(from->si_upper);
3417 		break;
3418 	case SIL_FAULT_PKUERR:
3419 		to->si_addr = compat_ptr(from->si_addr);
3420 #ifdef __ARCH_SI_TRAPNO
3421 		to->si_trapno = from->si_trapno;
3422 #endif
3423 		to->si_pkey = from->si_pkey;
3424 		break;
3425 	case SIL_CHLD:
3426 		to->si_pid    = from->si_pid;
3427 		to->si_uid    = from->si_uid;
3428 		to->si_status = from->si_status;
3429 #ifdef CONFIG_X86_X32_ABI
3430 		if (in_x32_syscall()) {
3431 			to->si_utime = from->_sifields._sigchld_x32._utime;
3432 			to->si_stime = from->_sifields._sigchld_x32._stime;
3433 		} else
3434 #endif
3435 		{
3436 			to->si_utime = from->si_utime;
3437 			to->si_stime = from->si_stime;
3438 		}
3439 		break;
3440 	case SIL_RT:
3441 		to->si_pid = from->si_pid;
3442 		to->si_uid = from->si_uid;
3443 		to->si_int = from->si_int;
3444 		break;
3445 	case SIL_SYS:
3446 		to->si_call_addr = compat_ptr(from->si_call_addr);
3447 		to->si_syscall   = from->si_syscall;
3448 		to->si_arch      = from->si_arch;
3449 		break;
3450 	}
3451 	return 0;
3452 }
3453 
3454 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3455 				      const struct compat_siginfo __user *ufrom)
3456 {
3457 	struct compat_siginfo from;
3458 
3459 	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3460 		return -EFAULT;
3461 
3462 	from.si_signo = signo;
3463 	return post_copy_siginfo_from_user32(to, &from);
3464 }
3465 
3466 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3467 			     const struct compat_siginfo __user *ufrom)
3468 {
3469 	struct compat_siginfo from;
3470 
3471 	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3472 		return -EFAULT;
3473 
3474 	return post_copy_siginfo_from_user32(to, &from);
3475 }
3476 #endif /* CONFIG_COMPAT */
3477 
3478 /**
3479  *  do_sigtimedwait - wait for queued signals specified in @which
3480  *  @which: queued signals to wait for
3481  *  @info: if non-null, the signal's siginfo is returned here
3482  *  @ts: upper bound on process time suspension
3483  */
3484 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3485 		    const struct timespec64 *ts)
3486 {
3487 	ktime_t *to = NULL, timeout = KTIME_MAX;
3488 	struct task_struct *tsk = current;
3489 	sigset_t mask = *which;
3490 	int sig, ret = 0;
3491 
3492 	if (ts) {
3493 		if (!timespec64_valid(ts))
3494 			return -EINVAL;
3495 		timeout = timespec64_to_ktime(*ts);
3496 		to = &timeout;
3497 	}
3498 
3499 	/*
3500 	 * Invert the set of allowed signals to get those we want to block.
3501 	 */
3502 	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3503 	signotset(&mask);
3504 
3505 	spin_lock_irq(&tsk->sighand->siglock);
3506 	sig = dequeue_signal(tsk, &mask, info);
3507 	if (!sig && timeout) {
3508 		/*
3509 		 * None ready, temporarily unblock those we're interested in
3510 		 * while we are sleeping, so that we'll be awakened when
3511 		 * they arrive. Unblocking is always fine; we can avoid
3512 		 * set_current_blocked().
3513 		 */
3514 		tsk->real_blocked = tsk->blocked;
3515 		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3516 		recalc_sigpending();
3517 		spin_unlock_irq(&tsk->sighand->siglock);
3518 
3519 		__set_current_state(TASK_INTERRUPTIBLE);
3520 		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3521 							 HRTIMER_MODE_REL);
3522 		spin_lock_irq(&tsk->sighand->siglock);
3523 		__set_task_blocked(tsk, &tsk->real_blocked);
3524 		sigemptyset(&tsk->real_blocked);
3525 		sig = dequeue_signal(tsk, &mask, info);
3526 	}
3527 	spin_unlock_irq(&tsk->sighand->siglock);
3528 
3529 	if (sig)
3530 		return sig;
3531 	return ret ? -EINTR : -EAGAIN;
3532 }
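
/*
 * Illustrative sketch (hypothetical helper): the mask inversion used by
 * do_sigtimedwait() above, shown for a caller waiting on SIGUSR1 only.
 */
#if 0
static void example_sigusr1_wait_mask(sigset_t *mask)
{
	siginitset(mask, sigmask(SIGUSR1));	/* wanted: only SIGUSR1 */
	sigdelsetmask(mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(mask);	/* now set: everything except SIGUSR1 */
	/*
	 * Used as a blocked-style mask, dequeue_signal() will only
	 * return SIGUSR1, and "tsk->blocked &= *mask" leaves SIGUSR1
	 * temporarily unblocked while sleeping.
	 */
}
#endif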
3533 
3534 /**
3535  *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
3536  *			in @uthese
3537  *  @uthese: queued signals to wait for
3538  *  @uinfo: if non-null, the signal's siginfo is returned here
3539  *  @uts: upper bound on process time suspension
3540  *  @sigsetsize: size of sigset_t type
3541  */
3542 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3543 		siginfo_t __user *, uinfo,
3544 		const struct __kernel_timespec __user *, uts,
3545 		size_t, sigsetsize)
3546 {
3547 	sigset_t these;
3548 	struct timespec64 ts;
3549 	kernel_siginfo_t info;
3550 	int ret;
3551 
3552 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3553 	if (sigsetsize != sizeof(sigset_t))
3554 		return -EINVAL;
3555 
3556 	if (copy_from_user(&these, uthese, sizeof(these)))
3557 		return -EFAULT;
3558 
3559 	if (uts) {
3560 		if (get_timespec64(&ts, uts))
3561 			return -EFAULT;
3562 	}
3563 
3564 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3565 
3566 	if (ret > 0 && uinfo) {
3567 		if (copy_siginfo_to_user(uinfo, &info))
3568 			ret = -EFAULT;
3569 	}
3570 
3571 	return ret;
3572 }
3573 
3574 #ifdef CONFIG_COMPAT_32BIT_TIME
3575 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3576 		siginfo_t __user *, uinfo,
3577 		const struct old_timespec32 __user *, uts,
3578 		size_t, sigsetsize)
3579 {
3580 	sigset_t these;
3581 	struct timespec64 ts;
3582 	kernel_siginfo_t info;
3583 	int ret;
3584 
3585 	if (sigsetsize != sizeof(sigset_t))
3586 		return -EINVAL;
3587 
3588 	if (copy_from_user(&these, uthese, sizeof(these)))
3589 		return -EFAULT;
3590 
3591 	if (uts) {
3592 		if (get_old_timespec32(&ts, uts))
3593 			return -EFAULT;
3594 	}
3595 
3596 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3597 
3598 	if (ret > 0 && uinfo) {
3599 		if (copy_siginfo_to_user(uinfo, &info))
3600 			ret = -EFAULT;
3601 	}
3602 
3603 	return ret;
3604 }
3605 #endif
3606 
3607 #ifdef CONFIG_COMPAT
3608 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3609 		struct compat_siginfo __user *, uinfo,
3610 		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3611 {
3612 	sigset_t s;
3613 	struct timespec64 t;
3614 	kernel_siginfo_t info;
3615 	long ret;
3616 
3617 	if (sigsetsize != sizeof(sigset_t))
3618 		return -EINVAL;
3619 
3620 	if (get_compat_sigset(&s, uthese))
3621 		return -EFAULT;
3622 
3623 	if (uts) {
3624 		if (get_timespec64(&t, uts))
3625 			return -EFAULT;
3626 	}
3627 
3628 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3629 
3630 	if (ret > 0 && uinfo) {
3631 		if (copy_siginfo_to_user32(uinfo, &info))
3632 			ret = -EFAULT;
3633 	}
3634 
3635 	return ret;
3636 }
3637 
3638 #ifdef CONFIG_COMPAT_32BIT_TIME
3639 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3640 		struct compat_siginfo __user *, uinfo,
3641 		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3642 {
3643 	sigset_t s;
3644 	struct timespec64 t;
3645 	kernel_siginfo_t info;
3646 	long ret;
3647 
3648 	if (sigsetsize != sizeof(sigset_t))
3649 		return -EINVAL;
3650 
3651 	if (get_compat_sigset(&s, uthese))
3652 		return -EFAULT;
3653 
3654 	if (uts) {
3655 		if (get_old_timespec32(&t, uts))
3656 			return -EFAULT;
3657 	}
3658 
3659 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3660 
3661 	if (ret > 0 && uinfo) {
3662 		if (copy_siginfo_to_user32(uinfo, &info))
3663 			ret = -EFAULT;
3664 	}
3665 
3666 	return ret;
3667 }
3668 #endif
3669 #endif
3670 
3671 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3672 {
3673 	clear_siginfo(info);
3674 	info->si_signo = sig;
3675 	info->si_errno = 0;
3676 	info->si_code = SI_USER;
3677 	info->si_pid = task_tgid_vnr(current);
3678 	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3679 }
3680 
3681 /**
3682  *  sys_kill - send a signal to a process
3683  *  @pid: the PID of the process
3684  *  @sig: signal to be sent
3685  */
3686 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3687 {
3688 	struct kernel_siginfo info;
3689 
3690 	prepare_kill_siginfo(sig, &info);
3691 
3692 	return kill_something_info(sig, &info, pid);
3693 }
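
/*
 * Userspace sketch (not kernel code): kill(2) is the classic caller of
 * this path.  A signal of 0 performs only the permission and existence
 * checks, with nothing delivered:
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *
 *	// Returns 1 if a process with @pid exists, even if we lack
 *	// permission to signal it.
 *	int process_exists(pid_t pid)
 *	{
 *		return kill(pid, 0) == 0 || errno == EPERM;
 *	}
 */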
3694 
3695 /*
3696  * Verify that the signaler and signalee either are in the same pid namespace
3697  * or that the signaler's pid namespace is an ancestor of the signalee's pid
3698  * namespace.
3699  */
3700 static bool access_pidfd_pidns(struct pid *pid)
3701 {
3702 	struct pid_namespace *active = task_active_pid_ns(current);
3703 	struct pid_namespace *p = ns_of_pid(pid);
3704 
3705 	for (;;) {
3706 		if (!p)
3707 			return false;
3708 		if (p == active)
3709 			break;
3710 		p = p->parent;
3711 	}
3712 
3713 	return true;
3714 }
3715 
3716 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3717 		siginfo_t __user *info)
3718 {
3719 #ifdef CONFIG_COMPAT
3720 	/*
3721 	 * Avoid hooking up compat syscalls and instead handle necessary
3722 	 * conversions here. Note, this is a stop-gap measure and should not be
3723 	 * considered a generic solution.
3724 	 */
3725 	if (in_compat_syscall())
3726 		return copy_siginfo_from_user32(
3727 			kinfo, (struct compat_siginfo __user *)info);
3728 #endif
3729 	return copy_siginfo_from_user(kinfo, info);
3730 }
3731 
3732 static struct pid *pidfd_to_pid(const struct file *file)
3733 {
3734 	struct pid *pid;
3735 
3736 	pid = pidfd_pid(file);
3737 	if (!IS_ERR(pid))
3738 		return pid;
3739 
3740 	return tgid_pidfd_to_pid(file);
3741 }
3742 
3743 /**
3744  * sys_pidfd_send_signal - Signal a process through a pidfd
3745  * @pidfd:  file descriptor of the process
3746  * @sig:    signal to send
3747  * @info:   signal info
3748  * @flags:  future flags
3749  *
3750  * The syscall currently only signals via PIDTYPE_PID, which covers
3751  * kill(<positive-pid>, <signal>). It does not signal threads or process
3752  * groups.
3753  * In order to extend the syscall to threads and process groups, the
3754  * @flags argument should be used. In essence, the @flags argument will
3755  * determine what is signaled, not the file descriptor itself. In other
3756  * words, grouping is a property of the @flags argument, not a property
3757  * of the file descriptor.
3758  *
3759  * Return: 0 on success, negative errno on failure
3760  */
3761 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3762 		siginfo_t __user *, info, unsigned int, flags)
3763 {
3764 	int ret;
3765 	struct fd f;
3766 	struct pid *pid;
3767 	kernel_siginfo_t kinfo;
3768 
3769 	/* Enforce that flags are set to 0 until we add an extension. */
3770 	if (flags)
3771 		return -EINVAL;
3772 
3773 	f = fdget(pidfd);
3774 	if (!f.file)
3775 		return -EBADF;
3776 
3777 	/* Is this a pidfd? */
3778 	pid = pidfd_to_pid(f.file);
3779 	if (IS_ERR(pid)) {
3780 		ret = PTR_ERR(pid);
3781 		goto err;
3782 	}
3783 
3784 	ret = -EINVAL;
3785 	if (!access_pidfd_pidns(pid))
3786 		goto err;
3787 
3788 	if (info) {
3789 		ret = copy_siginfo_from_user_any(&kinfo, info);
3790 		if (unlikely(ret))
3791 			goto err;
3792 
3793 		ret = -EINVAL;
3794 		if (unlikely(sig != kinfo.si_signo))
3795 			goto err;
3796 
3797 		/* Only allow sending arbitrary signals to yourself. */
3798 		ret = -EPERM;
3799 		if ((task_pid(current) != pid) &&
3800 		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3801 			goto err;
3802 	} else {
3803 		prepare_kill_siginfo(sig, &kinfo);
3804 	}
3805 
3806 	ret = kill_pid_info(sig, &kinfo, pid);
3807 
3808 err:
3809 	fdput(f);
3810 	return ret;
3811 }
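
/*
 * Userspace sketch (not kernel code, error handling trimmed), assuming
 * headers new enough to define SYS_pidfd_open and SYS_pidfd_send_signal;
 * older glibc has no wrappers, so syscall(2) is used directly:
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int kill_via_pidfd(pid_t pid)
 *	{
 *		int ret, pidfd = syscall(SYS_pidfd_open, pid, 0);
 *
 *		if (pidfd < 0)
 *			return -1;
 *		// info == NULL: the kernel fills in SI_USER siginfo via
 *		// prepare_kill_siginfo() above.  flags must be 0 for now.
 *		ret = syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *		close(pidfd);
 *		return ret;
 *	}
 */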
3812 
3813 static int
3814 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3815 {
3816 	struct task_struct *p;
3817 	int error = -ESRCH;
3818 
3819 	rcu_read_lock();
3820 	p = find_task_by_vpid(pid);
3821 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3822 		error = check_kill_permission(sig, info, p);
3823 		/*
3824 		 * The null signal is a permissions and process existence
3825 		 * probe.  No signal is actually delivered.
3826 		 */
3827 		if (!error && sig) {
3828 			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3829 			/*
3830 			 * If lock_task_sighand() failed we pretend the task
3831 			 * dies after receiving the signal. The window is tiny,
3832 			 * and the signal is private anyway.
3833 			 */
3834 			if (unlikely(error == -ESRCH))
3835 				error = 0;
3836 		}
3837 	}
3838 	rcu_read_unlock();
3839 
3840 	return error;
3841 }
3842 
3843 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3844 {
3845 	struct kernel_siginfo info;
3846 
3847 	clear_siginfo(&info);
3848 	info.si_signo = sig;
3849 	info.si_errno = 0;
3850 	info.si_code = SI_TKILL;
3851 	info.si_pid = task_tgid_vnr(current);
3852 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3853 
3854 	return do_send_specific(tgid, pid, sig, &info);
3855 }
3856 
3857 /**
3858  *  sys_tgkill - send signal to one specific thread
3859  *  @tgid: the thread group ID of the thread
3860  *  @pid: the PID of the thread
3861  *  @sig: signal to be sent
3862  *
3863  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
3864  *  exists but no longer belongs to the target process. This check
3865  *  closes the race between a thread exiting and its PID being reused.
3866  */
3867 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3868 {
3869 	/* This is only valid for single tasks */
3870 	if (pid <= 0 || tgid <= 0)
3871 		return -EINVAL;
3872 
3873 	return do_tkill(tgid, pid, sig);
3874 }
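
/*
 * Userspace sketch (not kernel code), assuming a libc without a tgkill()
 * wrapper so the raw syscall is used.  Passing the tgid is what closes
 * the PID-reuse race described above:
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int signal_thread(pid_t tgid, pid_t tid, int sig)
 *	{
 *		// Fails with ESRCH if @tid no longer belongs to @tgid.
 *		return syscall(SYS_tgkill, tgid, tid, sig);
 *	}
 */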
3875 
3876 /**
3877  *  sys_tkill - send signal to one specific task
3878  *  @pid: the PID of the task
3879  *  @sig: signal to be sent
3880  *
3881  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
3882  */
3883 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3884 {
3885 	/* This is only valid for single tasks */
3886 	if (pid <= 0)
3887 		return -EINVAL;
3888 
3889 	return do_tkill(0, pid, sig);
3890 }
3891 
3892 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3893 {
3894 	/* Not even root can pretend to send signals from the kernel.
3895 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3896 	 */
3897 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3898 	    (task_pid_vnr(current) != pid))
3899 		return -EPERM;
3900 
3901 	/* POSIX.1b doesn't mention process groups.  */
3902 	return kill_proc_info(sig, info, pid);
3903 }
3904 
3905 /**
3906  *  sys_rt_sigqueueinfo - send signal information to a process
3907  *  @pid: the PID of the process
3908  *  @sig: signal to be sent
3909  *  @uinfo: signal info to be sent
3910  */
3911 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3912 		siginfo_t __user *, uinfo)
3913 {
3914 	kernel_siginfo_t info;
3915 	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3916 	if (unlikely(ret))
3917 		return ret;
3918 	return do_rt_sigqueueinfo(pid, sig, &info);
3919 }
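
/*
 * Userspace sketch (not kernel code): the usual entry point is the
 * sigqueue(3) wrapper, which builds a siginfo with si_code = SI_QUEUE
 * (negative, so it passes the check above) plus a caller-chosen payload:
 *
 *	#include <signal.h>
 *
 *	int queue_value(pid_t pid, int value)
 *	{
 *		union sigval sv = { .sival_int = value };
 *
 *		// The receiver sees si_value.sival_int == value in an
 *		// SA_SIGINFO handler or via sigwaitinfo().
 *		return sigqueue(pid, SIGUSR1, sv);
 *	}
 */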
3920 
3921 #ifdef CONFIG_COMPAT
3922 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3923 			compat_pid_t, pid,
3924 			int, sig,
3925 			struct compat_siginfo __user *, uinfo)
3926 {
3927 	kernel_siginfo_t info;
3928 	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3929 	if (unlikely(ret))
3930 		return ret;
3931 	return do_rt_sigqueueinfo(pid, sig, &info);
3932 }
3933 #endif
3934 
3935 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3936 {
3937 	/* This is only valid for single tasks */
3938 	if (pid <= 0 || tgid <= 0)
3939 		return -EINVAL;
3940 
3941 	/* Not even root can pretend to send signals from the kernel.
3942 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3943 	 */
3944 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3945 	    (task_pid_vnr(current) != pid))
3946 		return -EPERM;
3947 
3948 	return do_send_specific(tgid, pid, sig, info);
3949 }
3950 
3951 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3952 		siginfo_t __user *, uinfo)
3953 {
3954 	kernel_siginfo_t info;
3955 	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3956 	if (unlikely(ret))
3957 		return ret;
3958 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3959 }
3960 
3961 #ifdef CONFIG_COMPAT
3962 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3963 			compat_pid_t, tgid,
3964 			compat_pid_t, pid,
3965 			int, sig,
3966 			struct compat_siginfo __user *, uinfo)
3967 {
3968 	kernel_siginfo_t info;
3969 	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3970 	if (unlikely(ret))
3971 		return ret;
3972 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3973 }
3974 #endif
3975 
3976 /*
3977  * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3978  */
3979 void kernel_sigaction(int sig, __sighandler_t action)
3980 {
3981 	spin_lock_irq(&current->sighand->siglock);
3982 	current->sighand->action[sig - 1].sa.sa_handler = action;
3983 	if (action == SIG_IGN) {
3984 		sigset_t mask;
3985 
3986 		sigemptyset(&mask);
3987 		sigaddset(&mask, sig);
3988 
3989 		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3990 		flush_sigqueue_mask(&mask, &current->pending);
3991 		recalc_sigpending();
3992 	}
3993 	spin_unlock_irq(&current->sighand->siglock);
3994 }
3995 EXPORT_SYMBOL(kernel_sigaction);
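
/*
 * In-kernel sketch: the allow_signal()/disallow_signal() helpers are thin
 * wrappers around kernel_sigaction().  A kthread that wants to react to
 * SIGTERM might do:
 *
 *	static int my_kthread(void *unused)
 *	{
 *		allow_signal(SIGTERM);
 *
 *		while (!kthread_should_stop()) {
 *			schedule_timeout_interruptible(HZ);
 *			if (signal_pending(current))
 *				break;		// told to wind down
 *		}
 *		return 0;
 *	}
 */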
3996 
3997 void __weak sigaction_compat_abi(struct k_sigaction *act,
3998 		struct k_sigaction *oact)
3999 {
4000 }
4001 
4002 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4003 {
4004 	struct task_struct *p = current, *t;
4005 	struct k_sigaction *k;
4006 	sigset_t mask;
4007 
4008 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4009 		return -EINVAL;
4010 
4011 	k = &p->sighand->action[sig-1];
4012 
4013 	spin_lock_irq(&p->sighand->siglock);
4014 	if (oact)
4015 		*oact = *k;
4016 
4017 	/*
4018 	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4019 	 * e.g. by an architecture using the bit in its uapi.
4020 	 */
4021 	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4022 
4023 	/*
4024 	 * Clear unknown flag bits in order to allow userspace to detect missing
4025 	 * support for flag bits and to allow the kernel to use non-uapi bits
4026 	 * internally.
4027 	 */
4028 	if (act)
4029 		act->sa.sa_flags &= UAPI_SA_FLAGS;
4030 	if (oact)
4031 		oact->sa.sa_flags &= UAPI_SA_FLAGS;
4032 
4033 	sigaction_compat_abi(act, oact);
4034 
4035 	if (act) {
4036 		sigdelsetmask(&act->sa.sa_mask,
4037 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
4038 		*k = *act;
4039 		/*
4040 		 * POSIX 3.3.1.3:
4041 		 *  "Setting a signal action to SIG_IGN for a signal that is
4042 		 *   pending shall cause the pending signal to be discarded,
4043 		 *   whether or not it is blocked."
4044 		 *
4045 		 *  "Setting a signal action to SIG_DFL for a signal that is
4046 		 *   pending and whose default action is to ignore the signal
4047 		 *   (for example, SIGCHLD), shall cause the pending signal to
4048 		 *   be discarded, whether or not it is blocked"
4049 		 */
4050 		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4051 			sigemptyset(&mask);
4052 			sigaddset(&mask, sig);
4053 			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4054 			for_each_thread(p, t)
4055 				flush_sigqueue_mask(&mask, &t->pending);
4056 		}
4057 	}
4058 
4059 	spin_unlock_irq(&p->sighand->siglock);
4060 	return 0;
4061 }
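
/*
 * Userspace sketch (not kernel code) of the POSIX discard rule quoted
 * above: a blocked, pending signal vanishes once its action is set to
 * SIG_IGN:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set, pending;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		raise(SIGUSR1);			// SIGUSR1 is now pending
 *
 *		signal(SIGUSR1, SIG_IGN);	// discards the pending signal
 *
 *		sigpending(&pending);
 *		printf("%d\n", sigismember(&pending, SIGUSR1));	// prints 0
 *		return 0;
 *	}
 */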
4062 
4063 static int
4064 do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
4065 		size_t min_ss_size)
4066 {
4067 	struct task_struct *t = current;
4068 
4069 	if (oss) {
4070 		memset(oss, 0, sizeof(stack_t));
4071 		oss->ss_sp = (void __user *) t->sas_ss_sp;
4072 		oss->ss_size = t->sas_ss_size;
4073 		oss->ss_flags = sas_ss_flags(sp) |
4074 			(current->sas_ss_flags & SS_FLAG_BITS);
4075 	}
4076 
4077 	if (ss) {
4078 		void __user *ss_sp = ss->ss_sp;
4079 		size_t ss_size = ss->ss_size;
4080 		unsigned ss_flags = ss->ss_flags;
4081 		int ss_mode;
4082 
4083 		if (unlikely(on_sig_stack(sp)))
4084 			return -EPERM;
4085 
4086 		ss_mode = ss_flags & ~SS_FLAG_BITS;
4087 		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4088 				ss_mode != 0))
4089 			return -EINVAL;
4090 
4091 		if (ss_mode == SS_DISABLE) {
4092 			ss_size = 0;
4093 			ss_sp = NULL;
4094 		} else {
4095 			if (unlikely(ss_size < min_ss_size))
4096 				return -ENOMEM;
4097 		}
4098 
4099 		t->sas_ss_sp = (unsigned long) ss_sp;
4100 		t->sas_ss_size = ss_size;
4101 		t->sas_ss_flags = ss_flags;
4102 	}
4103 	return 0;
4104 }
4105 
4106 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
4107 {
4108 	stack_t new, old;
4109 	int err;
4110 	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4111 		return -EFAULT;
4112 	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4113 			      current_user_stack_pointer(),
4114 			      MINSIGSTKSZ);
4115 	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4116 		err = -EFAULT;
4117 	return err;
4118 }
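
/*
 * Userspace sketch (not kernel code): the canonical sigaltstack() use,
 * paired with SA_ONSTACK so a SIGSEGV caused by overflowing the normal
 * stack can still run its handler:
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	static void on_segv(int sig)
 *	{
 *		_exit(1);	// async-signal-safe exit
 *	}
 *
 *	void setup_altstack(void)
 *	{
 *		stack_t ss = {
 *			.ss_sp = malloc(SIGSTKSZ),
 *			.ss_size = SIGSTKSZ,
 *			.ss_flags = 0,
 *		};
 *		struct sigaction sa = {
 *			.sa_handler = on_segv,
 *			.sa_flags = SA_ONSTACK,	// run the handler on ss
 *		};
 *
 *		sigaltstack(&ss, NULL);
 *		sigaction(SIGSEGV, &sa, NULL);
 *	}
 */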
4119 
4120 int restore_altstack(const stack_t __user *uss)
4121 {
4122 	stack_t new;
4123 	if (copy_from_user(&new, uss, sizeof(stack_t)))
4124 		return -EFAULT;
4125 	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4126 			     MINSIGSTKSZ);
4127 	/* squash all but EFAULT for now */
4128 	return 0;
4129 }
4130 
4131 int __save_altstack(stack_t __user *uss, unsigned long sp)
4132 {
4133 	struct task_struct *t = current;
4134 	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4135 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4136 		__put_user(t->sas_ss_size, &uss->ss_size);
4137 	if (err)
4138 		return err;
4139 	if (t->sas_ss_flags & SS_AUTODISARM)
4140 		sas_ss_reset(t);
4141 	return 0;
4142 }
4143 
4144 #ifdef CONFIG_COMPAT
4145 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4146 				 compat_stack_t __user *uoss_ptr)
4147 {
4148 	stack_t uss, uoss;
4149 	int ret;
4150 
4151 	if (uss_ptr) {
4152 		compat_stack_t uss32;
4153 		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4154 			return -EFAULT;
4155 		uss.ss_sp = compat_ptr(uss32.ss_sp);
4156 		uss.ss_flags = uss32.ss_flags;
4157 		uss.ss_size = uss32.ss_size;
4158 	}
4159 	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4160 			     compat_user_stack_pointer(),
4161 			     COMPAT_MINSIGSTKSZ);
4162 	if (ret >= 0 && uoss_ptr)  {
4163 		compat_stack_t old;
4164 		memset(&old, 0, sizeof(old));
4165 		old.ss_sp = ptr_to_compat(uoss.ss_sp);
4166 		old.ss_flags = uoss.ss_flags;
4167 		old.ss_size = uoss.ss_size;
4168 		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4169 			ret = -EFAULT;
4170 	}
4171 	return ret;
4172 }
4173 
4174 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4175 			const compat_stack_t __user *, uss_ptr,
4176 			compat_stack_t __user *, uoss_ptr)
4177 {
4178 	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4179 }
4180 
4181 int compat_restore_altstack(const compat_stack_t __user *uss)
4182 {
4183 	int err = do_compat_sigaltstack(uss, NULL);
4184 	/* squash all but -EFAULT for now */
4185 	return err == -EFAULT ? err : 0;
4186 }
4187 
4188 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4189 {
4190 	int err;
4191 	struct task_struct *t = current;
4192 	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4193 			 &uss->ss_sp) |
4194 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4195 		__put_user(t->sas_ss_size, &uss->ss_size);
4196 	if (err)
4197 		return err;
4198 	if (t->sas_ss_flags & SS_AUTODISARM)
4199 		sas_ss_reset(t);
4200 	return 0;
4201 }
4202 #endif
4203 
4204 #ifdef __ARCH_WANT_SYS_SIGPENDING
4205 
4206 /**
4207  *  sys_sigpending - examine pending signals
4208  *  @uset: where the mask of pending signals is returned
4209  */
4210 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4211 {
4212 	sigset_t set;
4213 
4214 	if (sizeof(old_sigset_t) > sizeof(*uset))
4215 		return -EINVAL;
4216 
4217 	do_sigpending(&set);
4218 
4219 	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4220 		return -EFAULT;
4221 
4222 	return 0;
4223 }
4224 
4225 #ifdef CONFIG_COMPAT
4226 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4227 {
4228 	sigset_t set;
4229 
4230 	do_sigpending(&set);
4231 
4232 	return put_user(set.sig[0], set32);
4233 }
4234 #endif
4235 
4236 #endif
4237 
4238 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4239 /**
4240  *  sys_sigprocmask - examine and change blocked signals
4241  *  @how: whether to add, remove, or set signals
4242  *  @nset: signals to add or remove (if non-null)
4243  *  @oset: previous value of signal mask if non-null
4244  *
4245  * Some platforms have their own version with special arguments;
4246  * others support only sys_rt_sigprocmask.
4247  */
4248 
4249 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4250 		old_sigset_t __user *, oset)
4251 {
4252 	old_sigset_t old_set, new_set;
4253 	sigset_t new_blocked;
4254 
4255 	old_set = current->blocked.sig[0];
4256 
4257 	if (nset) {
4258 		if (copy_from_user(&new_set, nset, sizeof(*nset)))
4259 			return -EFAULT;
4260 
4261 		new_blocked = current->blocked;
4262 
4263 		switch (how) {
4264 		case SIG_BLOCK:
4265 			sigaddsetmask(&new_blocked, new_set);
4266 			break;
4267 		case SIG_UNBLOCK:
4268 			sigdelsetmask(&new_blocked, new_set);
4269 			break;
4270 		case SIG_SETMASK:
4271 			new_blocked.sig[0] = new_set;
4272 			break;
4273 		default:
4274 			return -EINVAL;
4275 		}
4276 
4277 		set_current_blocked(&new_blocked);
4278 	}
4279 
4280 	if (oset) {
4281 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
4282 			return -EFAULT;
4283 	}
4284 
4285 	return 0;
4286 }
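
/*
 * Userspace sketch (not kernel code): modern libcs route sigprocmask(3)
 * through rt_sigprocmask, but the calling pattern is the same as for
 * this legacy entry point.  Briefly blocking SIGINT around a critical
 * section:
 *
 *	#include <signal.h>
 *
 *	void with_sigint_blocked(void (*fn)(void))
 *	{
 *		sigset_t set, old;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGINT);
 *		sigprocmask(SIG_BLOCK, &set, &old);
 *		fn();				// SIGINT stays pending here
 *		sigprocmask(SIG_SETMASK, &old, NULL);	// may deliver it now
 *	}
 */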
4287 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4288 
4289 #ifndef CONFIG_ODD_RT_SIGACTION
4290 /**
4291  *  sys_rt_sigaction - alter an action taken by a process
4292  *  @sig: signal whose action is to be changed
4293  *  @act: new sigaction
4294  *  @oact: used to save the previous sigaction
4295  *  @sigsetsize: size of sigset_t type
4296  */
4297 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4298 		const struct sigaction __user *, act,
4299 		struct sigaction __user *, oact,
4300 		size_t, sigsetsize)
4301 {
4302 	struct k_sigaction new_sa, old_sa;
4303 	int ret;
4304 
4305 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4306 	if (sigsetsize != sizeof(sigset_t))
4307 		return -EINVAL;
4308 
4309 	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4310 		return -EFAULT;
4311 
4312 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4313 	if (ret)
4314 		return ret;
4315 
4316 	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4317 		return -EFAULT;
4318 
4319 	return 0;
4320 }
4321 #ifdef CONFIG_COMPAT
4322 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4323 		const struct compat_sigaction __user *, act,
4324 		struct compat_sigaction __user *, oact,
4325 		compat_size_t, sigsetsize)
4326 {
4327 	struct k_sigaction new_ka, old_ka;
4328 #ifdef __ARCH_HAS_SA_RESTORER
4329 	compat_uptr_t restorer;
4330 #endif
4331 	int ret;
4332 
4333 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4334 	if (sigsetsize != sizeof(compat_sigset_t))
4335 		return -EINVAL;
4336 
4337 	if (act) {
4338 		compat_uptr_t handler;
4339 		ret = get_user(handler, &act->sa_handler);
4340 		new_ka.sa.sa_handler = compat_ptr(handler);
4341 #ifdef __ARCH_HAS_SA_RESTORER
4342 		ret |= get_user(restorer, &act->sa_restorer);
4343 		new_ka.sa.sa_restorer = compat_ptr(restorer);
4344 #endif
4345 		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4346 		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4347 		if (ret)
4348 			return -EFAULT;
4349 	}
4350 
4351 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4352 	if (!ret && oact) {
4353 		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4354 			       &oact->sa_handler);
4355 		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4356 					 sizeof(oact->sa_mask));
4357 		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4358 #ifdef __ARCH_HAS_SA_RESTORER
4359 		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4360 				&oact->sa_restorer);
4361 #endif
4362 	}
4363 	return ret;
4364 }
4365 #endif
4366 #endif /* !CONFIG_ODD_RT_SIGACTION */
4367 
4368 #ifdef CONFIG_OLD_SIGACTION
4369 SYSCALL_DEFINE3(sigaction, int, sig,
4370 		const struct old_sigaction __user *, act,
4371 	        struct old_sigaction __user *, oact)
4372 {
4373 	struct k_sigaction new_ka, old_ka;
4374 	int ret;
4375 
4376 	if (act) {
4377 		old_sigset_t mask;
4378 		if (!access_ok(act, sizeof(*act)) ||
4379 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4380 		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4381 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4382 		    __get_user(mask, &act->sa_mask))
4383 			return -EFAULT;
4384 #ifdef __ARCH_HAS_KA_RESTORER
4385 		new_ka.ka_restorer = NULL;
4386 #endif
4387 		siginitset(&new_ka.sa.sa_mask, mask);
4388 	}
4389 
4390 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4391 
4392 	if (!ret && oact) {
4393 		if (!access_ok(oact, sizeof(*oact)) ||
4394 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4395 		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4396 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4397 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4398 			return -EFAULT;
4399 	}
4400 
4401 	return ret;
4402 }
4403 #endif
4404 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4405 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4406 		const struct compat_old_sigaction __user *, act,
4407 	        struct compat_old_sigaction __user *, oact)
4408 {
4409 	struct k_sigaction new_ka, old_ka;
4410 	int ret;
4411 	compat_old_sigset_t mask;
4412 	compat_uptr_t handler, restorer;
4413 
4414 	if (act) {
4415 		if (!access_ok(act, sizeof(*act)) ||
4416 		    __get_user(handler, &act->sa_handler) ||
4417 		    __get_user(restorer, &act->sa_restorer) ||
4418 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4419 		    __get_user(mask, &act->sa_mask))
4420 			return -EFAULT;
4421 
4422 #ifdef __ARCH_HAS_KA_RESTORER
4423 		new_ka.ka_restorer = NULL;
4424 #endif
4425 		new_ka.sa.sa_handler = compat_ptr(handler);
4426 		new_ka.sa.sa_restorer = compat_ptr(restorer);
4427 		siginitset(&new_ka.sa.sa_mask, mask);
4428 	}
4429 
4430 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4431 
4432 	if (!ret && oact) {
4433 		if (!access_ok(oact, sizeof(*oact)) ||
4434 		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4435 			       &oact->sa_handler) ||
4436 		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4437 			       &oact->sa_restorer) ||
4438 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4439 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4440 			return -EFAULT;
4441 	}
4442 	return ret;
4443 }
4444 #endif
4445 
4446 #ifdef CONFIG_SGETMASK_SYSCALL
4447 
4448 /*
4449  * For backwards compatibility.  Functionality superseded by sigprocmask.
4450  */
4451 SYSCALL_DEFINE0(sgetmask)
4452 {
4453 	/* SMP safe */
4454 	return current->blocked.sig[0];
4455 }
4456 
4457 SYSCALL_DEFINE1(ssetmask, int, newmask)
4458 {
4459 	int old = current->blocked.sig[0];
4460 	sigset_t newset;
4461 
4462 	siginitset(&newset, newmask);
4463 	set_current_blocked(&newset);
4464 
4465 	return old;
4466 }
4467 #endif /* CONFIG_SGETMASK_SYSCALL */
4468 
4469 #ifdef __ARCH_WANT_SYS_SIGNAL
4470 /*
4471  * For backwards compatibility.  Functionality superseded by sigaction.
4472  */
4473 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4474 {
4475 	struct k_sigaction new_sa, old_sa;
4476 	int ret;
4477 
4478 	new_sa.sa.sa_handler = handler;
4479 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4480 	sigemptyset(&new_sa.sa.sa_mask);
4481 
4482 	ret = do_sigaction(sig, &new_sa, &old_sa);
4483 
4484 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4485 }
4486 #endif /* __ARCH_WANT_SYS_SIGNAL */
4487 
4488 #ifdef __ARCH_WANT_SYS_PAUSE
4489 
4490 SYSCALL_DEFINE0(pause)
4491 {
4492 	while (!signal_pending(current)) {
4493 		__set_current_state(TASK_INTERRUPTIBLE);
4494 		schedule();
4495 	}
4496 	return -ERESTARTNOHAND;
4497 }
4498 
4499 #endif
4500 
4501 static int sigsuspend(sigset_t *set)
4502 {
4503 	current->saved_sigmask = current->blocked;
4504 	set_current_blocked(set);
4505 
4506 	while (!signal_pending(current)) {
4507 		__set_current_state(TASK_INTERRUPTIBLE);
4508 		schedule();
4509 	}
4510 	set_restore_sigmask();
4511 	return -ERESTARTNOHAND;
4512 }
4513 
4514 /**
4515  *  sys_rt_sigsuspend - replace the signal mask with the @unewset
4516  *	value until a signal is received
4517  *  @unewset: new signal mask value
4518  *  @sigsetsize: size of sigset_t type
4519  */
4520 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4521 {
4522 	sigset_t newset;
4523 
4524 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4525 	if (sigsetsize != sizeof(sigset_t))
4526 		return -EINVAL;
4527 
4528 	if (copy_from_user(&newset, unewset, sizeof(newset)))
4529 		return -EFAULT;
4530 	return sigsuspend(&newset);
4531 }
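
/*
 * Userspace sketch (not kernel code): sigsuspend(3) exists to close the
 * race between testing a flag and going to sleep.  The signal stays
 * blocked except while actually waiting:
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t got_usr1;
 *
 *	static void handler(int sig)
 *	{
 *		got_usr1 = 1;
 *	}
 *
 *	void wait_for_usr1(void)
 *	{
 *		sigset_t block, oldmask;
 *
 *		signal(SIGUSR1, handler);
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &oldmask);
 *
 *		while (!got_usr1)
 *			sigsuspend(&oldmask);	// atomic unblock + sleep
 *
 *		sigprocmask(SIG_SETMASK, &oldmask, NULL);
 *	}
 */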
4532 
4533 #ifdef CONFIG_COMPAT
4534 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4535 {
4536 	sigset_t newset;
4537 
4538 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4539 	if (sigsetsize != sizeof(sigset_t))
4540 		return -EINVAL;
4541 
4542 	if (get_compat_sigset(&newset, unewset))
4543 		return -EFAULT;
4544 	return sigsuspend(&newset);
4545 }
4546 #endif
4547 
4548 #ifdef CONFIG_OLD_SIGSUSPEND
4549 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4550 {
4551 	sigset_t blocked;
4552 	siginitset(&blocked, mask);
4553 	return sigsuspend(&blocked);
4554 }
4555 #endif
4556 #ifdef CONFIG_OLD_SIGSUSPEND3
4557 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4558 {
4559 	sigset_t blocked;
4560 	siginitset(&blocked, mask);
4561 	return sigsuspend(&blocked);
4562 }
4563 #endif
4564 
4565 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4566 {
4567 	return NULL;
4568 }
4569 
4570 static inline void siginfo_buildtime_checks(void)
4571 {
4572 	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4573 
4574 	/* Verify the offsets in the two siginfos match */
4575 #define CHECK_OFFSET(field) \
4576 	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4577 
4578 	/* kill */
4579 	CHECK_OFFSET(si_pid);
4580 	CHECK_OFFSET(si_uid);
4581 
4582 	/* timer */
4583 	CHECK_OFFSET(si_tid);
4584 	CHECK_OFFSET(si_overrun);
4585 	CHECK_OFFSET(si_value);
4586 
4587 	/* rt */
4588 	CHECK_OFFSET(si_pid);
4589 	CHECK_OFFSET(si_uid);
4590 	CHECK_OFFSET(si_value);
4591 
4592 	/* sigchld */
4593 	CHECK_OFFSET(si_pid);
4594 	CHECK_OFFSET(si_uid);
4595 	CHECK_OFFSET(si_status);
4596 	CHECK_OFFSET(si_utime);
4597 	CHECK_OFFSET(si_stime);
4598 
4599 	/* sigfault */
4600 	CHECK_OFFSET(si_addr);
4601 	CHECK_OFFSET(si_addr_lsb);
4602 	CHECK_OFFSET(si_lower);
4603 	CHECK_OFFSET(si_upper);
4604 	CHECK_OFFSET(si_pkey);
4605 
4606 	/* sigpoll */
4607 	CHECK_OFFSET(si_band);
4608 	CHECK_OFFSET(si_fd);
4609 
4610 	/* sigsys */
4611 	CHECK_OFFSET(si_call_addr);
4612 	CHECK_OFFSET(si_syscall);
4613 	CHECK_OFFSET(si_arch);
4614 #undef CHECK_OFFSET
4615 
4616 	/* usb asyncio */
4617 	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4618 		     offsetof(struct siginfo, si_addr));
4619 	if (sizeof(int) == sizeof(void __user *)) {
4620 		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4621 			     sizeof(void __user *));
4622 	} else {
4623 		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4624 			      sizeof_field(struct siginfo, si_uid)) !=
4625 			     sizeof(void __user *));
4626 		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4627 			     offsetof(struct siginfo, si_uid));
4628 	}
4629 #ifdef CONFIG_COMPAT
4630 	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4631 		     offsetof(struct compat_siginfo, si_addr));
4632 	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4633 		     sizeof(compat_uptr_t));
4634 	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4635 		     sizeof_field(struct siginfo, si_pid));
4636 #endif
4637 }
4638 
4639 void __init signals_init(void)
4640 {
4641 	siginfo_buildtime_checks();
4642 
4643 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4644 }
4645 
4646 #ifdef CONFIG_KGDB_KDB
4647 #include <linux/kdb.h>
4648 /*
4649  * kdb_send_sig - Allows kdb to send signals without exposing
4650  * signal internals.  This function checks if the required locks are
4651  * available before calling the main signal code, to avoid kdb
4652  * deadlocks.
4653  */
4654 void kdb_send_sig(struct task_struct *t, int sig)
4655 {
4656 	static struct task_struct *kdb_prev_t;
4657 	int new_t, ret;
4658 	if (!spin_trylock(&t->sighand->siglock)) {
4659 		kdb_printf("Can't do kill command now.\n"
4660 			   "The sigmask lock is held somewhere else in the "
4661 			   "kernel, try again later\n");
4662 		return;
4663 	}
4664 	new_t = kdb_prev_t != t;
4665 	kdb_prev_t = t;
4666 	if (t->state != TASK_RUNNING && new_t) {
4667 		spin_unlock(&t->sighand->siglock);
4668 		kdb_printf("Process is not RUNNING, sending a signal from "
4669 			   "kdb risks deadlock\n"
4670 			   "on the run queue locks. "
4671 			   "The signal has _not_ been sent.\n"
4672 			   "Reissue the kill command if you want to risk "
4673 			   "the deadlock.\n");
4674 		return;
4675 	}
4676 	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4677 	spin_unlock(&t->sighand->siglock);
4678 	if (ret)
4679 		kdb_printf("Failed to deliver signal %d to process %d.\n",
4680 			   sig, t->pid);
4681 	else
4682 		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4683 }
4684 #endif	/* CONFIG_KGDB_KDB */
4685