xref: /linux/kernel/signal.c (revision b9f0bfd16d8b390b35dbec67c3ed74e74a0ade24)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   *  linux/kernel/signal.c
4   *
5   *  Copyright (C) 1991, 1992  Linus Torvalds
6   *
7   *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
8   *
9   *  2003-06-02  Jim Houston - Concurrent Computer Corp.
10   *		Changes to use preallocated sigqueue structures
11   *		to allow signals to be sent reliably.
12   */
13  
14  #include <linux/slab.h>
15  #include <linux/export.h>
16  #include <linux/init.h>
17  #include <linux/sched/mm.h>
18  #include <linux/sched/user.h>
19  #include <linux/sched/debug.h>
20  #include <linux/sched/task.h>
21  #include <linux/sched/task_stack.h>
22  #include <linux/sched/cputime.h>
23  #include <linux/file.h>
24  #include <linux/fs.h>
25  #include <linux/proc_fs.h>
26  #include <linux/tty.h>
27  #include <linux/binfmts.h>
28  #include <linux/coredump.h>
29  #include <linux/security.h>
30  #include <linux/syscalls.h>
31  #include <linux/ptrace.h>
32  #include <linux/signal.h>
33  #include <linux/signalfd.h>
34  #include <linux/ratelimit.h>
35  #include <linux/tracehook.h>
36  #include <linux/capability.h>
37  #include <linux/freezer.h>
38  #include <linux/pid_namespace.h>
39  #include <linux/nsproxy.h>
40  #include <linux/user_namespace.h>
41  #include <linux/uprobes.h>
42  #include <linux/compat.h>
43  #include <linux/cn_proc.h>
44  #include <linux/compiler.h>
45  #include <linux/posix-timers.h>
46  #include <linux/cgroup.h>
47  #include <linux/audit.h>
48  
49  #define CREATE_TRACE_POINTS
50  #include <trace/events/signal.h>
51  
52  #include <asm/param.h>
53  #include <linux/uaccess.h>
54  #include <asm/unistd.h>
55  #include <asm/siginfo.h>
56  #include <asm/cacheflush.h>
57  #include <asm/syscall.h>	/* for syscall_get_* */
58  
59  /*
60   * SLAB caches for signal bits.
61   */
62  
63  static struct kmem_cache *sigqueue_cachep;
64  
65  int print_fatal_signals __read_mostly;
66  
67  static void __user *sig_handler(struct task_struct *t, int sig)
68  {
69  	return t->sighand->action[sig - 1].sa.sa_handler;
70  }
71  
72  static inline bool sig_handler_ignored(void __user *handler, int sig)
73  {
74  	/* Is it explicitly or implicitly ignored? */
75  	return handler == SIG_IGN ||
76  	       (handler == SIG_DFL && sig_kernel_ignore(sig));
77  }
78  
79  static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
80  {
81  	void __user *handler;
82  
83  	handler = sig_handler(t, sig);
84  
85  	/* SIGKILL and SIGSTOP may not be sent to the global init */
86  	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
87  		return true;
88  
89  	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
90  	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
91  		return true;
92  
93  	/* Only allow kernel generated signals to this kthread */
94  	if (unlikely((t->flags & PF_KTHREAD) &&
95  		     (handler == SIG_KTHREAD_KERNEL) && !force))
96  		return true;
97  
98  	return sig_handler_ignored(handler, sig);
99  }
100  
101  static bool sig_ignored(struct task_struct *t, int sig, bool force)
102  {
103  	/*
104  	 * Blocked signals are never ignored, since the
105  	 * signal handler may change by the time it is
106  	 * unblocked.
107  	 */
108  	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
109  		return false;
110  
111  	/*
112  	 * Tracers may want to know about even ignored signal unless it
113  	 * is SIGKILL which can't be reported anyway but can be ignored
114  	 * by SIGNAL_UNKILLABLE task.
115  	 */
116  	if (t->ptrace && sig != SIGKILL)
117  		return false;
118  
119  	return sig_task_ignored(t, sig, force);
120  }
121  
122  /*
123   * Re-calculate pending state from the set of locally pending
124   * signals, globally pending signals, and blocked signals.
125   */
126  static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
127  {
128  	unsigned long ready;
129  	long i;
130  
131  	switch (_NSIG_WORDS) {
132  	default:
133  		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
134  			ready |= signal->sig[i] &~ blocked->sig[i];
135  		break;
136  
137  	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
138  		ready |= signal->sig[2] &~ blocked->sig[2];
139  		ready |= signal->sig[1] &~ blocked->sig[1];
140  		ready |= signal->sig[0] &~ blocked->sig[0];
141  		break;
142  
143  	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
144  		ready |= signal->sig[0] &~ blocked->sig[0];
145  		break;
146  
147  	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
148  	}
149  	return ready !=	0;
150  }
151  
152  #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
153  
154  static bool recalc_sigpending_tsk(struct task_struct *t)
155  {
156  	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
157  	    PENDING(&t->pending, &t->blocked) ||
158  	    PENDING(&t->signal->shared_pending, &t->blocked) ||
159  	    cgroup_task_frozen(t)) {
160  		set_tsk_thread_flag(t, TIF_SIGPENDING);
161  		return true;
162  	}
163  
164  	/*
165  	 * We must never clear the flag in another thread, or in current
166  	 * when it's possible the current syscall is returning -ERESTART*.
167  	 * So we don't clear it here, and only callers who know they should do.
168  	 */
169  	return false;
170  }
171  
172  /*
173   * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
174   * This is superfluous when called on current, the wakeup is a harmless no-op.
175   */
176  void recalc_sigpending_and_wake(struct task_struct *t)
177  {
178  	if (recalc_sigpending_tsk(t))
179  		signal_wake_up(t, 0);
180  }
181  
182  void recalc_sigpending(void)
183  {
184  	if (!recalc_sigpending_tsk(current) && !freezing(current))
185  		clear_thread_flag(TIF_SIGPENDING);
186  
187  }
188  EXPORT_SYMBOL(recalc_sigpending);
189  
190  void calculate_sigpending(void)
191  {
192  	/* Have any signals or users of TIF_SIGPENDING been delayed
193  	 * until after fork?
194  	 */
195  	spin_lock_irq(&current->sighand->siglock);
196  	set_tsk_thread_flag(current, TIF_SIGPENDING);
197  	recalc_sigpending();
198  	spin_unlock_irq(&current->sighand->siglock);
199  }
200  
201  /* Given the mask, find the first available signal that should be serviced. */
202  
203  #define SYNCHRONOUS_MASK \
204  	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
205  	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
206  
207  int next_signal(struct sigpending *pending, sigset_t *mask)
208  {
209  	unsigned long i, *s, *m, x;
210  	int sig = 0;
211  
212  	s = pending->signal.sig;
213  	m = mask->sig;
214  
215  	/*
216  	 * Handle the first word specially: it contains the
217  	 * synchronous signals that need to be dequeued first.
218  	 */
219  	x = *s &~ *m;
220  	if (x) {
221  		if (x & SYNCHRONOUS_MASK)
222  			x &= SYNCHRONOUS_MASK;
223  		sig = ffz(~x) + 1;
224  		return sig;
225  	}
226  
227  	switch (_NSIG_WORDS) {
228  	default:
229  		for (i = 1; i < _NSIG_WORDS; ++i) {
230  			x = *++s &~ *++m;
231  			if (!x)
232  				continue;
233  			sig = ffz(~x) + i*_NSIG_BPW + 1;
234  			break;
235  		}
236  		break;
237  
238  	case 2:
239  		x = s[1] &~ m[1];
240  		if (!x)
241  			break;
242  		sig = ffz(~x) + _NSIG_BPW + 1;
243  		break;
244  
245  	case 1:
246  		/* Nothing to do */
247  		break;
248  	}
249  
250  	return sig;
251  }
252  
253  static inline void print_dropped_signal(int sig)
254  {
255  	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
256  
257  	if (!print_fatal_signals)
258  		return;
259  
260  	if (!__ratelimit(&ratelimit_state))
261  		return;
262  
263  	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
264  				current->comm, current->pid, sig);
265  }
266  
267  /**
268   * task_set_jobctl_pending - set jobctl pending bits
269   * @task: target task
270   * @mask: pending bits to set
271   *
272   * Clear @mask from @task->jobctl.  @mask must be subset of
273   * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
274   * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
275   * cleared.  If @task is already being killed or exiting, this function
276   * becomes noop.
277   *
278   * CONTEXT:
279   * Must be called with @task->sighand->siglock held.
280   *
281   * RETURNS:
282   * %true if @mask is set, %false if made noop because @task was dying.
283   */
284  bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
285  {
286  	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
287  			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
288  	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
289  
290  	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
291  		return false;
292  
293  	if (mask & JOBCTL_STOP_SIGMASK)
294  		task->jobctl &= ~JOBCTL_STOP_SIGMASK;
295  
296  	task->jobctl |= mask;
297  	return true;
298  }
299  
300  /**
301   * task_clear_jobctl_trapping - clear jobctl trapping bit
302   * @task: target task
303   *
304   * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
305   * Clear it and wake up the ptracer.  Note that we don't need any further
306   * locking.  @task->siglock guarantees that @task->parent points to the
307   * ptracer.
308   *
309   * CONTEXT:
310   * Must be called with @task->sighand->siglock held.
311   */
312  void task_clear_jobctl_trapping(struct task_struct *task)
313  {
314  	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
315  		task->jobctl &= ~JOBCTL_TRAPPING;
316  		smp_mb();	/* advised by wake_up_bit() */
317  		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
318  	}
319  }
320  
321  /**
322   * task_clear_jobctl_pending - clear jobctl pending bits
323   * @task: target task
324   * @mask: pending bits to clear
325   *
326   * Clear @mask from @task->jobctl.  @mask must be subset of
327   * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
328   * STOP bits are cleared together.
329   *
330   * If clearing of @mask leaves no stop or trap pending, this function calls
331   * task_clear_jobctl_trapping().
332   *
333   * CONTEXT:
334   * Must be called with @task->sighand->siglock held.
335   */
336  void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
337  {
338  	BUG_ON(mask & ~JOBCTL_PENDING_MASK);
339  
340  	if (mask & JOBCTL_STOP_PENDING)
341  		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
342  
343  	task->jobctl &= ~mask;
344  
345  	if (!(task->jobctl & JOBCTL_PENDING_MASK))
346  		task_clear_jobctl_trapping(task);
347  }
348  
349  /**
350   * task_participate_group_stop - participate in a group stop
351   * @task: task participating in a group stop
352   *
353   * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
354   * Group stop states are cleared and the group stop count is consumed if
355   * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
356   * stop, the appropriate `SIGNAL_*` flags are set.
357   *
358   * CONTEXT:
359   * Must be called with @task->sighand->siglock held.
360   *
361   * RETURNS:
362   * %true if group stop completion should be notified to the parent, %false
363   * otherwise.
364   */
365  static bool task_participate_group_stop(struct task_struct *task)
366  {
367  	struct signal_struct *sig = task->signal;
368  	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
369  
370  	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
371  
372  	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
373  
374  	if (!consume)
375  		return false;
376  
377  	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
378  		sig->group_stop_count--;
379  
380  	/*
381  	 * Tell the caller to notify completion iff we are entering into a
382  	 * fresh group stop.  Read comment in do_signal_stop() for details.
383  	 */
384  	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
385  		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
386  		return true;
387  	}
388  	return false;
389  }
390  
391  void task_join_group_stop(struct task_struct *task)
392  {
393  	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
394  	struct signal_struct *sig = current->signal;
395  
396  	if (sig->group_stop_count) {
397  		sig->group_stop_count++;
398  		mask |= JOBCTL_STOP_CONSUME;
399  	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
400  		return;
401  
402  	/* Have the new thread join an on-going signal group stop */
403  	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
404  }
405  
406  /*
407   * allocate a new signal queue record
408   * - this may be called without locks if and only if t == current, otherwise an
409   *   appropriate lock must be held to stop the target task from exiting
410   */
411  static struct sigqueue *
412  __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
413  		 int override_rlimit, const unsigned int sigqueue_flags)
414  {
415  	struct sigqueue *q = NULL;
416  	struct ucounts *ucounts = NULL;
417  	long sigpending;
418  
419  	/*
420  	 * Protect access to @t credentials. This can go away when all
421  	 * callers hold rcu read lock.
422  	 *
423  	 * NOTE! A pending signal will hold on to the user refcount,
424  	 * and we get/put the refcount only when the sigpending count
425  	 * changes from/to zero.
426  	 */
427  	rcu_read_lock();
428  	ucounts = task_ucounts(t);
429  	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
430  	rcu_read_unlock();
431  	if (!sigpending)
432  		return NULL;
433  
434  	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
435  		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
436  	} else {
437  		print_dropped_signal(sig);
438  	}
439  
440  	if (unlikely(q == NULL)) {
441  		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
442  	} else {
443  		INIT_LIST_HEAD(&q->list);
444  		q->flags = sigqueue_flags;
445  		q->ucounts = ucounts;
446  	}
447  	return q;
448  }
449  
450  static void __sigqueue_free(struct sigqueue *q)
451  {
452  	if (q->flags & SIGQUEUE_PREALLOC)
453  		return;
454  	if (q->ucounts) {
455  		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
456  		q->ucounts = NULL;
457  	}
458  	kmem_cache_free(sigqueue_cachep, q);
459  }
460  
461  void flush_sigqueue(struct sigpending *queue)
462  {
463  	struct sigqueue *q;
464  
465  	sigemptyset(&queue->signal);
466  	while (!list_empty(&queue->list)) {
467  		q = list_entry(queue->list.next, struct sigqueue , list);
468  		list_del_init(&q->list);
469  		__sigqueue_free(q);
470  	}
471  }
472  
473  /*
474   * Flush all pending signals for this kthread.
475   */
476  void flush_signals(struct task_struct *t)
477  {
478  	unsigned long flags;
479  
480  	spin_lock_irqsave(&t->sighand->siglock, flags);
481  	clear_tsk_thread_flag(t, TIF_SIGPENDING);
482  	flush_sigqueue(&t->pending);
483  	flush_sigqueue(&t->signal->shared_pending);
484  	spin_unlock_irqrestore(&t->sighand->siglock, flags);
485  }
486  EXPORT_SYMBOL(flush_signals);
487  
488  #ifdef CONFIG_POSIX_TIMERS
489  static void __flush_itimer_signals(struct sigpending *pending)
490  {
491  	sigset_t signal, retain;
492  	struct sigqueue *q, *n;
493  
494  	signal = pending->signal;
495  	sigemptyset(&retain);
496  
497  	list_for_each_entry_safe(q, n, &pending->list, list) {
498  		int sig = q->info.si_signo;
499  
500  		if (likely(q->info.si_code != SI_TIMER)) {
501  			sigaddset(&retain, sig);
502  		} else {
503  			sigdelset(&signal, sig);
504  			list_del_init(&q->list);
505  			__sigqueue_free(q);
506  		}
507  	}
508  
509  	sigorsets(&pending->signal, &signal, &retain);
510  }
511  
512  void flush_itimer_signals(void)
513  {
514  	struct task_struct *tsk = current;
515  	unsigned long flags;
516  
517  	spin_lock_irqsave(&tsk->sighand->siglock, flags);
518  	__flush_itimer_signals(&tsk->pending);
519  	__flush_itimer_signals(&tsk->signal->shared_pending);
520  	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
521  }
522  #endif
523  
524  void ignore_signals(struct task_struct *t)
525  {
526  	int i;
527  
528  	for (i = 0; i < _NSIG; ++i)
529  		t->sighand->action[i].sa.sa_handler = SIG_IGN;
530  
531  	flush_signals(t);
532  }
533  
534  /*
535   * Flush all handlers for a task.
536   */
537  
538  void
539  flush_signal_handlers(struct task_struct *t, int force_default)
540  {
541  	int i;
542  	struct k_sigaction *ka = &t->sighand->action[0];
543  	for (i = _NSIG ; i != 0 ; i--) {
544  		if (force_default || ka->sa.sa_handler != SIG_IGN)
545  			ka->sa.sa_handler = SIG_DFL;
546  		ka->sa.sa_flags = 0;
547  #ifdef __ARCH_HAS_SA_RESTORER
548  		ka->sa.sa_restorer = NULL;
549  #endif
550  		sigemptyset(&ka->sa.sa_mask);
551  		ka++;
552  	}
553  }
554  
555  bool unhandled_signal(struct task_struct *tsk, int sig)
556  {
557  	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
558  	if (is_global_init(tsk))
559  		return true;
560  
561  	if (handler != SIG_IGN && handler != SIG_DFL)
562  		return false;
563  
564  	/* if ptraced, let the tracer determine */
565  	return !tsk->ptrace;
566  }
567  
568  static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
569  			   bool *resched_timer)
570  {
571  	struct sigqueue *q, *first = NULL;
572  
573  	/*
574  	 * Collect the siginfo appropriate to this signal.  Check if
575  	 * there is another siginfo for the same signal.
576  	*/
577  	list_for_each_entry(q, &list->list, list) {
578  		if (q->info.si_signo == sig) {
579  			if (first)
580  				goto still_pending;
581  			first = q;
582  		}
583  	}
584  
585  	sigdelset(&list->signal, sig);
586  
587  	if (first) {
588  still_pending:
589  		list_del_init(&first->list);
590  		copy_siginfo(info, &first->info);
591  
592  		*resched_timer =
593  			(first->flags & SIGQUEUE_PREALLOC) &&
594  			(info->si_code == SI_TIMER) &&
595  			(info->si_sys_private);
596  
597  		__sigqueue_free(first);
598  	} else {
599  		/*
600  		 * Ok, it wasn't in the queue.  This must be
601  		 * a fast-pathed signal or we must have been
602  		 * out of queue space.  So zero out the info.
603  		 */
604  		clear_siginfo(info);
605  		info->si_signo = sig;
606  		info->si_errno = 0;
607  		info->si_code = SI_USER;
608  		info->si_pid = 0;
609  		info->si_uid = 0;
610  	}
611  }
612  
613  static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
614  			kernel_siginfo_t *info, bool *resched_timer)
615  {
616  	int sig = next_signal(pending, mask);
617  
618  	if (sig)
619  		collect_signal(sig, pending, info, resched_timer);
620  	return sig;
621  }
622  
623  /*
624   * Dequeue a signal and return the element to the caller, which is
625   * expected to free it.
626   *
627   * All callers have to hold the siglock.
628   */
629  int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
630  {
631  	bool resched_timer = false;
632  	int signr;
633  
634  	/* We only dequeue private signals from ourselves, we don't let
635  	 * signalfd steal them
636  	 */
637  	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
638  	if (!signr) {
639  		signr = __dequeue_signal(&tsk->signal->shared_pending,
640  					 mask, info, &resched_timer);
641  #ifdef CONFIG_POSIX_TIMERS
642  		/*
643  		 * itimer signal ?
644  		 *
645  		 * itimers are process shared and we restart periodic
646  		 * itimers in the signal delivery path to prevent DoS
647  		 * attacks in the high resolution timer case. This is
648  		 * compliant with the old way of self-restarting
649  		 * itimers, as the SIGALRM is a legacy signal and only
650  		 * queued once. Changing the restart behaviour to
651  		 * restart the timer in the signal dequeue path is
652  		 * reducing the timer noise on heavy loaded !highres
653  		 * systems too.
654  		 */
655  		if (unlikely(signr == SIGALRM)) {
656  			struct hrtimer *tmr = &tsk->signal->real_timer;
657  
658  			if (!hrtimer_is_queued(tmr) &&
659  			    tsk->signal->it_real_incr != 0) {
660  				hrtimer_forward(tmr, tmr->base->get_time(),
661  						tsk->signal->it_real_incr);
662  				hrtimer_restart(tmr);
663  			}
664  		}
665  #endif
666  	}
667  
668  	recalc_sigpending();
669  	if (!signr)
670  		return 0;
671  
672  	if (unlikely(sig_kernel_stop(signr))) {
673  		/*
674  		 * Set a marker that we have dequeued a stop signal.  Our
675  		 * caller might release the siglock and then the pending
676  		 * stop signal it is about to process is no longer in the
677  		 * pending bitmasks, but must still be cleared by a SIGCONT
678  		 * (and overruled by a SIGKILL).  So those cases clear this
679  		 * shared flag after we've set it.  Note that this flag may
680  		 * remain set after the signal we return is ignored or
681  		 * handled.  That doesn't matter because its only purpose
682  		 * is to alert stop-signal processing code when another
683  		 * processor has come along and cleared the flag.
684  		 */
685  		current->jobctl |= JOBCTL_STOP_DEQUEUED;
686  	}
687  #ifdef CONFIG_POSIX_TIMERS
688  	if (resched_timer) {
689  		/*
690  		 * Release the siglock to ensure proper locking order
691  		 * of timer locks outside of siglocks.  Note, we leave
692  		 * irqs disabled here, since the posix-timers code is
693  		 * about to disable them again anyway.
694  		 */
695  		spin_unlock(&tsk->sighand->siglock);
696  		posixtimer_rearm(info);
697  		spin_lock(&tsk->sighand->siglock);
698  
699  		/* Don't expose the si_sys_private value to userspace */
700  		info->si_sys_private = 0;
701  	}
702  #endif
703  	return signr;
704  }
705  EXPORT_SYMBOL_GPL(dequeue_signal);
706  
707  static int dequeue_synchronous_signal(kernel_siginfo_t *info)
708  {
709  	struct task_struct *tsk = current;
710  	struct sigpending *pending = &tsk->pending;
711  	struct sigqueue *q, *sync = NULL;
712  
713  	/*
714  	 * Might a synchronous signal be in the queue?
715  	 */
716  	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
717  		return 0;
718  
719  	/*
720  	 * Return the first synchronous signal in the queue.
721  	 */
722  	list_for_each_entry(q, &pending->list, list) {
723  		/* Synchronous signals have a positive si_code */
724  		if ((q->info.si_code > SI_USER) &&
725  		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
726  			sync = q;
727  			goto next;
728  		}
729  	}
730  	return 0;
731  next:
732  	/*
733  	 * Check if there is another siginfo for the same signal.
734  	 */
735  	list_for_each_entry_continue(q, &pending->list, list) {
736  		if (q->info.si_signo == sync->info.si_signo)
737  			goto still_pending;
738  	}
739  
740  	sigdelset(&pending->signal, sync->info.si_signo);
741  	recalc_sigpending();
742  still_pending:
743  	list_del_init(&sync->list);
744  	copy_siginfo(info, &sync->info);
745  	__sigqueue_free(sync);
746  	return info->si_signo;
747  }
748  
749  /*
750   * Tell a process that it has a new active signal..
751   *
752   * NOTE! we rely on the previous spin_lock to
753   * lock interrupts for us! We can only be called with
754   * "siglock" held, and the local interrupt must
755   * have been disabled when that got acquired!
756   *
757   * No need to set need_resched since signal event passing
758   * goes through ->blocked
759   */
760  void signal_wake_up_state(struct task_struct *t, unsigned int state)
761  {
762  	set_tsk_thread_flag(t, TIF_SIGPENDING);
763  	/*
764  	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
765  	 * case. We don't check t->state here because there is a race with it
766  	 * executing another processor and just now entering stopped state.
767  	 * By using wake_up_state, we ensure the process will wake up and
768  	 * handle its death signal.
769  	 */
770  	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
771  		kick_process(t);
772  }
773  
774  /*
775   * Remove signals in mask from the pending set and queue.
776   * Returns 1 if any signals were found.
777   *
778   * All callers must be holding the siglock.
779   */
780  static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
781  {
782  	struct sigqueue *q, *n;
783  	sigset_t m;
784  
785  	sigandsets(&m, mask, &s->signal);
786  	if (sigisemptyset(&m))
787  		return;
788  
789  	sigandnsets(&s->signal, &s->signal, mask);
790  	list_for_each_entry_safe(q, n, &s->list, list) {
791  		if (sigismember(mask, q->info.si_signo)) {
792  			list_del_init(&q->list);
793  			__sigqueue_free(q);
794  		}
795  	}
796  }
797  
798  static inline int is_si_special(const struct kernel_siginfo *info)
799  {
800  	return info <= SEND_SIG_PRIV;
801  }
802  
803  static inline bool si_fromuser(const struct kernel_siginfo *info)
804  {
805  	return info == SEND_SIG_NOINFO ||
806  		(!is_si_special(info) && SI_FROMUSER(info));
807  }
808  
809  /*
810   * called with RCU read lock from check_kill_permission()
811   */
812  static bool kill_ok_by_cred(struct task_struct *t)
813  {
814  	const struct cred *cred = current_cred();
815  	const struct cred *tcred = __task_cred(t);
816  
817  	return uid_eq(cred->euid, tcred->suid) ||
818  	       uid_eq(cred->euid, tcred->uid) ||
819  	       uid_eq(cred->uid, tcred->suid) ||
820  	       uid_eq(cred->uid, tcred->uid) ||
821  	       ns_capable(tcred->user_ns, CAP_KILL);
822  }
823  
824  /*
825   * Bad permissions for sending the signal
826   * - the caller must hold the RCU read lock
827   */
828  static int check_kill_permission(int sig, struct kernel_siginfo *info,
829  				 struct task_struct *t)
830  {
831  	struct pid *sid;
832  	int error;
833  
834  	if (!valid_signal(sig))
835  		return -EINVAL;
836  
837  	if (!si_fromuser(info))
838  		return 0;
839  
840  	error = audit_signal_info(sig, t); /* Let audit system see the signal */
841  	if (error)
842  		return error;
843  
844  	if (!same_thread_group(current, t) &&
845  	    !kill_ok_by_cred(t)) {
846  		switch (sig) {
847  		case SIGCONT:
848  			sid = task_session(t);
849  			/*
850  			 * We don't return the error if sid == NULL. The
851  			 * task was unhashed, the caller must notice this.
852  			 */
853  			if (!sid || sid == task_session(current))
854  				break;
855  			fallthrough;
856  		default:
857  			return -EPERM;
858  		}
859  	}
860  
861  	return security_task_kill(t, info, sig, NULL);
862  }
863  
864  /**
865   * ptrace_trap_notify - schedule trap to notify ptracer
866   * @t: tracee wanting to notify tracer
867   *
868   * This function schedules sticky ptrace trap which is cleared on the next
869   * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
870   * ptracer.
871   *
872   * If @t is running, STOP trap will be taken.  If trapped for STOP and
873   * ptracer is listening for events, tracee is woken up so that it can
874   * re-trap for the new event.  If trapped otherwise, STOP trap will be
875   * eventually taken without returning to userland after the existing traps
876   * are finished by PTRACE_CONT.
877   *
878   * CONTEXT:
879   * Must be called with @task->sighand->siglock held.
880   */
881  static void ptrace_trap_notify(struct task_struct *t)
882  {
883  	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
884  	assert_spin_locked(&t->sighand->siglock);
885  
886  	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
887  	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
888  }
889  
890  /*
891   * Handle magic process-wide effects of stop/continue signals. Unlike
892   * the signal actions, these happen immediately at signal-generation
893   * time regardless of blocking, ignoring, or handling.  This does the
894   * actual continuing for SIGCONT, but not the actual stopping for stop
895   * signals. The process stop is done as a signal action for SIG_DFL.
896   *
897   * Returns true if the signal should be actually delivered, otherwise
898   * it should be dropped.
899   */
900  static bool prepare_signal(int sig, struct task_struct *p, bool force)
901  {
902  	struct signal_struct *signal = p->signal;
903  	struct task_struct *t;
904  	sigset_t flush;
905  
906  	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
907  		if (!(signal->flags & SIGNAL_GROUP_EXIT))
908  			return sig == SIGKILL;
909  		/*
910  		 * The process is in the middle of dying, nothing to do.
911  		 */
912  	} else if (sig_kernel_stop(sig)) {
913  		/*
914  		 * This is a stop signal.  Remove SIGCONT from all queues.
915  		 */
916  		siginitset(&flush, sigmask(SIGCONT));
917  		flush_sigqueue_mask(&flush, &signal->shared_pending);
918  		for_each_thread(p, t)
919  			flush_sigqueue_mask(&flush, &t->pending);
920  	} else if (sig == SIGCONT) {
921  		unsigned int why;
922  		/*
923  		 * Remove all stop signals from all queues, wake all threads.
924  		 */
925  		siginitset(&flush, SIG_KERNEL_STOP_MASK);
926  		flush_sigqueue_mask(&flush, &signal->shared_pending);
927  		for_each_thread(p, t) {
928  			flush_sigqueue_mask(&flush, &t->pending);
929  			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
930  			if (likely(!(t->ptrace & PT_SEIZED)))
931  				wake_up_state(t, __TASK_STOPPED);
932  			else
933  				ptrace_trap_notify(t);
934  		}
935  
936  		/*
937  		 * Notify the parent with CLD_CONTINUED if we were stopped.
938  		 *
939  		 * If we were in the middle of a group stop, we pretend it
940  		 * was already finished, and then continued. Since SIGCHLD
941  		 * doesn't queue we report only CLD_STOPPED, as if the next
942  		 * CLD_CONTINUED was dropped.
943  		 */
944  		why = 0;
945  		if (signal->flags & SIGNAL_STOP_STOPPED)
946  			why |= SIGNAL_CLD_CONTINUED;
947  		else if (signal->group_stop_count)
948  			why |= SIGNAL_CLD_STOPPED;
949  
950  		if (why) {
951  			/*
952  			 * The first thread which returns from do_signal_stop()
953  			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
954  			 * notify its parent. See get_signal().
955  			 */
956  			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
957  			signal->group_stop_count = 0;
958  			signal->group_exit_code = 0;
959  		}
960  	}
961  
962  	return !sig_ignored(p, sig, force);
963  }
964  
965  /*
966   * Test if P wants to take SIG.  After we've checked all threads with this,
967   * it's equivalent to finding no threads not blocking SIG.  Any threads not
968   * blocking SIG were ruled out because they are not running and already
969   * have pending signals.  Such threads will dequeue from the shared queue
970   * as soon as they're available, so putting the signal on the shared queue
971   * will be equivalent to sending it to one such thread.
972   */
973  static inline bool wants_signal(int sig, struct task_struct *p)
974  {
975  	if (sigismember(&p->blocked, sig))
976  		return false;
977  
978  	if (p->flags & PF_EXITING)
979  		return false;
980  
981  	if (sig == SIGKILL)
982  		return true;
983  
984  	if (task_is_stopped_or_traced(p))
985  		return false;
986  
987  	return task_curr(p) || !task_sigpending(p);
988  }
989  
990  static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
991  {
992  	struct signal_struct *signal = p->signal;
993  	struct task_struct *t;
994  
995  	/*
996  	 * Now find a thread we can wake up to take the signal off the queue.
997  	 *
998  	 * If the main thread wants the signal, it gets first crack.
999  	 * Probably the least surprising to the average bear.
1000  	 */
1001  	if (wants_signal(sig, p))
1002  		t = p;
1003  	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
1004  		/*
1005  		 * There is just one thread and it does not need to be woken.
1006  		 * It will dequeue unblocked signals before it runs again.
1007  		 */
1008  		return;
1009  	else {
1010  		/*
1011  		 * Otherwise try to find a suitable thread.
1012  		 */
1013  		t = signal->curr_target;
1014  		while (!wants_signal(sig, t)) {
1015  			t = next_thread(t);
1016  			if (t == signal->curr_target)
1017  				/*
1018  				 * No thread needs to be woken.
1019  				 * Any eligible threads will see
1020  				 * the signal in the queue soon.
1021  				 */
1022  				return;
1023  		}
1024  		signal->curr_target = t;
1025  	}
1026  
1027  	/*
1028  	 * Found a killable thread.  If the signal will be fatal,
1029  	 * then start taking the whole group down immediately.
1030  	 */
1031  	if (sig_fatal(p, sig) &&
1032  	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
1033  	    !sigismember(&t->real_blocked, sig) &&
1034  	    (sig == SIGKILL || !p->ptrace)) {
1035  		/*
1036  		 * This signal will be fatal to the whole group.
1037  		 */
1038  		if (!sig_kernel_coredump(sig)) {
1039  			/*
1040  			 * Start a group exit and wake everybody up.
1041  			 * This way we don't have other threads
1042  			 * running and doing things after a slower
1043  			 * thread has the fatal signal pending.
1044  			 */
1045  			signal->flags = SIGNAL_GROUP_EXIT;
1046  			signal->group_exit_code = sig;
1047  			signal->group_stop_count = 0;
1048  			t = p;
1049  			do {
1050  				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1051  				sigaddset(&t->pending.signal, SIGKILL);
1052  				signal_wake_up(t, 1);
1053  			} while_each_thread(p, t);
1054  			return;
1055  		}
1056  	}
1057  
1058  	/*
1059  	 * The signal is already in the shared-pending queue.
1060  	 * Tell the chosen thread to wake up and dequeue it.
1061  	 */
1062  	signal_wake_up(t, sig == SIGKILL);
1063  	return;
1064  }
1065  
1066  static inline bool legacy_queue(struct sigpending *signals, int sig)
1067  {
1068  	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1069  }
1070  
1071  static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1072  			enum pid_type type, bool force)
1073  {
1074  	struct sigpending *pending;
1075  	struct sigqueue *q;
1076  	int override_rlimit;
1077  	int ret = 0, result;
1078  
1079  	assert_spin_locked(&t->sighand->siglock);
1080  
1081  	result = TRACE_SIGNAL_IGNORED;
1082  	if (!prepare_signal(sig, t, force))
1083  		goto ret;
1084  
1085  	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1086  	/*
1087  	 * Short-circuit ignored signals and support queuing
1088  	 * exactly one non-rt signal, so that we can get more
1089  	 * detailed information about the cause of the signal.
1090  	 */
1091  	result = TRACE_SIGNAL_ALREADY_PENDING;
1092  	if (legacy_queue(pending, sig))
1093  		goto ret;
1094  
1095  	result = TRACE_SIGNAL_DELIVERED;
1096  	/*
1097  	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
1098  	 */
1099  	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1100  		goto out_set;
1101  
1102  	/*
1103  	 * Real-time signals must be queued if sent by sigqueue, or
1104  	 * some other real-time mechanism.  It is implementation
1105  	 * defined whether kill() does so.  We attempt to do so, on
1106  	 * the principle of least surprise, but since kill is not
1107  	 * allowed to fail with EAGAIN when low on memory we just
1108  	 * make sure at least one signal gets delivered and don't
1109  	 * pass on the info struct.
1110  	 */
1111  	if (sig < SIGRTMIN)
1112  		override_rlimit = (is_si_special(info) || info->si_code >= 0);
1113  	else
1114  		override_rlimit = 0;
1115  
1116  	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);
1117  
1118  	if (q) {
1119  		list_add_tail(&q->list, &pending->list);
1120  		switch ((unsigned long) info) {
1121  		case (unsigned long) SEND_SIG_NOINFO:
1122  			clear_siginfo(&q->info);
1123  			q->info.si_signo = sig;
1124  			q->info.si_errno = 0;
1125  			q->info.si_code = SI_USER;
1126  			q->info.si_pid = task_tgid_nr_ns(current,
1127  							task_active_pid_ns(t));
1128  			rcu_read_lock();
1129  			q->info.si_uid =
1130  				from_kuid_munged(task_cred_xxx(t, user_ns),
1131  						 current_uid());
1132  			rcu_read_unlock();
1133  			break;
1134  		case (unsigned long) SEND_SIG_PRIV:
1135  			clear_siginfo(&q->info);
1136  			q->info.si_signo = sig;
1137  			q->info.si_errno = 0;
1138  			q->info.si_code = SI_KERNEL;
1139  			q->info.si_pid = 0;
1140  			q->info.si_uid = 0;
1141  			break;
1142  		default:
1143  			copy_siginfo(&q->info, info);
1144  			break;
1145  		}
1146  	} else if (!is_si_special(info) &&
1147  		   sig >= SIGRTMIN && info->si_code != SI_USER) {
1148  		/*
1149  		 * Queue overflow, abort.  We may abort if the
1150  		 * signal was rt and sent by user using something
1151  		 * other than kill().
1152  		 */
1153  		result = TRACE_SIGNAL_OVERFLOW_FAIL;
1154  		ret = -EAGAIN;
1155  		goto ret;
1156  	} else {
1157  		/*
1158  		 * This is a silent loss of information.  We still
1159  		 * send the signal, but the *info bits are lost.
1160  		 */
1161  		result = TRACE_SIGNAL_LOSE_INFO;
1162  	}
1163  
1164  out_set:
1165  	signalfd_notify(t, sig);
1166  	sigaddset(&pending->signal, sig);
1167  
1168  	/* Let multiprocess signals appear after on-going forks */
1169  	if (type > PIDTYPE_TGID) {
1170  		struct multiprocess_signals *delayed;
1171  		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1172  			sigset_t *signal = &delayed->signal;
1173  			/* Can't queue both a stop and a continue signal */
1174  			if (sig == SIGCONT)
1175  				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1176  			else if (sig_kernel_stop(sig))
1177  				sigdelset(signal, SIGCONT);
1178  			sigaddset(signal, sig);
1179  		}
1180  	}
1181  
1182  	complete_signal(sig, t, type);
1183  ret:
1184  	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1185  	return ret;
1186  }
1187  
1188  static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1189  {
1190  	bool ret = false;
1191  	switch (siginfo_layout(info->si_signo, info->si_code)) {
1192  	case SIL_KILL:
1193  	case SIL_CHLD:
1194  	case SIL_RT:
1195  		ret = true;
1196  		break;
1197  	case SIL_TIMER:
1198  	case SIL_POLL:
1199  	case SIL_FAULT:
1200  	case SIL_FAULT_TRAPNO:
1201  	case SIL_FAULT_MCEERR:
1202  	case SIL_FAULT_BNDERR:
1203  	case SIL_FAULT_PKUERR:
1204  	case SIL_FAULT_PERF_EVENT:
1205  	case SIL_SYS:
1206  		ret = false;
1207  		break;
1208  	}
1209  	return ret;
1210  }
1211  
1212  static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1213  			enum pid_type type)
1214  {
1215  	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1216  	bool force = false;
1217  
1218  	if (info == SEND_SIG_NOINFO) {
1219  		/* Force if sent from an ancestor pid namespace */
1220  		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1221  	} else if (info == SEND_SIG_PRIV) {
1222  		/* Don't ignore kernel generated signals */
1223  		force = true;
1224  	} else if (has_si_pid_and_uid(info)) {
1225  		/* SIGKILL and SIGSTOP is special or has ids */
1226  		struct user_namespace *t_user_ns;
1227  
1228  		rcu_read_lock();
1229  		t_user_ns = task_cred_xxx(t, user_ns);
1230  		if (current_user_ns() != t_user_ns) {
1231  			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1232  			info->si_uid = from_kuid_munged(t_user_ns, uid);
1233  		}
1234  		rcu_read_unlock();
1235  
1236  		/* A kernel generated signal? */
1237  		force = (info->si_code == SI_KERNEL);
1238  
1239  		/* From an ancestor pid namespace? */
1240  		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1241  			info->si_pid = 0;
1242  			force = true;
1243  		}
1244  	}
1245  	return __send_signal(sig, info, t, type, force);
1246  }
1247  
1248  static void print_fatal_signal(int signr)
1249  {
1250  	struct pt_regs *regs = signal_pt_regs();
1251  	pr_info("potentially unexpected fatal signal %d.\n", signr);
1252  
1253  #if defined(__i386__) && !defined(__arch_um__)
1254  	pr_info("code at %08lx: ", regs->ip);
1255  	{
1256  		int i;
1257  		for (i = 0; i < 16; i++) {
1258  			unsigned char insn;
1259  
1260  			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1261  				break;
1262  			pr_cont("%02x ", insn);
1263  		}
1264  	}
1265  	pr_cont("\n");
1266  #endif
1267  	preempt_disable();
1268  	show_regs(regs);
1269  	preempt_enable();
1270  }
1271  
1272  static int __init setup_print_fatal_signals(char *str)
1273  {
1274  	get_option (&str, &print_fatal_signals);
1275  
1276  	return 1;
1277  }
1278  
1279  __setup("print-fatal-signals=", setup_print_fatal_signals);
1280  
1281  int
1282  __group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1283  {
1284  	return send_signal(sig, info, p, PIDTYPE_TGID);
1285  }
1286  
1287  int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1288  			enum pid_type type)
1289  {
1290  	unsigned long flags;
1291  	int ret = -ESRCH;
1292  
1293  	if (lock_task_sighand(p, &flags)) {
1294  		ret = send_signal(sig, info, p, type);
1295  		unlock_task_sighand(p, &flags);
1296  	}
1297  
1298  	return ret;
1299  }
1300  
1301  /*
1302   * Force a signal that the process can't ignore: if necessary
1303   * we unblock the signal and change any SIG_IGN to SIG_DFL.
1304   *
1305   * Note: If we unblock the signal, we always reset it to SIG_DFL,
1306   * since we do not want to have a signal handler that was blocked
1307   * be invoked when user space had explicitly blocked it.
1308   *
1309   * We don't want to have recursive SIGSEGV's etc, for example,
1310   * that is why we also clear SIGNAL_UNKILLABLE.
1311   */
1312  static int
1313  force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t, bool sigdfl)
1314  {
1315  	unsigned long int flags;
1316  	int ret, blocked, ignored;
1317  	struct k_sigaction *action;
1318  	int sig = info->si_signo;
1319  
1320  	spin_lock_irqsave(&t->sighand->siglock, flags);
1321  	action = &t->sighand->action[sig-1];
1322  	ignored = action->sa.sa_handler == SIG_IGN;
1323  	blocked = sigismember(&t->blocked, sig);
1324  	if (blocked || ignored || sigdfl) {
1325  		action->sa.sa_handler = SIG_DFL;
1326  		action->sa.sa_flags |= SA_IMMUTABLE;
1327  		if (blocked) {
1328  			sigdelset(&t->blocked, sig);
1329  			recalc_sigpending_and_wake(t);
1330  		}
1331  	}
1332  	/*
1333  	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1334  	 * debugging to leave init killable.
1335  	 */
1336  	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
1337  		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1338  	ret = send_signal(sig, info, t, PIDTYPE_PID);
1339  	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1340  
1341  	return ret;
1342  }
1343  
1344  int force_sig_info(struct kernel_siginfo *info)
1345  {
1346  	return force_sig_info_to_task(info, current, false);
1347  }
1348  
1349  /*
1350   * Nuke all other threads in the group.
1351   */
1352  int zap_other_threads(struct task_struct *p)
1353  {
1354  	struct task_struct *t = p;
1355  	int count = 0;
1356  
1357  	p->signal->group_stop_count = 0;
1358  
1359  	while_each_thread(p, t) {
1360  		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1361  		count++;
1362  
1363  		/* Don't bother with already dead threads */
1364  		if (t->exit_state)
1365  			continue;
1366  		sigaddset(&t->pending.signal, SIGKILL);
1367  		signal_wake_up(t, 1);
1368  	}
1369  
1370  	return count;
1371  }
1372  
1373  struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1374  					   unsigned long *flags)
1375  {
1376  	struct sighand_struct *sighand;
1377  
1378  	rcu_read_lock();
1379  	for (;;) {
1380  		sighand = rcu_dereference(tsk->sighand);
1381  		if (unlikely(sighand == NULL))
1382  			break;
1383  
1384  		/*
1385  		 * This sighand can be already freed and even reused, but
1386  		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1387  		 * initializes ->siglock: this slab can't go away, it has
1388  		 * the same object type, ->siglock can't be reinitialized.
1389  		 *
1390  		 * We need to ensure that tsk->sighand is still the same
1391  		 * after we take the lock, we can race with de_thread() or
1392  		 * __exit_signal(). In the latter case the next iteration
1393  		 * must see ->sighand == NULL.
1394  		 */
1395  		spin_lock_irqsave(&sighand->siglock, *flags);
1396  		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
1397  			break;
1398  		spin_unlock_irqrestore(&sighand->siglock, *flags);
1399  	}
1400  	rcu_read_unlock();
1401  
1402  	return sighand;
1403  }
1404  
1405  #ifdef CONFIG_LOCKDEP
1406  void lockdep_assert_task_sighand_held(struct task_struct *task)
1407  {
1408  	struct sighand_struct *sighand;
1409  
1410  	rcu_read_lock();
1411  	sighand = rcu_dereference(task->sighand);
1412  	if (sighand)
1413  		lockdep_assert_held(&sighand->siglock);
1414  	else
1415  		WARN_ON_ONCE(1);
1416  	rcu_read_unlock();
1417  }
1418  #endif
1419  
1420  /*
1421   * send signal info to all the members of a group
1422   */
1423  int group_send_sig_info(int sig, struct kernel_siginfo *info,
1424  			struct task_struct *p, enum pid_type type)
1425  {
1426  	int ret;
1427  
1428  	rcu_read_lock();
1429  	ret = check_kill_permission(sig, info, p);
1430  	rcu_read_unlock();
1431  
1432  	if (!ret && sig)
1433  		ret = do_send_sig_info(sig, info, p, type);
1434  
1435  	return ret;
1436  }
1437  
1438  /*
1439   * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1440   * control characters do (^C, ^Z etc)
1441   * - the caller must hold at least a readlock on tasklist_lock
1442   */
1443  int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1444  {
1445  	struct task_struct *p = NULL;
1446  	int retval, success;
1447  
1448  	success = 0;
1449  	retval = -ESRCH;
1450  	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1451  		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1452  		success |= !err;
1453  		retval = err;
1454  	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1455  	return success ? 0 : retval;
1456  }
1457  
1458  int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1459  {
1460  	int error = -ESRCH;
1461  	struct task_struct *p;
1462  
1463  	for (;;) {
1464  		rcu_read_lock();
1465  		p = pid_task(pid, PIDTYPE_PID);
1466  		if (p)
1467  			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
1468  		rcu_read_unlock();
1469  		if (likely(!p || error != -ESRCH))
1470  			return error;
1471  
1472  		/*
1473  		 * The task was unhashed in between, try again.  If it
1474  		 * is dead, pid_task() will return NULL, if we race with
1475  		 * de_thread() it will find the new leader.
1476  		 */
1477  	}
1478  }
1479  
1480  static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1481  {
1482  	int error;
1483  	rcu_read_lock();
1484  	error = kill_pid_info(sig, info, find_vpid(pid));
1485  	rcu_read_unlock();
1486  	return error;
1487  }
1488  
1489  static inline bool kill_as_cred_perm(const struct cred *cred,
1490  				     struct task_struct *target)
1491  {
1492  	const struct cred *pcred = __task_cred(target);
1493  
1494  	return uid_eq(cred->euid, pcred->suid) ||
1495  	       uid_eq(cred->euid, pcred->uid) ||
1496  	       uid_eq(cred->uid, pcred->suid) ||
1497  	       uid_eq(cred->uid, pcred->uid);
1498  }
1499  
1500  /*
1501   * The usb asyncio usage of siginfo is wrong.  The glibc support
1502   * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1503   * AKA after the generic fields:
1504   *	kernel_pid_t	si_pid;
1505   *	kernel_uid32_t	si_uid;
1506   *	sigval_t	si_value;
1507   *
1508   * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1509   * after the generic fields is:
1510   *	void __user 	*si_addr;
1511   *
1512   * This is a practical problem when there is a 64bit big endian kernel
1513   * and a 32bit userspace.  As the 32bit address will encoded in the low
1514   * 32bits of the pointer.  Those low 32bits will be stored at higher
1515   * address than appear in a 32 bit pointer.  So userspace will not
1516   * see the address it was expecting for it's completions.
1517   *
1518   * There is nothing in the encoding that can allow
1519   * copy_siginfo_to_user32 to detect this confusion of formats, so
1520   * handle this by requiring the caller of kill_pid_usb_asyncio to
1521   * notice when this situration takes place and to store the 32bit
1522   * pointer in sival_int, instead of sival_addr of the sigval_t addr
1523   * parameter.
1524   */
1525  int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1526  			 struct pid *pid, const struct cred *cred)
1527  {
1528  	struct kernel_siginfo info;
1529  	struct task_struct *p;
1530  	unsigned long flags;
1531  	int ret = -EINVAL;
1532  
1533  	if (!valid_signal(sig))
1534  		return ret;
1535  
1536  	clear_siginfo(&info);
1537  	info.si_signo = sig;
1538  	info.si_errno = errno;
1539  	info.si_code = SI_ASYNCIO;
1540  	*((sigval_t *)&info.si_pid) = addr;
1541  
1542  	rcu_read_lock();
1543  	p = pid_task(pid, PIDTYPE_PID);
1544  	if (!p) {
1545  		ret = -ESRCH;
1546  		goto out_unlock;
1547  	}
1548  	if (!kill_as_cred_perm(cred, p)) {
1549  		ret = -EPERM;
1550  		goto out_unlock;
1551  	}
1552  	ret = security_task_kill(p, &info, sig, cred);
1553  	if (ret)
1554  		goto out_unlock;
1555  
1556  	if (sig) {
1557  		if (lock_task_sighand(p, &flags)) {
1558  			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
1559  			unlock_task_sighand(p, &flags);
1560  		} else
1561  			ret = -ESRCH;
1562  	}
1563  out_unlock:
1564  	rcu_read_unlock();
1565  	return ret;
1566  }
1567  EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
1568  
1569  /*
1570   * kill_something_info() interprets pid in interesting ways just like kill(2).
1571   *
1572   * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1573   * is probably wrong.  Should make it like BSD or SYSV.
1574   */
1575  
1576  static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1577  {
1578  	int ret;
1579  
1580  	if (pid > 0)
1581  		return kill_proc_info(sig, info, pid);
1582  
1583  	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
1584  	if (pid == INT_MIN)
1585  		return -ESRCH;
1586  
1587  	read_lock(&tasklist_lock);
1588  	if (pid != -1) {
1589  		ret = __kill_pgrp_info(sig, info,
1590  				pid ? find_vpid(-pid) : task_pgrp(current));
1591  	} else {
1592  		int retval = 0, count = 0;
1593  		struct task_struct * p;
1594  
1595  		for_each_process(p) {
1596  			if (task_pid_vnr(p) > 1 &&
1597  					!same_thread_group(p, current)) {
1598  				int err = group_send_sig_info(sig, info, p,
1599  							      PIDTYPE_MAX);
1600  				++count;
1601  				if (err != -EPERM)
1602  					retval = err;
1603  			}
1604  		}
1605  		ret = count ? retval : -ESRCH;
1606  	}
1607  	read_unlock(&tasklist_lock);
1608  
1609  	return ret;
1610  }
1611  
1612  /*
1613   * These are for backward compatibility with the rest of the kernel source.
1614   */
1615  
1616  int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1617  {
1618  	/*
1619  	 * Make sure legacy kernel users don't send in bad values
1620  	 * (normal paths check this in check_kill_permission).
1621  	 */
1622  	if (!valid_signal(sig))
1623  		return -EINVAL;
1624  
1625  	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1626  }
1627  EXPORT_SYMBOL(send_sig_info);
1628  
1629  #define __si_special(priv) \
1630  	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1631  
1632  int
1633  send_sig(int sig, struct task_struct *p, int priv)
1634  {
1635  	return send_sig_info(sig, __si_special(priv), p);
1636  }
1637  EXPORT_SYMBOL(send_sig);
1638  
1639  void force_sig(int sig)
1640  {
1641  	struct kernel_siginfo info;
1642  
1643  	clear_siginfo(&info);
1644  	info.si_signo = sig;
1645  	info.si_errno = 0;
1646  	info.si_code = SI_KERNEL;
1647  	info.si_pid = 0;
1648  	info.si_uid = 0;
1649  	force_sig_info(&info);
1650  }
1651  EXPORT_SYMBOL(force_sig);
1652  
1653  void force_fatal_sig(int sig)
1654  {
1655  	struct kernel_siginfo info;
1656  
1657  	clear_siginfo(&info);
1658  	info.si_signo = sig;
1659  	info.si_errno = 0;
1660  	info.si_code = SI_KERNEL;
1661  	info.si_pid = 0;
1662  	info.si_uid = 0;
1663  	force_sig_info_to_task(&info, current, true);
1664  }
1665  
1666  /*
1667   * When things go south during signal handling, we
1668   * will force a SIGSEGV. And if the signal that caused
1669   * the problem was already a SIGSEGV, we'll want to
1670   * make sure we don't even try to deliver the signal..
1671   */
1672  void force_sigsegv(int sig)
1673  {
1674  	if (sig == SIGSEGV)
1675  		force_fatal_sig(SIGSEGV);
1676  	else
1677  		force_sig(SIGSEGV);
1678  }
1679  
1680  int force_sig_fault_to_task(int sig, int code, void __user *addr
1681  	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1682  	, struct task_struct *t)
1683  {
1684  	struct kernel_siginfo info;
1685  
1686  	clear_siginfo(&info);
1687  	info.si_signo = sig;
1688  	info.si_errno = 0;
1689  	info.si_code  = code;
1690  	info.si_addr  = addr;
1691  #ifdef __ia64__
1692  	info.si_imm = imm;
1693  	info.si_flags = flags;
1694  	info.si_isr = isr;
1695  #endif
1696  	return force_sig_info_to_task(&info, t, false);
1697  }
1698  
1699  int force_sig_fault(int sig, int code, void __user *addr
1700  	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
1701  {
1702  	return force_sig_fault_to_task(sig, code, addr
1703  				       ___ARCH_SI_IA64(imm, flags, isr), current);
1704  }
1705  
1706  int send_sig_fault(int sig, int code, void __user *addr
1707  	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1708  	, struct task_struct *t)
1709  {
1710  	struct kernel_siginfo info;
1711  
1712  	clear_siginfo(&info);
1713  	info.si_signo = sig;
1714  	info.si_errno = 0;
1715  	info.si_code  = code;
1716  	info.si_addr  = addr;
1717  #ifdef __ia64__
1718  	info.si_imm = imm;
1719  	info.si_flags = flags;
1720  	info.si_isr = isr;
1721  #endif
1722  	return send_sig_info(info.si_signo, &info, t);
1723  }
1724  
1725  int force_sig_mceerr(int code, void __user *addr, short lsb)
1726  {
1727  	struct kernel_siginfo info;
1728  
1729  	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1730  	clear_siginfo(&info);
1731  	info.si_signo = SIGBUS;
1732  	info.si_errno = 0;
1733  	info.si_code = code;
1734  	info.si_addr = addr;
1735  	info.si_addr_lsb = lsb;
1736  	return force_sig_info(&info);
1737  }
1738  
1739  int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1740  {
1741  	struct kernel_siginfo info;
1742  
1743  	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1744  	clear_siginfo(&info);
1745  	info.si_signo = SIGBUS;
1746  	info.si_errno = 0;
1747  	info.si_code = code;
1748  	info.si_addr = addr;
1749  	info.si_addr_lsb = lsb;
1750  	return send_sig_info(info.si_signo, &info, t);
1751  }
1752  EXPORT_SYMBOL(send_sig_mceerr);
1753  
1754  int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1755  {
1756  	struct kernel_siginfo info;
1757  
1758  	clear_siginfo(&info);
1759  	info.si_signo = SIGSEGV;
1760  	info.si_errno = 0;
1761  	info.si_code  = SEGV_BNDERR;
1762  	info.si_addr  = addr;
1763  	info.si_lower = lower;
1764  	info.si_upper = upper;
1765  	return force_sig_info(&info);
1766  }
1767  
1768  #ifdef SEGV_PKUERR
1769  int force_sig_pkuerr(void __user *addr, u32 pkey)
1770  {
1771  	struct kernel_siginfo info;
1772  
1773  	clear_siginfo(&info);
1774  	info.si_signo = SIGSEGV;
1775  	info.si_errno = 0;
1776  	info.si_code  = SEGV_PKUERR;
1777  	info.si_addr  = addr;
1778  	info.si_pkey  = pkey;
1779  	return force_sig_info(&info);
1780  }
1781  #endif
1782  
1783  int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
1784  {
1785  	struct kernel_siginfo info;
1786  
1787  	clear_siginfo(&info);
1788  	info.si_signo     = SIGTRAP;
1789  	info.si_errno     = 0;
1790  	info.si_code      = TRAP_PERF;
1791  	info.si_addr      = addr;
1792  	info.si_perf_data = sig_data;
1793  	info.si_perf_type = type;
1794  
1795  	return force_sig_info(&info);
1796  }
1797  
1798  /**
1799   * force_sig_seccomp - signals the task to allow in-process syscall emulation
1800   * @syscall: syscall number to send to userland
1801   * @reason: filter-supplied reason code to send to userland (via si_errno)
1802   *
1803   * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
1804   */
1805  int force_sig_seccomp(int syscall, int reason, bool force_coredump)
1806  {
1807  	struct kernel_siginfo info;
1808  
1809  	clear_siginfo(&info);
1810  	info.si_signo = SIGSYS;
1811  	info.si_code = SYS_SECCOMP;
1812  	info.si_call_addr = (void __user *)KSTK_EIP(current);
1813  	info.si_errno = reason;
1814  	info.si_arch = syscall_get_arch(current);
1815  	info.si_syscall = syscall;
1816  	return force_sig_info_to_task(&info, current, force_coredump);
1817  }
1818  
1819  /* For the crazy architectures that include trap information in
1820   * the errno field, instead of an actual errno value.
1821   */
1822  int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1823  {
1824  	struct kernel_siginfo info;
1825  
1826  	clear_siginfo(&info);
1827  	info.si_signo = SIGTRAP;
1828  	info.si_errno = errno;
1829  	info.si_code  = TRAP_HWBKPT;
1830  	info.si_addr  = addr;
1831  	return force_sig_info(&info);
1832  }
1833  
1834  /* For the rare architectures that include trap information using
1835   * si_trapno.
1836   */
1837  int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
1838  {
1839  	struct kernel_siginfo info;
1840  
1841  	clear_siginfo(&info);
1842  	info.si_signo = sig;
1843  	info.si_errno = 0;
1844  	info.si_code  = code;
1845  	info.si_addr  = addr;
1846  	info.si_trapno = trapno;
1847  	return force_sig_info(&info);
1848  }
1849  
1850  /* For the rare architectures that include trap information using
1851   * si_trapno.
1852   */
1853  int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
1854  			  struct task_struct *t)
1855  {
1856  	struct kernel_siginfo info;
1857  
1858  	clear_siginfo(&info);
1859  	info.si_signo = sig;
1860  	info.si_errno = 0;
1861  	info.si_code  = code;
1862  	info.si_addr  = addr;
1863  	info.si_trapno = trapno;
1864  	return send_sig_info(info.si_signo, &info, t);
1865  }
1866  
1867  int kill_pgrp(struct pid *pid, int sig, int priv)
1868  {
1869  	int ret;
1870  
1871  	read_lock(&tasklist_lock);
1872  	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1873  	read_unlock(&tasklist_lock);
1874  
1875  	return ret;
1876  }
1877  EXPORT_SYMBOL(kill_pgrp);
1878  
1879  int kill_pid(struct pid *pid, int sig, int priv)
1880  {
1881  	return kill_pid_info(sig, __si_special(priv), pid);
1882  }
1883  EXPORT_SYMBOL(kill_pid);
1884  
1885  /*
1886   * These functions support sending signals using preallocated sigqueue
1887   * structures.  This is needed "because realtime applications cannot
1888   * afford to lose notifications of asynchronous events, like timer
1889   * expirations or I/O completions".  In the case of POSIX Timers
1890   * we allocate the sigqueue structure from the timer_create.  If this
1891   * allocation fails we are able to report the failure to the application
1892   * with an EAGAIN error.
1893   */
1894  struct sigqueue *sigqueue_alloc(void)
1895  {
1896  	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
1897  }
1898  
1899  void sigqueue_free(struct sigqueue *q)
1900  {
1901  	unsigned long flags;
1902  	spinlock_t *lock = &current->sighand->siglock;
1903  
1904  	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1905  	/*
1906  	 * We must hold ->siglock while testing q->list
1907  	 * to serialize with collect_signal() or with
1908  	 * __exit_signal()->flush_sigqueue().
1909  	 */
1910  	spin_lock_irqsave(lock, flags);
1911  	q->flags &= ~SIGQUEUE_PREALLOC;
1912  	/*
1913  	 * If it is queued it will be freed when dequeued,
1914  	 * like the "regular" sigqueue.
1915  	 */
1916  	if (!list_empty(&q->list))
1917  		q = NULL;
1918  	spin_unlock_irqrestore(lock, flags);
1919  
1920  	if (q)
1921  		__sigqueue_free(q);
1922  }
1923  
1924  int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1925  {
1926  	int sig = q->info.si_signo;
1927  	struct sigpending *pending;
1928  	struct task_struct *t;
1929  	unsigned long flags;
1930  	int ret, result;
1931  
1932  	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1933  
1934  	ret = -1;
1935  	rcu_read_lock();
1936  	t = pid_task(pid, type);
1937  	if (!t || !likely(lock_task_sighand(t, &flags)))
1938  		goto ret;
1939  
1940  	ret = 1; /* the signal is ignored */
1941  	result = TRACE_SIGNAL_IGNORED;
1942  	if (!prepare_signal(sig, t, false))
1943  		goto out;
1944  
1945  	ret = 0;
1946  	if (unlikely(!list_empty(&q->list))) {
1947  		/*
1948  		 * If an SI_TIMER entry is already queue just increment
1949  		 * the overrun count.
1950  		 */
1951  		BUG_ON(q->info.si_code != SI_TIMER);
1952  		q->info.si_overrun++;
1953  		result = TRACE_SIGNAL_ALREADY_PENDING;
1954  		goto out;
1955  	}
1956  	q->info.si_overrun = 0;
1957  
1958  	signalfd_notify(t, sig);
1959  	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1960  	list_add_tail(&q->list, &pending->list);
1961  	sigaddset(&pending->signal, sig);
1962  	complete_signal(sig, t, type);
1963  	result = TRACE_SIGNAL_DELIVERED;
1964  out:
1965  	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
1966  	unlock_task_sighand(t, &flags);
1967  ret:
1968  	rcu_read_unlock();
1969  	return ret;
1970  }
1971  
1972  static void do_notify_pidfd(struct task_struct *task)
1973  {
1974  	struct pid *pid;
1975  
1976  	WARN_ON(task->exit_state == 0);
1977  	pid = task_pid(task);
1978  	wake_up_all(&pid->wait_pidfd);
1979  }
1980  
1981  /*
1982   * Let a parent know about the death of a child.
1983   * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1984   *
1985   * Returns true if our parent ignored us and so we've switched to
1986   * self-reaping.
1987   */
1988  bool do_notify_parent(struct task_struct *tsk, int sig)
1989  {
1990  	struct kernel_siginfo info;
1991  	unsigned long flags;
1992  	struct sighand_struct *psig;
1993  	bool autoreap = false;
1994  	u64 utime, stime;
1995  
1996  	BUG_ON(sig == -1);
1997  
1998   	/* do_notify_parent_cldstop should have been called instead.  */
1999   	BUG_ON(task_is_stopped_or_traced(tsk));
2000  
2001  	BUG_ON(!tsk->ptrace &&
2002  	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
2003  
2004  	/* Wake up all pidfd waiters */
2005  	do_notify_pidfd(tsk);
2006  
2007  	if (sig != SIGCHLD) {
2008  		/*
2009  		 * This is only possible if parent == real_parent.
2010  		 * Check if it has changed security domain.
2011  		 */
2012  		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2013  			sig = SIGCHLD;
2014  	}
2015  
2016  	clear_siginfo(&info);
2017  	info.si_signo = sig;
2018  	info.si_errno = 0;
2019  	/*
2020  	 * We are under tasklist_lock here so our parent is tied to
2021  	 * us and cannot change.
2022  	 *
2023  	 * task_active_pid_ns will always return the same pid namespace
2024  	 * until a task passes through release_task.
2025  	 *
2026  	 * write_lock() currently calls preempt_disable() which is the
2027  	 * same as rcu_read_lock(), but according to Oleg, this is not
2028  	 * correct to rely on this
2029  	 */
2030  	rcu_read_lock();
2031  	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2032  	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2033  				       task_uid(tsk));
2034  	rcu_read_unlock();
2035  
2036  	task_cputime(tsk, &utime, &stime);
2037  	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2038  	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2039  
2040  	info.si_status = tsk->exit_code & 0x7f;
2041  	if (tsk->exit_code & 0x80)
2042  		info.si_code = CLD_DUMPED;
2043  	else if (tsk->exit_code & 0x7f)
2044  		info.si_code = CLD_KILLED;
2045  	else {
2046  		info.si_code = CLD_EXITED;
2047  		info.si_status = tsk->exit_code >> 8;
2048  	}
2049  
2050  	psig = tsk->parent->sighand;
2051  	spin_lock_irqsave(&psig->siglock, flags);
2052  	if (!tsk->ptrace && sig == SIGCHLD &&
2053  	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2054  	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2055  		/*
2056  		 * We are exiting and our parent doesn't care.  POSIX.1
2057  		 * defines special semantics for setting SIGCHLD to SIG_IGN
2058  		 * or setting the SA_NOCLDWAIT flag: we should be reaped
2059  		 * automatically and not left for our parent's wait4 call.
2060  		 * Rather than having the parent do it as a magic kind of
2061  		 * signal handler, we just set this to tell do_exit that we
2062  		 * can be cleaned up without becoming a zombie.  Note that
2063  		 * we still call __wake_up_parent in this case, because a
2064  		 * blocked sys_wait4 might now return -ECHILD.
2065  		 *
2066  		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2067  		 * is implementation-defined: we do (if you don't want
2068  		 * it, just use SIG_IGN instead).
2069  		 */
2070  		autoreap = true;
2071  		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2072  			sig = 0;
2073  	}
2074  	/*
2075  	 * Send with __send_signal as si_pid and si_uid are in the
2076  	 * parent's namespaces.
2077  	 */
2078  	if (valid_signal(sig) && sig)
2079  		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2080  	__wake_up_parent(tsk, tsk->parent);
2081  	spin_unlock_irqrestore(&psig->siglock, flags);
2082  
2083  	return autoreap;
2084  }
2085  
2086  /**
2087   * do_notify_parent_cldstop - notify parent of stopped/continued state change
2088   * @tsk: task reporting the state change
2089   * @for_ptracer: the notification is for ptracer
2090   * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2091   *
2092   * Notify @tsk's parent that the stopped/continued state has changed.  If
2093   * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
2094   * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2095   *
2096   * CONTEXT:
2097   * Must be called with tasklist_lock at least read locked.
2098   */
2099  static void do_notify_parent_cldstop(struct task_struct *tsk,
2100  				     bool for_ptracer, int why)
2101  {
2102  	struct kernel_siginfo info;
2103  	unsigned long flags;
2104  	struct task_struct *parent;
2105  	struct sighand_struct *sighand;
2106  	u64 utime, stime;
2107  
2108  	if (for_ptracer) {
2109  		parent = tsk->parent;
2110  	} else {
2111  		tsk = tsk->group_leader;
2112  		parent = tsk->real_parent;
2113  	}
2114  
2115  	clear_siginfo(&info);
2116  	info.si_signo = SIGCHLD;
2117  	info.si_errno = 0;
2118  	/*
2119  	 * see comment in do_notify_parent() about the following 4 lines
2120  	 */
2121  	rcu_read_lock();
2122  	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2123  	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2124  	rcu_read_unlock();
2125  
2126  	task_cputime(tsk, &utime, &stime);
2127  	info.si_utime = nsec_to_clock_t(utime);
2128  	info.si_stime = nsec_to_clock_t(stime);
2129  
2130   	info.si_code = why;
2131   	switch (why) {
2132   	case CLD_CONTINUED:
2133   		info.si_status = SIGCONT;
2134   		break;
2135   	case CLD_STOPPED:
2136   		info.si_status = tsk->signal->group_exit_code & 0x7f;
2137   		break;
2138   	case CLD_TRAPPED:
2139   		info.si_status = tsk->exit_code & 0x7f;
2140   		break;
2141   	default:
2142   		BUG();
2143   	}
2144  
2145  	sighand = parent->sighand;
2146  	spin_lock_irqsave(&sighand->siglock, flags);
2147  	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2148  	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2149  		__group_send_sig_info(SIGCHLD, &info, parent);
2150  	/*
2151  	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2152  	 */
2153  	__wake_up_parent(tsk, parent);
2154  	spin_unlock_irqrestore(&sighand->siglock, flags);
2155  }
2156  
2157  /*
2158   * This must be called with current->sighand->siglock held.
2159   *
2160   * This should be the path for all ptrace stops.
2161   * We always set current->last_siginfo while stopped here.
2162   * That makes it a way to test a stopped process for
2163   * being ptrace-stopped vs being job-control-stopped.
2164   *
2165   * If we actually decide not to stop at all because the tracer
2166   * is gone, we keep current->exit_code unless clear_code.
2167   */
2168  static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2169  	__releases(&current->sighand->siglock)
2170  	__acquires(&current->sighand->siglock)
2171  {
2172  	bool gstop_done = false;
2173  
2174  	if (arch_ptrace_stop_needed()) {
2175  		/*
2176  		 * The arch code has something special to do before a
2177  		 * ptrace stop.  This is allowed to block, e.g. for faults
2178  		 * on user stack pages.  We can't keep the siglock while
2179  		 * calling arch_ptrace_stop, so we must release it now.
2180  		 * To preserve proper semantics, we must do this before
2181  		 * any signal bookkeeping like checking group_stop_count.
2182  		 */
2183  		spin_unlock_irq(&current->sighand->siglock);
2184  		arch_ptrace_stop();
2185  		spin_lock_irq(&current->sighand->siglock);
2186  	}
2187  
2188  	/*
2189  	 * schedule() will not sleep if there is a pending signal that
2190  	 * can awaken the task.
2191  	 */
2192  	set_special_state(TASK_TRACED);
2193  
2194  	/*
2195  	 * We're committing to trapping.  TRACED should be visible before
2196  	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2197  	 * Also, transition to TRACED and updates to ->jobctl should be
2198  	 * atomic with respect to siglock and should be done after the arch
2199  	 * hook as siglock is released and regrabbed across it.
2200  	 *
2201  	 *     TRACER				    TRACEE
2202  	 *
2203  	 *     ptrace_attach()
2204  	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
2205  	 *     do_wait()
2206  	 *       set_current_state()                smp_wmb();
2207  	 *       ptrace_do_wait()
2208  	 *         wait_task_stopped()
2209  	 *           task_stopped_code()
2210  	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
2211  	 */
2212  	smp_wmb();
2213  
2214  	current->last_siginfo = info;
2215  	current->exit_code = exit_code;
2216  
2217  	/*
2218  	 * If @why is CLD_STOPPED, we're trapping to participate in a group
2219  	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delievered
2220  	 * across siglock relocks since INTERRUPT was scheduled, PENDING
2221  	 * could be clear now.  We act as if SIGCONT is received after
2222  	 * TASK_TRACED is entered - ignore it.
2223  	 */
2224  	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2225  		gstop_done = task_participate_group_stop(current);
2226  
2227  	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2228  	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2229  	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2230  		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2231  
2232  	/* entering a trap, clear TRAPPING */
2233  	task_clear_jobctl_trapping(current);
2234  
2235  	spin_unlock_irq(&current->sighand->siglock);
2236  	read_lock(&tasklist_lock);
2237  	if (likely(current->ptrace)) {
2238  		/*
2239  		 * Notify parents of the stop.
2240  		 *
2241  		 * While ptraced, there are two parents - the ptracer and
2242  		 * the real_parent of the group_leader.  The ptracer should
2243  		 * know about every stop while the real parent is only
2244  		 * interested in the completion of group stop.  The states
2245  		 * for the two don't interact with each other.  Notify
2246  		 * separately unless they're gonna be duplicates.
2247  		 */
2248  		do_notify_parent_cldstop(current, true, why);
2249  		if (gstop_done && ptrace_reparented(current))
2250  			do_notify_parent_cldstop(current, false, why);
2251  
2252  		/*
2253  		 * Don't want to allow preemption here, because
2254  		 * sys_ptrace() needs this task to be inactive.
2255  		 *
2256  		 * XXX: implement read_unlock_no_resched().
2257  		 */
2258  		preempt_disable();
2259  		read_unlock(&tasklist_lock);
2260  		cgroup_enter_frozen();
2261  		preempt_enable_no_resched();
2262  		freezable_schedule();
2263  		cgroup_leave_frozen(true);
2264  	} else {
2265  		/*
2266  		 * By the time we got the lock, our tracer went away.
2267  		 * Don't drop the lock yet, another tracer may come.
2268  		 *
2269  		 * If @gstop_done, the ptracer went away between group stop
2270  		 * completion and here.  During detach, it would have set
2271  		 * JOBCTL_STOP_PENDING on us and we'll re-enter
2272  		 * TASK_STOPPED in do_signal_stop() on return, so notifying
2273  		 * the real parent of the group stop completion is enough.
2274  		 */
2275  		if (gstop_done)
2276  			do_notify_parent_cldstop(current, false, why);
2277  
2278  		/* tasklist protects us from ptrace_freeze_traced() */
2279  		__set_current_state(TASK_RUNNING);
2280  		if (clear_code)
2281  			current->exit_code = 0;
2282  		read_unlock(&tasklist_lock);
2283  	}
2284  
2285  	/*
2286  	 * We are back.  Now reacquire the siglock before touching
2287  	 * last_siginfo, so that we are sure to have synchronized with
2288  	 * any signal-sending on another CPU that wants to examine it.
2289  	 */
2290  	spin_lock_irq(&current->sighand->siglock);
2291  	current->last_siginfo = NULL;
2292  
2293  	/* LISTENING can be set only during STOP traps, clear it */
2294  	current->jobctl &= ~JOBCTL_LISTENING;
2295  
2296  	/*
2297  	 * Queued signals ignored us while we were stopped for tracing.
2298  	 * So check for any that we should take before resuming user mode.
2299  	 * This sets TIF_SIGPENDING, but never clears it.
2300  	 */
2301  	recalc_sigpending_tsk(current);
2302  }
2303  
2304  static void ptrace_do_notify(int signr, int exit_code, int why)
2305  {
2306  	kernel_siginfo_t info;
2307  
2308  	clear_siginfo(&info);
2309  	info.si_signo = signr;
2310  	info.si_code = exit_code;
2311  	info.si_pid = task_pid_vnr(current);
2312  	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2313  
2314  	/* Let the debugger run.  */
2315  	ptrace_stop(exit_code, why, 1, &info);
2316  }
2317  
2318  void ptrace_notify(int exit_code)
2319  {
2320  	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2321  	if (unlikely(current->task_works))
2322  		task_work_run();
2323  
2324  	spin_lock_irq(&current->sighand->siglock);
2325  	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2326  	spin_unlock_irq(&current->sighand->siglock);
2327  }
2328  
2329  /**
2330   * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2331   * @signr: signr causing group stop if initiating
2332   *
2333   * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2334   * and participate in it.  If already set, participate in the existing
2335   * group stop.  If participated in a group stop (and thus slept), %true is
2336   * returned with siglock released.
2337   *
2338   * If ptraced, this function doesn't handle stop itself.  Instead,
2339   * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2340   * untouched.  The caller must ensure that INTERRUPT trap handling takes
2341   * places afterwards.
2342   *
2343   * CONTEXT:
2344   * Must be called with @current->sighand->siglock held, which is released
2345   * on %true return.
2346   *
2347   * RETURNS:
2348   * %false if group stop is already cancelled or ptrace trap is scheduled.
2349   * %true if participated in group stop.
2350   */
2351  static bool do_signal_stop(int signr)
2352  	__releases(&current->sighand->siglock)
2353  {
2354  	struct signal_struct *sig = current->signal;
2355  
2356  	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2357  		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2358  		struct task_struct *t;
2359  
2360  		/* signr will be recorded in task->jobctl for retries */
2361  		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2362  
2363  		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2364  		    unlikely(signal_group_exit(sig)))
2365  			return false;
2366  		/*
2367  		 * There is no group stop already in progress.  We must
2368  		 * initiate one now.
2369  		 *
2370  		 * While ptraced, a task may be resumed while group stop is
2371  		 * still in effect and then receive a stop signal and
2372  		 * initiate another group stop.  This deviates from the
2373  		 * usual behavior as two consecutive stop signals can't
2374  		 * cause two group stops when !ptraced.  That is why we
2375  		 * also check !task_is_stopped(t) below.
2376  		 *
2377  		 * The condition can be distinguished by testing whether
2378  		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
2379  		 * group_exit_code in such case.
2380  		 *
2381  		 * This is not necessary for SIGNAL_STOP_CONTINUED because
2382  		 * an intervening stop signal is required to cause two
2383  		 * continued events regardless of ptrace.
2384  		 */
2385  		if (!(sig->flags & SIGNAL_STOP_STOPPED))
2386  			sig->group_exit_code = signr;
2387  
2388  		sig->group_stop_count = 0;
2389  
2390  		if (task_set_jobctl_pending(current, signr | gstop))
2391  			sig->group_stop_count++;
2392  
2393  		t = current;
2394  		while_each_thread(current, t) {
2395  			/*
2396  			 * Setting state to TASK_STOPPED for a group
2397  			 * stop is always done with the siglock held,
2398  			 * so this check has no races.
2399  			 */
2400  			if (!task_is_stopped(t) &&
2401  			    task_set_jobctl_pending(t, signr | gstop)) {
2402  				sig->group_stop_count++;
2403  				if (likely(!(t->ptrace & PT_SEIZED)))
2404  					signal_wake_up(t, 0);
2405  				else
2406  					ptrace_trap_notify(t);
2407  			}
2408  		}
2409  	}
2410  
2411  	if (likely(!current->ptrace)) {
2412  		int notify = 0;
2413  
2414  		/*
2415  		 * If there are no other threads in the group, or if there
2416  		 * is a group stop in progress and we are the last to stop,
2417  		 * report to the parent.
2418  		 */
2419  		if (task_participate_group_stop(current))
2420  			notify = CLD_STOPPED;
2421  
2422  		set_special_state(TASK_STOPPED);
2423  		spin_unlock_irq(&current->sighand->siglock);
2424  
2425  		/*
2426  		 * Notify the parent of the group stop completion.  Because
2427  		 * we're not holding either the siglock or tasklist_lock
2428  		 * here, ptracer may attach inbetween; however, this is for
2429  		 * group stop and should always be delivered to the real
2430  		 * parent of the group leader.  The new ptracer will get
2431  		 * its notification when this task transitions into
2432  		 * TASK_TRACED.
2433  		 */
2434  		if (notify) {
2435  			read_lock(&tasklist_lock);
2436  			do_notify_parent_cldstop(current, false, notify);
2437  			read_unlock(&tasklist_lock);
2438  		}
2439  
2440  		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2441  		cgroup_enter_frozen();
2442  		freezable_schedule();
2443  		return true;
2444  	} else {
2445  		/*
2446  		 * While ptraced, group stop is handled by STOP trap.
2447  		 * Schedule it and let the caller deal with it.
2448  		 */
2449  		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2450  		return false;
2451  	}
2452  }
2453  
2454  /**
2455   * do_jobctl_trap - take care of ptrace jobctl traps
2456   *
2457   * When PT_SEIZED, it's used for both group stop and explicit
2458   * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2459   * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2460   * the stop signal; otherwise, %SIGTRAP.
2461   *
2462   * When !PT_SEIZED, it's used only for group stop trap with stop signal
2463   * number as exit_code and no siginfo.
2464   *
2465   * CONTEXT:
2466   * Must be called with @current->sighand->siglock held, which may be
2467   * released and re-acquired before returning with intervening sleep.
2468   */
2469  static void do_jobctl_trap(void)
2470  {
2471  	struct signal_struct *signal = current->signal;
2472  	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2473  
2474  	if (current->ptrace & PT_SEIZED) {
2475  		if (!signal->group_stop_count &&
2476  		    !(signal->flags & SIGNAL_STOP_STOPPED))
2477  			signr = SIGTRAP;
2478  		WARN_ON_ONCE(!signr);
2479  		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2480  				 CLD_STOPPED);
2481  	} else {
2482  		WARN_ON_ONCE(!signr);
2483  		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2484  		current->exit_code = 0;
2485  	}
2486  }
2487  
2488  /**
2489   * do_freezer_trap - handle the freezer jobctl trap
2490   *
2491   * Puts the task into frozen state, if only the task is not about to quit.
2492   * In this case it drops JOBCTL_TRAP_FREEZE.
2493   *
2494   * CONTEXT:
2495   * Must be called with @current->sighand->siglock held,
2496   * which is always released before returning.
2497   */
2498  static void do_freezer_trap(void)
2499  	__releases(&current->sighand->siglock)
2500  {
2501  	/*
2502  	 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2503  	 * let's make another loop to give it a chance to be handled.
2504  	 * In any case, we'll return back.
2505  	 */
2506  	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2507  	     JOBCTL_TRAP_FREEZE) {
2508  		spin_unlock_irq(&current->sighand->siglock);
2509  		return;
2510  	}
2511  
2512  	/*
2513  	 * Now we're sure that there is no pending fatal signal and no
2514  	 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2515  	 * immediately (if there is a non-fatal signal pending), and
2516  	 * put the task into sleep.
2517  	 */
2518  	__set_current_state(TASK_INTERRUPTIBLE);
2519  	clear_thread_flag(TIF_SIGPENDING);
2520  	spin_unlock_irq(&current->sighand->siglock);
2521  	cgroup_enter_frozen();
2522  	freezable_schedule();
2523  }
2524  
2525  static int ptrace_signal(int signr, kernel_siginfo_t *info)
2526  {
2527  	/*
2528  	 * We do not check sig_kernel_stop(signr) but set this marker
2529  	 * unconditionally because we do not know whether debugger will
2530  	 * change signr. This flag has no meaning unless we are going
2531  	 * to stop after return from ptrace_stop(). In this case it will
2532  	 * be checked in do_signal_stop(), we should only stop if it was
2533  	 * not cleared by SIGCONT while we were sleeping. See also the
2534  	 * comment in dequeue_signal().
2535  	 */
2536  	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2537  	ptrace_stop(signr, CLD_TRAPPED, 0, info);
2538  
2539  	/* We're back.  Did the debugger cancel the sig?  */
2540  	signr = current->exit_code;
2541  	if (signr == 0)
2542  		return signr;
2543  
2544  	current->exit_code = 0;
2545  
2546  	/*
2547  	 * Update the siginfo structure if the signal has
2548  	 * changed.  If the debugger wanted something
2549  	 * specific in the siginfo structure then it should
2550  	 * have updated *info via PTRACE_SETSIGINFO.
2551  	 */
2552  	if (signr != info->si_signo) {
2553  		clear_siginfo(info);
2554  		info->si_signo = signr;
2555  		info->si_errno = 0;
2556  		info->si_code = SI_USER;
2557  		rcu_read_lock();
2558  		info->si_pid = task_pid_vnr(current->parent);
2559  		info->si_uid = from_kuid_munged(current_user_ns(),
2560  						task_uid(current->parent));
2561  		rcu_read_unlock();
2562  	}
2563  
2564  	/* If the (new) signal is now blocked, requeue it.  */
2565  	if (sigismember(&current->blocked, signr)) {
2566  		send_signal(signr, info, current, PIDTYPE_PID);
2567  		signr = 0;
2568  	}
2569  
2570  	return signr;
2571  }
2572  
2573  static void hide_si_addr_tag_bits(struct ksignal *ksig)
2574  {
2575  	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2576  	case SIL_FAULT:
2577  	case SIL_FAULT_TRAPNO:
2578  	case SIL_FAULT_MCEERR:
2579  	case SIL_FAULT_BNDERR:
2580  	case SIL_FAULT_PKUERR:
2581  	case SIL_FAULT_PERF_EVENT:
2582  		ksig->info.si_addr = arch_untagged_si_addr(
2583  			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2584  		break;
2585  	case SIL_KILL:
2586  	case SIL_TIMER:
2587  	case SIL_POLL:
2588  	case SIL_CHLD:
2589  	case SIL_RT:
2590  	case SIL_SYS:
2591  		break;
2592  	}
2593  }
2594  
2595  bool get_signal(struct ksignal *ksig)
2596  {
2597  	struct sighand_struct *sighand = current->sighand;
2598  	struct signal_struct *signal = current->signal;
2599  	int signr;
2600  
2601  	if (unlikely(current->task_works))
2602  		task_work_run();
2603  
2604  	/*
2605  	 * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2606  	 * that the arch handlers don't all have to do it. If we get here
2607  	 * without TIF_SIGPENDING, just exit after running signal work.
2608  	 */
2609  	if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2610  		if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2611  			tracehook_notify_signal();
2612  		if (!task_sigpending(current))
2613  			return false;
2614  	}
2615  
2616  	if (unlikely(uprobe_deny_signal()))
2617  		return false;
2618  
2619  	/*
2620  	 * Do this once, we can't return to user-mode if freezing() == T.
2621  	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2622  	 * thus do not need another check after return.
2623  	 */
2624  	try_to_freeze();
2625  
2626  relock:
2627  	spin_lock_irq(&sighand->siglock);
2628  
2629  	/*
2630  	 * Every stopped thread goes here after wakeup. Check to see if
2631  	 * we should notify the parent, prepare_signal(SIGCONT) encodes
2632  	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2633  	 */
2634  	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2635  		int why;
2636  
2637  		if (signal->flags & SIGNAL_CLD_CONTINUED)
2638  			why = CLD_CONTINUED;
2639  		else
2640  			why = CLD_STOPPED;
2641  
2642  		signal->flags &= ~SIGNAL_CLD_MASK;
2643  
2644  		spin_unlock_irq(&sighand->siglock);
2645  
2646  		/*
2647  		 * Notify the parent that we're continuing.  This event is
2648  		 * always per-process and doesn't make whole lot of sense
2649  		 * for ptracers, who shouldn't consume the state via
2650  		 * wait(2) either, but, for backward compatibility, notify
2651  		 * the ptracer of the group leader too unless it's gonna be
2652  		 * a duplicate.
2653  		 */
2654  		read_lock(&tasklist_lock);
2655  		do_notify_parent_cldstop(current, false, why);
2656  
2657  		if (ptrace_reparented(current->group_leader))
2658  			do_notify_parent_cldstop(current->group_leader,
2659  						true, why);
2660  		read_unlock(&tasklist_lock);
2661  
2662  		goto relock;
2663  	}
2664  
2665  	/* Has this task already been marked for death? */
2666  	if (signal_group_exit(signal)) {
2667  		ksig->info.si_signo = signr = SIGKILL;
2668  		sigdelset(&current->pending.signal, SIGKILL);
2669  		trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2670  				&sighand->action[SIGKILL - 1]);
2671  		recalc_sigpending();
2672  		goto fatal;
2673  	}
2674  
2675  	for (;;) {
2676  		struct k_sigaction *ka;
2677  
2678  		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2679  		    do_signal_stop(0))
2680  			goto relock;
2681  
2682  		if (unlikely(current->jobctl &
2683  			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2684  			if (current->jobctl & JOBCTL_TRAP_MASK) {
2685  				do_jobctl_trap();
2686  				spin_unlock_irq(&sighand->siglock);
2687  			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2688  				do_freezer_trap();
2689  
2690  			goto relock;
2691  		}
2692  
2693  		/*
2694  		 * If the task is leaving the frozen state, let's update
2695  		 * cgroup counters and reset the frozen bit.
2696  		 */
2697  		if (unlikely(cgroup_task_frozen(current))) {
2698  			spin_unlock_irq(&sighand->siglock);
2699  			cgroup_leave_frozen(false);
2700  			goto relock;
2701  		}
2702  
2703  		/*
2704  		 * Signals generated by the execution of an instruction
2705  		 * need to be delivered before any other pending signals
2706  		 * so that the instruction pointer in the signal stack
2707  		 * frame points to the faulting instruction.
2708  		 */
2709  		signr = dequeue_synchronous_signal(&ksig->info);
2710  		if (!signr)
2711  			signr = dequeue_signal(current, &current->blocked, &ksig->info);
2712  
2713  		if (!signr)
2714  			break; /* will return 0 */
2715  
2716  		if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2717  		    !(sighand->action[signr -1].sa.sa_flags & SA_IMMUTABLE)) {
2718  			signr = ptrace_signal(signr, &ksig->info);
2719  			if (!signr)
2720  				continue;
2721  		}
2722  
2723  		ka = &sighand->action[signr-1];
2724  
2725  		/* Trace actually delivered signals. */
2726  		trace_signal_deliver(signr, &ksig->info, ka);
2727  
2728  		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2729  			continue;
2730  		if (ka->sa.sa_handler != SIG_DFL) {
2731  			/* Run the handler.  */
2732  			ksig->ka = *ka;
2733  
2734  			if (ka->sa.sa_flags & SA_ONESHOT)
2735  				ka->sa.sa_handler = SIG_DFL;
2736  
2737  			break; /* will return non-zero "signr" value */
2738  		}
2739  
2740  		/*
2741  		 * Now we are doing the default action for this signal.
2742  		 */
2743  		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2744  			continue;
2745  
2746  		/*
2747  		 * Global init gets no signals it doesn't want.
2748  		 * Container-init gets no signals it doesn't want from same
2749  		 * container.
2750  		 *
2751  		 * Note that if global/container-init sees a sig_kernel_only()
2752  		 * signal here, the signal must have been generated internally
2753  		 * or must have come from an ancestor namespace. In either
2754  		 * case, the signal cannot be dropped.
2755  		 */
2756  		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2757  				!sig_kernel_only(signr))
2758  			continue;
2759  
2760  		if (sig_kernel_stop(signr)) {
2761  			/*
2762  			 * The default action is to stop all threads in
2763  			 * the thread group.  The job control signals
2764  			 * do nothing in an orphaned pgrp, but SIGSTOP
2765  			 * always works.  Note that siglock needs to be
2766  			 * dropped during the call to is_orphaned_pgrp()
2767  			 * because of lock ordering with tasklist_lock.
2768  			 * This allows an intervening SIGCONT to be posted.
2769  			 * We need to check for that and bail out if necessary.
2770  			 */
2771  			if (signr != SIGSTOP) {
2772  				spin_unlock_irq(&sighand->siglock);
2773  
2774  				/* signals can be posted during this window */
2775  
2776  				if (is_current_pgrp_orphaned())
2777  					goto relock;
2778  
2779  				spin_lock_irq(&sighand->siglock);
2780  			}
2781  
2782  			if (likely(do_signal_stop(ksig->info.si_signo))) {
2783  				/* It released the siglock.  */
2784  				goto relock;
2785  			}
2786  
2787  			/*
2788  			 * We didn't actually stop, due to a race
2789  			 * with SIGCONT or something like that.
2790  			 */
2791  			continue;
2792  		}
2793  
2794  	fatal:
2795  		spin_unlock_irq(&sighand->siglock);
2796  		if (unlikely(cgroup_task_frozen(current)))
2797  			cgroup_leave_frozen(true);
2798  
2799  		/*
2800  		 * Anything else is fatal, maybe with a core dump.
2801  		 */
2802  		current->flags |= PF_SIGNALED;
2803  
2804  		if (sig_kernel_coredump(signr)) {
2805  			if (print_fatal_signals)
2806  				print_fatal_signal(ksig->info.si_signo);
2807  			proc_coredump_connector(current);
2808  			/*
2809  			 * If it was able to dump core, this kills all
2810  			 * other threads in the group and synchronizes with
2811  			 * their demise.  If we lost the race with another
2812  			 * thread getting here, it set group_exit_code
2813  			 * first and our do_group_exit call below will use
2814  			 * that value and ignore the one we pass it.
2815  			 */
2816  			do_coredump(&ksig->info);
2817  		}
2818  
2819  		/*
2820  		 * PF_IO_WORKER threads will catch and exit on fatal signals
2821  		 * themselves. They have cleanup that must be performed, so
2822  		 * we cannot call do_exit() on their behalf.
2823  		 */
2824  		if (current->flags & PF_IO_WORKER)
2825  			goto out;
2826  
2827  		/*
2828  		 * Death signals, no core dump.
2829  		 */
2830  		do_group_exit(ksig->info.si_signo);
2831  		/* NOTREACHED */
2832  	}
2833  	spin_unlock_irq(&sighand->siglock);
2834  out:
2835  	ksig->sig = signr;
2836  
2837  	if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2838  		hide_si_addr_tag_bits(ksig);
2839  
2840  	return ksig->sig > 0;
2841  }
2842  
2843  /**
2844   * signal_delivered -
2845   * @ksig:		kernel signal struct
2846   * @stepping:		nonzero if debugger single-step or block-step in use
2847   *
2848   * This function should be called when a signal has successfully been
2849   * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2850   * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2851   * is set in @ksig->ka.sa.sa_flags.  Tracing is notified.
2852   */
2853  static void signal_delivered(struct ksignal *ksig, int stepping)
2854  {
2855  	sigset_t blocked;
2856  
2857  	/* A signal was successfully delivered, and the
2858  	   saved sigmask was stored on the signal frame,
2859  	   and will be restored by sigreturn.  So we can
2860  	   simply clear the restore sigmask flag.  */
2861  	clear_restore_sigmask();
2862  
2863  	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2864  	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2865  		sigaddset(&blocked, ksig->sig);
2866  	set_current_blocked(&blocked);
2867  	if (current->sas_ss_flags & SS_AUTODISARM)
2868  		sas_ss_reset(current);
2869  	tracehook_signal_handler(stepping);
2870  }
2871  
2872  void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2873  {
2874  	if (failed)
2875  		force_sigsegv(ksig->sig);
2876  	else
2877  		signal_delivered(ksig, stepping);
2878  }
2879  
2880  /*
2881   * It could be that complete_signal() picked us to notify about the
2882   * group-wide signal. Other threads should be notified now to take
2883   * the shared signals in @which since we will not.
2884   */
2885  static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2886  {
2887  	sigset_t retarget;
2888  	struct task_struct *t;
2889  
2890  	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2891  	if (sigisemptyset(&retarget))
2892  		return;
2893  
2894  	t = tsk;
2895  	while_each_thread(tsk, t) {
2896  		if (t->flags & PF_EXITING)
2897  			continue;
2898  
2899  		if (!has_pending_signals(&retarget, &t->blocked))
2900  			continue;
2901  		/* Remove the signals this thread can handle. */
2902  		sigandsets(&retarget, &retarget, &t->blocked);
2903  
2904  		if (!task_sigpending(t))
2905  			signal_wake_up(t, 0);
2906  
2907  		if (sigisemptyset(&retarget))
2908  			break;
2909  	}
2910  }
2911  
2912  void exit_signals(struct task_struct *tsk)
2913  {
2914  	int group_stop = 0;
2915  	sigset_t unblocked;
2916  
2917  	/*
2918  	 * @tsk is about to have PF_EXITING set - lock out users which
2919  	 * expect stable threadgroup.
2920  	 */
2921  	cgroup_threadgroup_change_begin(tsk);
2922  
2923  	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2924  		tsk->flags |= PF_EXITING;
2925  		cgroup_threadgroup_change_end(tsk);
2926  		return;
2927  	}
2928  
2929  	spin_lock_irq(&tsk->sighand->siglock);
2930  	/*
2931  	 * From now this task is not visible for group-wide signals,
2932  	 * see wants_signal(), do_signal_stop().
2933  	 */
2934  	tsk->flags |= PF_EXITING;
2935  
2936  	cgroup_threadgroup_change_end(tsk);
2937  
2938  	if (!task_sigpending(tsk))
2939  		goto out;
2940  
2941  	unblocked = tsk->blocked;
2942  	signotset(&unblocked);
2943  	retarget_shared_pending(tsk, &unblocked);
2944  
2945  	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2946  	    task_participate_group_stop(tsk))
2947  		group_stop = CLD_STOPPED;
2948  out:
2949  	spin_unlock_irq(&tsk->sighand->siglock);
2950  
2951  	/*
2952  	 * If group stop has completed, deliver the notification.  This
2953  	 * should always go to the real parent of the group leader.
2954  	 */
2955  	if (unlikely(group_stop)) {
2956  		read_lock(&tasklist_lock);
2957  		do_notify_parent_cldstop(tsk, false, group_stop);
2958  		read_unlock(&tasklist_lock);
2959  	}
2960  }
2961  
2962  /*
2963   * System call entry points.
2964   */
2965  
2966  /**
2967   *  sys_restart_syscall - restart a system call
2968   */
2969  SYSCALL_DEFINE0(restart_syscall)
2970  {
2971  	struct restart_block *restart = &current->restart_block;
2972  	return restart->fn(restart);
2973  }
2974  
2975  long do_no_restart_syscall(struct restart_block *param)
2976  {
2977  	return -EINTR;
2978  }
2979  
2980  static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2981  {
2982  	if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
2983  		sigset_t newblocked;
2984  		/* A set of now blocked but previously unblocked signals. */
2985  		sigandnsets(&newblocked, newset, &current->blocked);
2986  		retarget_shared_pending(tsk, &newblocked);
2987  	}
2988  	tsk->blocked = *newset;
2989  	recalc_sigpending();
2990  }
2991  
2992  /**
2993   * set_current_blocked - change current->blocked mask
2994   * @newset: new mask
2995   *
2996   * It is wrong to change ->blocked directly, this helper should be used
2997   * to ensure the process can't miss a shared signal we are going to block.
2998   */
2999  void set_current_blocked(sigset_t *newset)
3000  {
3001  	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3002  	__set_current_blocked(newset);
3003  }
3004  
3005  void __set_current_blocked(const sigset_t *newset)
3006  {
3007  	struct task_struct *tsk = current;
3008  
3009  	/*
3010  	 * In case the signal mask hasn't changed, there is nothing we need
3011  	 * to do. The current->blocked shouldn't be modified by other task.
3012  	 */
3013  	if (sigequalsets(&tsk->blocked, newset))
3014  		return;
3015  
3016  	spin_lock_irq(&tsk->sighand->siglock);
3017  	__set_task_blocked(tsk, newset);
3018  	spin_unlock_irq(&tsk->sighand->siglock);
3019  }
3020  
3021  /*
3022   * This is also useful for kernel threads that want to temporarily
3023   * (or permanently) block certain signals.
3024   *
3025   * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3026   * interface happily blocks "unblockable" signals like SIGKILL
3027   * and friends.
3028   */
3029  int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3030  {
3031  	struct task_struct *tsk = current;
3032  	sigset_t newset;
3033  
3034  	/* Lockless, only current can change ->blocked, never from irq */
3035  	if (oldset)
3036  		*oldset = tsk->blocked;
3037  
3038  	switch (how) {
3039  	case SIG_BLOCK:
3040  		sigorsets(&newset, &tsk->blocked, set);
3041  		break;
3042  	case SIG_UNBLOCK:
3043  		sigandnsets(&newset, &tsk->blocked, set);
3044  		break;
3045  	case SIG_SETMASK:
3046  		newset = *set;
3047  		break;
3048  	default:
3049  		return -EINVAL;
3050  	}
3051  
3052  	__set_current_blocked(&newset);
3053  	return 0;
3054  }
3055  EXPORT_SYMBOL(sigprocmask);
3056  
3057  /*
3058   * The api helps set app-provided sigmasks.
3059   *
3060   * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3061   * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3062   *
3063   * Note that it does set_restore_sigmask() in advance, so it must be always
3064   * paired with restore_saved_sigmask_unless() before return from syscall.
3065   */
3066  int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3067  {
3068  	sigset_t kmask;
3069  
3070  	if (!umask)
3071  		return 0;
3072  	if (sigsetsize != sizeof(sigset_t))
3073  		return -EINVAL;
3074  	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3075  		return -EFAULT;
3076  
3077  	set_restore_sigmask();
3078  	current->saved_sigmask = current->blocked;
3079  	set_current_blocked(&kmask);
3080  
3081  	return 0;
3082  }
3083  
3084  #ifdef CONFIG_COMPAT
3085  int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3086  			    size_t sigsetsize)
3087  {
3088  	sigset_t kmask;
3089  
3090  	if (!umask)
3091  		return 0;
3092  	if (sigsetsize != sizeof(compat_sigset_t))
3093  		return -EINVAL;
3094  	if (get_compat_sigset(&kmask, umask))
3095  		return -EFAULT;
3096  
3097  	set_restore_sigmask();
3098  	current->saved_sigmask = current->blocked;
3099  	set_current_blocked(&kmask);
3100  
3101  	return 0;
3102  }
3103  #endif
3104  
3105  /**
3106   *  sys_rt_sigprocmask - change the list of currently blocked signals
3107   *  @how: whether to add, remove, or set signals
3108   *  @nset: stores pending signals
3109   *  @oset: previous value of signal mask if non-null
3110   *  @sigsetsize: size of sigset_t type
3111   */
3112  SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3113  		sigset_t __user *, oset, size_t, sigsetsize)
3114  {
3115  	sigset_t old_set, new_set;
3116  	int error;
3117  
3118  	/* XXX: Don't preclude handling different sized sigset_t's.  */
3119  	if (sigsetsize != sizeof(sigset_t))
3120  		return -EINVAL;
3121  
3122  	old_set = current->blocked;
3123  
3124  	if (nset) {
3125  		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3126  			return -EFAULT;
3127  		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3128  
3129  		error = sigprocmask(how, &new_set, NULL);
3130  		if (error)
3131  			return error;
3132  	}
3133  
3134  	if (oset) {
3135  		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3136  			return -EFAULT;
3137  	}
3138  
3139  	return 0;
3140  }
3141  
3142  #ifdef CONFIG_COMPAT
3143  COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3144  		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3145  {
3146  	sigset_t old_set = current->blocked;
3147  
3148  	/* XXX: Don't preclude handling different sized sigset_t's.  */
3149  	if (sigsetsize != sizeof(sigset_t))
3150  		return -EINVAL;
3151  
3152  	if (nset) {
3153  		sigset_t new_set;
3154  		int error;
3155  		if (get_compat_sigset(&new_set, nset))
3156  			return -EFAULT;
3157  		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3158  
3159  		error = sigprocmask(how, &new_set, NULL);
3160  		if (error)
3161  			return error;
3162  	}
3163  	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3164  }
3165  #endif
3166  
3167  static void do_sigpending(sigset_t *set)
3168  {
3169  	spin_lock_irq(&current->sighand->siglock);
3170  	sigorsets(set, &current->pending.signal,
3171  		  &current->signal->shared_pending.signal);
3172  	spin_unlock_irq(&current->sighand->siglock);
3173  
3174  	/* Outside the lock because only this thread touches it.  */
3175  	sigandsets(set, &current->blocked, set);
3176  }
3177  
3178  /**
3179   *  sys_rt_sigpending - examine a pending signal that has been raised
3180   *			while blocked
3181   *  @uset: stores pending signals
3182   *  @sigsetsize: size of sigset_t type or larger
3183   */
3184  SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3185  {
3186  	sigset_t set;
3187  
3188  	if (sigsetsize > sizeof(*uset))
3189  		return -EINVAL;
3190  
3191  	do_sigpending(&set);
3192  
3193  	if (copy_to_user(uset, &set, sigsetsize))
3194  		return -EFAULT;
3195  
3196  	return 0;
3197  }
3198  
3199  #ifdef CONFIG_COMPAT
3200  COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3201  		compat_size_t, sigsetsize)
3202  {
3203  	sigset_t set;
3204  
3205  	if (sigsetsize > sizeof(*uset))
3206  		return -EINVAL;
3207  
3208  	do_sigpending(&set);
3209  
3210  	return put_compat_sigset(uset, &set, sigsetsize);
3211  }
3212  #endif
3213  
3214  static const struct {
3215  	unsigned char limit, layout;
3216  } sig_sicodes[] = {
3217  	[SIGILL]  = { NSIGILL,  SIL_FAULT },
3218  	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
3219  	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3220  	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
3221  	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3222  #if defined(SIGEMT)
3223  	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
3224  #endif
3225  	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3226  	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
3227  	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
3228  };
3229  
3230  static bool known_siginfo_layout(unsigned sig, int si_code)
3231  {
3232  	if (si_code == SI_KERNEL)
3233  		return true;
3234  	else if ((si_code > SI_USER)) {
3235  		if (sig_specific_sicodes(sig)) {
3236  			if (si_code <= sig_sicodes[sig].limit)
3237  				return true;
3238  		}
3239  		else if (si_code <= NSIGPOLL)
3240  			return true;
3241  	}
3242  	else if (si_code >= SI_DETHREAD)
3243  		return true;
3244  	else if (si_code == SI_ASYNCNL)
3245  		return true;
3246  	return false;
3247  }
3248  
3249  enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3250  {
3251  	enum siginfo_layout layout = SIL_KILL;
3252  	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3253  		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3254  		    (si_code <= sig_sicodes[sig].limit)) {
3255  			layout = sig_sicodes[sig].layout;
3256  			/* Handle the exceptions */
3257  			if ((sig == SIGBUS) &&
3258  			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3259  				layout = SIL_FAULT_MCEERR;
3260  			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3261  				layout = SIL_FAULT_BNDERR;
3262  #ifdef SEGV_PKUERR
3263  			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3264  				layout = SIL_FAULT_PKUERR;
3265  #endif
3266  			else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3267  				layout = SIL_FAULT_PERF_EVENT;
3268  			else if (IS_ENABLED(CONFIG_SPARC) &&
3269  				 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3270  				layout = SIL_FAULT_TRAPNO;
3271  			else if (IS_ENABLED(CONFIG_ALPHA) &&
3272  				 ((sig == SIGFPE) ||
3273  				  ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3274  				layout = SIL_FAULT_TRAPNO;
3275  		}
3276  		else if (si_code <= NSIGPOLL)
3277  			layout = SIL_POLL;
3278  	} else {
3279  		if (si_code == SI_TIMER)
3280  			layout = SIL_TIMER;
3281  		else if (si_code == SI_SIGIO)
3282  			layout = SIL_POLL;
3283  		else if (si_code < 0)
3284  			layout = SIL_RT;
3285  	}
3286  	return layout;
3287  }
3288  
3289  static inline char __user *si_expansion(const siginfo_t __user *info)
3290  {
3291  	return ((char __user *)info) + sizeof(struct kernel_siginfo);
3292  }
3293  
3294  int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3295  {
3296  	char __user *expansion = si_expansion(to);
3297  	if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
3298  		return -EFAULT;
3299  	if (clear_user(expansion, SI_EXPANSION_SIZE))
3300  		return -EFAULT;
3301  	return 0;
3302  }
3303  
3304  static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3305  				       const siginfo_t __user *from)
3306  {
3307  	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3308  		char __user *expansion = si_expansion(from);
3309  		char buf[SI_EXPANSION_SIZE];
3310  		int i;
3311  		/*
3312  		 * An unknown si_code might need more than
3313  		 * sizeof(struct kernel_siginfo) bytes.  Verify all of the
3314  		 * extra bytes are 0.  This guarantees copy_siginfo_to_user
3315  		 * will return this data to userspace exactly.
3316  		 */
3317  		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3318  			return -EFAULT;
3319  		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3320  			if (buf[i] != 0)
3321  				return -E2BIG;
3322  		}
3323  	}
3324  	return 0;
3325  }
3326  
3327  static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3328  				    const siginfo_t __user *from)
3329  {
3330  	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3331  		return -EFAULT;
3332  	to->si_signo = signo;
3333  	return post_copy_siginfo_from_user(to, from);
3334  }
3335  
3336  int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3337  {
3338  	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3339  		return -EFAULT;
3340  	return post_copy_siginfo_from_user(to, from);
3341  }
3342  
3343  #ifdef CONFIG_COMPAT
3344  /**
3345   * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3346   * @to: compat siginfo destination
3347   * @from: kernel siginfo source
3348   *
3349   * Note: This function does not work properly for the SIGCHLD on x32, but
3350   * fortunately it doesn't have to.  The only valid callers for this function are
3351   * copy_siginfo_to_user32, which is overriden for x32 and the coredump code.
3352   * The latter does not care because SIGCHLD will never cause a coredump.
3353   */
3354  void copy_siginfo_to_external32(struct compat_siginfo *to,
3355  		const struct kernel_siginfo *from)
3356  {
3357  	memset(to, 0, sizeof(*to));
3358  
3359  	to->si_signo = from->si_signo;
3360  	to->si_errno = from->si_errno;
3361  	to->si_code  = from->si_code;
3362  	switch(siginfo_layout(from->si_signo, from->si_code)) {
3363  	case SIL_KILL:
3364  		to->si_pid = from->si_pid;
3365  		to->si_uid = from->si_uid;
3366  		break;
3367  	case SIL_TIMER:
3368  		to->si_tid     = from->si_tid;
3369  		to->si_overrun = from->si_overrun;
3370  		to->si_int     = from->si_int;
3371  		break;
3372  	case SIL_POLL:
3373  		to->si_band = from->si_band;
3374  		to->si_fd   = from->si_fd;
3375  		break;
3376  	case SIL_FAULT:
3377  		to->si_addr = ptr_to_compat(from->si_addr);
3378  		break;
3379  	case SIL_FAULT_TRAPNO:
3380  		to->si_addr = ptr_to_compat(from->si_addr);
3381  		to->si_trapno = from->si_trapno;
3382  		break;
3383  	case SIL_FAULT_MCEERR:
3384  		to->si_addr = ptr_to_compat(from->si_addr);
3385  		to->si_addr_lsb = from->si_addr_lsb;
3386  		break;
3387  	case SIL_FAULT_BNDERR:
3388  		to->si_addr = ptr_to_compat(from->si_addr);
3389  		to->si_lower = ptr_to_compat(from->si_lower);
3390  		to->si_upper = ptr_to_compat(from->si_upper);
3391  		break;
3392  	case SIL_FAULT_PKUERR:
3393  		to->si_addr = ptr_to_compat(from->si_addr);
3394  		to->si_pkey = from->si_pkey;
3395  		break;
3396  	case SIL_FAULT_PERF_EVENT:
3397  		to->si_addr = ptr_to_compat(from->si_addr);
3398  		to->si_perf_data = from->si_perf_data;
3399  		to->si_perf_type = from->si_perf_type;
3400  		break;
3401  	case SIL_CHLD:
3402  		to->si_pid = from->si_pid;
3403  		to->si_uid = from->si_uid;
3404  		to->si_status = from->si_status;
3405  		to->si_utime = from->si_utime;
3406  		to->si_stime = from->si_stime;
3407  		break;
3408  	case SIL_RT:
3409  		to->si_pid = from->si_pid;
3410  		to->si_uid = from->si_uid;
3411  		to->si_int = from->si_int;
3412  		break;
3413  	case SIL_SYS:
3414  		to->si_call_addr = ptr_to_compat(from->si_call_addr);
3415  		to->si_syscall   = from->si_syscall;
3416  		to->si_arch      = from->si_arch;
3417  		break;
3418  	}
3419  }
3420  
3421  int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3422  			   const struct kernel_siginfo *from)
3423  {
3424  	struct compat_siginfo new;
3425  
3426  	copy_siginfo_to_external32(&new, from);
3427  	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3428  		return -EFAULT;
3429  	return 0;
3430  }
3431  
3432  static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3433  					 const struct compat_siginfo *from)
3434  {
3435  	clear_siginfo(to);
3436  	to->si_signo = from->si_signo;
3437  	to->si_errno = from->si_errno;
3438  	to->si_code  = from->si_code;
3439  	switch(siginfo_layout(from->si_signo, from->si_code)) {
3440  	case SIL_KILL:
3441  		to->si_pid = from->si_pid;
3442  		to->si_uid = from->si_uid;
3443  		break;
3444  	case SIL_TIMER:
3445  		to->si_tid     = from->si_tid;
3446  		to->si_overrun = from->si_overrun;
3447  		to->si_int     = from->si_int;
3448  		break;
3449  	case SIL_POLL:
3450  		to->si_band = from->si_band;
3451  		to->si_fd   = from->si_fd;
3452  		break;
3453  	case SIL_FAULT:
3454  		to->si_addr = compat_ptr(from->si_addr);
3455  		break;
3456  	case SIL_FAULT_TRAPNO:
3457  		to->si_addr = compat_ptr(from->si_addr);
3458  		to->si_trapno = from->si_trapno;
3459  		break;
3460  	case SIL_FAULT_MCEERR:
3461  		to->si_addr = compat_ptr(from->si_addr);
3462  		to->si_addr_lsb = from->si_addr_lsb;
3463  		break;
3464  	case SIL_FAULT_BNDERR:
3465  		to->si_addr = compat_ptr(from->si_addr);
3466  		to->si_lower = compat_ptr(from->si_lower);
3467  		to->si_upper = compat_ptr(from->si_upper);
3468  		break;
3469  	case SIL_FAULT_PKUERR:
3470  		to->si_addr = compat_ptr(from->si_addr);
3471  		to->si_pkey = from->si_pkey;
3472  		break;
3473  	case SIL_FAULT_PERF_EVENT:
3474  		to->si_addr = compat_ptr(from->si_addr);
3475  		to->si_perf_data = from->si_perf_data;
3476  		to->si_perf_type = from->si_perf_type;
3477  		break;
3478  	case SIL_CHLD:
3479  		to->si_pid    = from->si_pid;
3480  		to->si_uid    = from->si_uid;
3481  		to->si_status = from->si_status;
3482  #ifdef CONFIG_X86_X32_ABI
3483  		if (in_x32_syscall()) {
3484  			to->si_utime = from->_sifields._sigchld_x32._utime;
3485  			to->si_stime = from->_sifields._sigchld_x32._stime;
3486  		} else
3487  #endif
3488  		{
3489  			to->si_utime = from->si_utime;
3490  			to->si_stime = from->si_stime;
3491  		}
3492  		break;
3493  	case SIL_RT:
3494  		to->si_pid = from->si_pid;
3495  		to->si_uid = from->si_uid;
3496  		to->si_int = from->si_int;
3497  		break;
3498  	case SIL_SYS:
3499  		to->si_call_addr = compat_ptr(from->si_call_addr);
3500  		to->si_syscall   = from->si_syscall;
3501  		to->si_arch      = from->si_arch;
3502  		break;
3503  	}
3504  	return 0;
3505  }
3506  
3507  static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3508  				      const struct compat_siginfo __user *ufrom)
3509  {
3510  	struct compat_siginfo from;
3511  
3512  	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3513  		return -EFAULT;
3514  
3515  	from.si_signo = signo;
3516  	return post_copy_siginfo_from_user32(to, &from);
3517  }
3518  
3519  int copy_siginfo_from_user32(struct kernel_siginfo *to,
3520  			     const struct compat_siginfo __user *ufrom)
3521  {
3522  	struct compat_siginfo from;
3523  
3524  	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3525  		return -EFAULT;
3526  
3527  	return post_copy_siginfo_from_user32(to, &from);
3528  }
3529  #endif /* CONFIG_COMPAT */
3530  
3531  /**
3532   *  do_sigtimedwait - wait for queued signals specified in @which
3533   *  @which: queued signals to wait for
3534   *  @info: if non-null, the signal's siginfo is returned here
3535   *  @ts: upper bound on process time suspension
3536   */
3537  static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3538  		    const struct timespec64 *ts)
3539  {
3540  	ktime_t *to = NULL, timeout = KTIME_MAX;
3541  	struct task_struct *tsk = current;
3542  	sigset_t mask = *which;
3543  	int sig, ret = 0;
3544  
3545  	if (ts) {
3546  		if (!timespec64_valid(ts))
3547  			return -EINVAL;
3548  		timeout = timespec64_to_ktime(*ts);
3549  		to = &timeout;
3550  	}
3551  
3552  	/*
3553  	 * Invert the set of allowed signals to get those we want to block.
3554  	 */
3555  	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3556  	signotset(&mask);
3557  
3558  	spin_lock_irq(&tsk->sighand->siglock);
3559  	sig = dequeue_signal(tsk, &mask, info);
3560  	if (!sig && timeout) {
3561  		/*
3562  		 * None ready, temporarily unblock those we're interested
3563  		 * while we are sleeping in so that we'll be awakened when
3564  		 * they arrive. Unblocking is always fine, we can avoid
3565  		 * set_current_blocked().
3566  		 */
3567  		tsk->real_blocked = tsk->blocked;
3568  		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3569  		recalc_sigpending();
3570  		spin_unlock_irq(&tsk->sighand->siglock);
3571  
3572  		__set_current_state(TASK_INTERRUPTIBLE);
3573  		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3574  							 HRTIMER_MODE_REL);
3575  		spin_lock_irq(&tsk->sighand->siglock);
3576  		__set_task_blocked(tsk, &tsk->real_blocked);
3577  		sigemptyset(&tsk->real_blocked);
3578  		sig = dequeue_signal(tsk, &mask, info);
3579  	}
3580  	spin_unlock_irq(&tsk->sighand->siglock);
3581  
3582  	if (sig)
3583  		return sig;
3584  	return ret ? -EINTR : -EAGAIN;
3585  }
3586  
3587  /**
3588   *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
3589   *			in @uthese
3590   *  @uthese: queued signals to wait for
3591   *  @uinfo: if non-null, the signal's siginfo is returned here
3592   *  @uts: upper bound on process time suspension
3593   *  @sigsetsize: size of sigset_t type
3594   */
3595  SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3596  		siginfo_t __user *, uinfo,
3597  		const struct __kernel_timespec __user *, uts,
3598  		size_t, sigsetsize)
3599  {
3600  	sigset_t these;
3601  	struct timespec64 ts;
3602  	kernel_siginfo_t info;
3603  	int ret;
3604  
3605  	/* XXX: Don't preclude handling different sized sigset_t's.  */
3606  	if (sigsetsize != sizeof(sigset_t))
3607  		return -EINVAL;
3608  
3609  	if (copy_from_user(&these, uthese, sizeof(these)))
3610  		return -EFAULT;
3611  
3612  	if (uts) {
3613  		if (get_timespec64(&ts, uts))
3614  			return -EFAULT;
3615  	}
3616  
3617  	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3618  
3619  	if (ret > 0 && uinfo) {
3620  		if (copy_siginfo_to_user(uinfo, &info))
3621  			ret = -EFAULT;
3622  	}
3623  
3624  	return ret;
3625  }
3626  
3627  #ifdef CONFIG_COMPAT_32BIT_TIME
3628  SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3629  		siginfo_t __user *, uinfo,
3630  		const struct old_timespec32 __user *, uts,
3631  		size_t, sigsetsize)
3632  {
3633  	sigset_t these;
3634  	struct timespec64 ts;
3635  	kernel_siginfo_t info;
3636  	int ret;
3637  
3638  	if (sigsetsize != sizeof(sigset_t))
3639  		return -EINVAL;
3640  
3641  	if (copy_from_user(&these, uthese, sizeof(these)))
3642  		return -EFAULT;
3643  
3644  	if (uts) {
3645  		if (get_old_timespec32(&ts, uts))
3646  			return -EFAULT;
3647  	}
3648  
3649  	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3650  
3651  	if (ret > 0 && uinfo) {
3652  		if (copy_siginfo_to_user(uinfo, &info))
3653  			ret = -EFAULT;
3654  	}
3655  
3656  	return ret;
3657  }
3658  #endif
3659  
3660  #ifdef CONFIG_COMPAT
3661  COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3662  		struct compat_siginfo __user *, uinfo,
3663  		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3664  {
3665  	sigset_t s;
3666  	struct timespec64 t;
3667  	kernel_siginfo_t info;
3668  	long ret;
3669  
3670  	if (sigsetsize != sizeof(sigset_t))
3671  		return -EINVAL;
3672  
3673  	if (get_compat_sigset(&s, uthese))
3674  		return -EFAULT;
3675  
3676  	if (uts) {
3677  		if (get_timespec64(&t, uts))
3678  			return -EFAULT;
3679  	}
3680  
3681  	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3682  
3683  	if (ret > 0 && uinfo) {
3684  		if (copy_siginfo_to_user32(uinfo, &info))
3685  			ret = -EFAULT;
3686  	}
3687  
3688  	return ret;
3689  }
3690  
3691  #ifdef CONFIG_COMPAT_32BIT_TIME
3692  COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3693  		struct compat_siginfo __user *, uinfo,
3694  		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3695  {
3696  	sigset_t s;
3697  	struct timespec64 t;
3698  	kernel_siginfo_t info;
3699  	long ret;
3700  
3701  	if (sigsetsize != sizeof(sigset_t))
3702  		return -EINVAL;
3703  
3704  	if (get_compat_sigset(&s, uthese))
3705  		return -EFAULT;
3706  
3707  	if (uts) {
3708  		if (get_old_timespec32(&t, uts))
3709  			return -EFAULT;
3710  	}
3711  
3712  	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3713  
3714  	if (ret > 0 && uinfo) {
3715  		if (copy_siginfo_to_user32(uinfo, &info))
3716  			ret = -EFAULT;
3717  	}
3718  
3719  	return ret;
3720  }
3721  #endif
3722  #endif
3723  
3724  static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3725  {
3726  	clear_siginfo(info);
3727  	info->si_signo = sig;
3728  	info->si_errno = 0;
3729  	info->si_code = SI_USER;
3730  	info->si_pid = task_tgid_vnr(current);
3731  	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3732  }
3733  
3734  /**
3735   *  sys_kill - send a signal to a process
3736   *  @pid: the PID of the process
3737   *  @sig: signal to be sent
3738   */
3739  SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3740  {
3741  	struct kernel_siginfo info;
3742  
3743  	prepare_kill_siginfo(sig, &info);
3744  
3745  	return kill_something_info(sig, &info, pid);
3746  }
3747  
3748  /*
3749   * Verify that the signaler and signalee either are in the same pid namespace
3750   * or that the signaler's pid namespace is an ancestor of the signalee's pid
3751   * namespace.
3752   */
3753  static bool access_pidfd_pidns(struct pid *pid)
3754  {
3755  	struct pid_namespace *active = task_active_pid_ns(current);
3756  	struct pid_namespace *p = ns_of_pid(pid);
3757  
3758  	for (;;) {
3759  		if (!p)
3760  			return false;
3761  		if (p == active)
3762  			break;
3763  		p = p->parent;
3764  	}
3765  
3766  	return true;
3767  }
3768  
3769  static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3770  		siginfo_t __user *info)
3771  {
3772  #ifdef CONFIG_COMPAT
3773  	/*
3774  	 * Avoid hooking up compat syscalls and instead handle necessary
3775  	 * conversions here. Note, this is a stop-gap measure and should not be
3776  	 * considered a generic solution.
3777  	 */
3778  	if (in_compat_syscall())
3779  		return copy_siginfo_from_user32(
3780  			kinfo, (struct compat_siginfo __user *)info);
3781  #endif
3782  	return copy_siginfo_from_user(kinfo, info);
3783  }
3784  
3785  static struct pid *pidfd_to_pid(const struct file *file)
3786  {
3787  	struct pid *pid;
3788  
3789  	pid = pidfd_pid(file);
3790  	if (!IS_ERR(pid))
3791  		return pid;
3792  
3793  	return tgid_pidfd_to_pid(file);
3794  }
3795  
3796  /**
3797   * sys_pidfd_send_signal - Signal a process through a pidfd
3798   * @pidfd:  file descriptor of the process
3799   * @sig:    signal to send
3800   * @info:   signal info
3801   * @flags:  future flags
3802   *
3803   * The syscall currently only signals via PIDTYPE_PID which covers
3804   * kill(<positive-pid>, <signal>. It does not signal threads or process
3805   * groups.
3806   * In order to extend the syscall to threads and process groups the @flags
3807   * argument should be used. In essence, the @flags argument will determine
3808   * what is signaled and not the file descriptor itself. Put in other words,
3809   * grouping is a property of the flags argument not a property of the file
3810   * descriptor.
3811   *
3812   * Return: 0 on success, negative errno on failure
3813   */
3814  SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3815  		siginfo_t __user *, info, unsigned int, flags)
3816  {
3817  	int ret;
3818  	struct fd f;
3819  	struct pid *pid;
3820  	kernel_siginfo_t kinfo;
3821  
3822  	/* Enforce flags be set to 0 until we add an extension. */
3823  	if (flags)
3824  		return -EINVAL;
3825  
3826  	f = fdget(pidfd);
3827  	if (!f.file)
3828  		return -EBADF;
3829  
3830  	/* Is this a pidfd? */
3831  	pid = pidfd_to_pid(f.file);
3832  	if (IS_ERR(pid)) {
3833  		ret = PTR_ERR(pid);
3834  		goto err;
3835  	}
3836  
3837  	ret = -EINVAL;
3838  	if (!access_pidfd_pidns(pid))
3839  		goto err;
3840  
3841  	if (info) {
3842  		ret = copy_siginfo_from_user_any(&kinfo, info);
3843  		if (unlikely(ret))
3844  			goto err;
3845  
3846  		ret = -EINVAL;
3847  		if (unlikely(sig != kinfo.si_signo))
3848  			goto err;
3849  
3850  		/* Only allow sending arbitrary signals to yourself. */
3851  		ret = -EPERM;
3852  		if ((task_pid(current) != pid) &&
3853  		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3854  			goto err;
3855  	} else {
3856  		prepare_kill_siginfo(sig, &kinfo);
3857  	}
3858  
3859  	ret = kill_pid_info(sig, &kinfo, pid);
3860  
3861  err:
3862  	fdput(f);
3863  	return ret;
3864  }
3865  
3866  static int
3867  do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3868  {
3869  	struct task_struct *p;
3870  	int error = -ESRCH;
3871  
3872  	rcu_read_lock();
3873  	p = find_task_by_vpid(pid);
3874  	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3875  		error = check_kill_permission(sig, info, p);
3876  		/*
3877  		 * The null signal is a permissions and process existence
3878  		 * probe.  No signal is actually delivered.
3879  		 */
3880  		if (!error && sig) {
3881  			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3882  			/*
3883  			 * If lock_task_sighand() failed we pretend the task
3884  			 * dies after receiving the signal. The window is tiny,
3885  			 * and the signal is private anyway.
3886  			 */
3887  			if (unlikely(error == -ESRCH))
3888  				error = 0;
3889  		}
3890  	}
3891  	rcu_read_unlock();
3892  
3893  	return error;
3894  }
3895  
3896  static int do_tkill(pid_t tgid, pid_t pid, int sig)
3897  {
3898  	struct kernel_siginfo info;
3899  
3900  	clear_siginfo(&info);
3901  	info.si_signo = sig;
3902  	info.si_errno = 0;
3903  	info.si_code = SI_TKILL;
3904  	info.si_pid = task_tgid_vnr(current);
3905  	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3906  
3907  	return do_send_specific(tgid, pid, sig, &info);
3908  }
3909  
3910  /**
3911   *  sys_tgkill - send signal to one specific thread
3912   *  @tgid: the thread group ID of the thread
3913   *  @pid: the PID of the thread
3914   *  @sig: signal to be sent
3915   *
3916   *  This syscall also checks the @tgid and returns -ESRCH even if the PID
3917   *  exists but it's not belonging to the target process anymore. This
3918   *  method solves the problem of threads exiting and PIDs getting reused.
3919   */
3920  SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3921  {
3922  	/* This is only valid for single tasks */
3923  	if (pid <= 0 || tgid <= 0)
3924  		return -EINVAL;
3925  
3926  	return do_tkill(tgid, pid, sig);
3927  }
3928  
3929  /**
3930   *  sys_tkill - send signal to one specific task
3931   *  @pid: the PID of the task
3932   *  @sig: signal to be sent
3933   *
3934   *  Send a signal to only one task, even if it's a CLONE_THREAD task.
3935   */
3936  SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3937  {
3938  	/* This is only valid for single tasks */
3939  	if (pid <= 0)
3940  		return -EINVAL;
3941  
3942  	return do_tkill(0, pid, sig);
3943  }
3944  
3945  static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3946  {
3947  	/* Not even root can pretend to send signals from the kernel.
3948  	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3949  	 */
3950  	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3951  	    (task_pid_vnr(current) != pid))
3952  		return -EPERM;
3953  
3954  	/* POSIX.1b doesn't mention process groups.  */
3955  	return kill_proc_info(sig, info, pid);
3956  }
3957  
3958  /**
3959   *  sys_rt_sigqueueinfo - send signal information to a signal
3960   *  @pid: the PID of the thread
3961   *  @sig: signal to be sent
3962   *  @uinfo: signal info to be sent
3963   */
3964  SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3965  		siginfo_t __user *, uinfo)
3966  {
3967  	kernel_siginfo_t info;
3968  	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3969  	if (unlikely(ret))
3970  		return ret;
3971  	return do_rt_sigqueueinfo(pid, sig, &info);
3972  }
3973  
3974  #ifdef CONFIG_COMPAT
3975  COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3976  			compat_pid_t, pid,
3977  			int, sig,
3978  			struct compat_siginfo __user *, uinfo)
3979  {
3980  	kernel_siginfo_t info;
3981  	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3982  	if (unlikely(ret))
3983  		return ret;
3984  	return do_rt_sigqueueinfo(pid, sig, &info);
3985  }
3986  #endif
3987  
3988  static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3989  {
3990  	/* This is only valid for single tasks */
3991  	if (pid <= 0 || tgid <= 0)
3992  		return -EINVAL;
3993  
3994  	/* Not even root can pretend to send signals from the kernel.
3995  	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3996  	 */
3997  	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3998  	    (task_pid_vnr(current) != pid))
3999  		return -EPERM;
4000  
4001  	return do_send_specific(tgid, pid, sig, info);
4002  }
4003  
4004  SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4005  		siginfo_t __user *, uinfo)
4006  {
4007  	kernel_siginfo_t info;
4008  	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4009  	if (unlikely(ret))
4010  		return ret;
4011  	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4012  }
4013  
4014  #ifdef CONFIG_COMPAT
4015  COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4016  			compat_pid_t, tgid,
4017  			compat_pid_t, pid,
4018  			int, sig,
4019  			struct compat_siginfo __user *, uinfo)
4020  {
4021  	kernel_siginfo_t info;
4022  	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4023  	if (unlikely(ret))
4024  		return ret;
4025  	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4026  }
4027  #endif
4028  
4029  /*
4030   * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4031   */
4032  void kernel_sigaction(int sig, __sighandler_t action)
4033  {
4034  	spin_lock_irq(&current->sighand->siglock);
4035  	current->sighand->action[sig - 1].sa.sa_handler = action;
4036  	if (action == SIG_IGN) {
4037  		sigset_t mask;
4038  
4039  		sigemptyset(&mask);
4040  		sigaddset(&mask, sig);
4041  
4042  		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4043  		flush_sigqueue_mask(&mask, &current->pending);
4044  		recalc_sigpending();
4045  	}
4046  	spin_unlock_irq(&current->sighand->siglock);
4047  }
4048  EXPORT_SYMBOL(kernel_sigaction);
4049  
4050  void __weak sigaction_compat_abi(struct k_sigaction *act,
4051  		struct k_sigaction *oact)
4052  {
4053  }
4054  
4055  int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4056  {
4057  	struct task_struct *p = current, *t;
4058  	struct k_sigaction *k;
4059  	sigset_t mask;
4060  
4061  	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4062  		return -EINVAL;
4063  
4064  	k = &p->sighand->action[sig-1];
4065  
4066  	spin_lock_irq(&p->sighand->siglock);
4067  	if (k->sa.sa_flags & SA_IMMUTABLE) {
4068  		spin_unlock_irq(&p->sighand->siglock);
4069  		return -EINVAL;
4070  	}
4071  	if (oact)
4072  		*oact = *k;
4073  
4074  	/*
4075  	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4076  	 * e.g. by having an architecture use the bit in their uapi.
4077  	 */
4078  	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4079  
4080  	/*
4081  	 * Clear unknown flag bits in order to allow userspace to detect missing
4082  	 * support for flag bits and to allow the kernel to use non-uapi bits
4083  	 * internally.
4084  	 */
4085  	if (act)
4086  		act->sa.sa_flags &= UAPI_SA_FLAGS;
4087  	if (oact)
4088  		oact->sa.sa_flags &= UAPI_SA_FLAGS;
4089  
4090  	sigaction_compat_abi(act, oact);
4091  
4092  	if (act) {
4093  		sigdelsetmask(&act->sa.sa_mask,
4094  			      sigmask(SIGKILL) | sigmask(SIGSTOP));
4095  		*k = *act;
4096  		/*
4097  		 * POSIX 3.3.1.3:
4098  		 *  "Setting a signal action to SIG_IGN for a signal that is
4099  		 *   pending shall cause the pending signal to be discarded,
4100  		 *   whether or not it is blocked."
4101  		 *
4102  		 *  "Setting a signal action to SIG_DFL for a signal that is
4103  		 *   pending and whose default action is to ignore the signal
4104  		 *   (for example, SIGCHLD), shall cause the pending signal to
4105  		 *   be discarded, whether or not it is blocked"
4106  		 */
4107  		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4108  			sigemptyset(&mask);
4109  			sigaddset(&mask, sig);
4110  			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4111  			for_each_thread(p, t)
4112  				flush_sigqueue_mask(&mask, &t->pending);
4113  		}
4114  	}
4115  
4116  	spin_unlock_irq(&p->sighand->siglock);
4117  	return 0;
4118  }
4119  
4120  #ifdef CONFIG_DYNAMIC_SIGFRAME
4121  static inline void sigaltstack_lock(void)
4122  	__acquires(&current->sighand->siglock)
4123  {
4124  	spin_lock_irq(&current->sighand->siglock);
4125  }
4126  
4127  static inline void sigaltstack_unlock(void)
4128  	__releases(&current->sighand->siglock)
4129  {
4130  	spin_unlock_irq(&current->sighand->siglock);
4131  }
4132  #else
4133  static inline void sigaltstack_lock(void) { }
4134  static inline void sigaltstack_unlock(void) { }
4135  #endif
4136  
4137  static int
4138  do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4139  		size_t min_ss_size)
4140  {
4141  	struct task_struct *t = current;
4142  	int ret = 0;
4143  
4144  	if (oss) {
4145  		memset(oss, 0, sizeof(stack_t));
4146  		oss->ss_sp = (void __user *) t->sas_ss_sp;
4147  		oss->ss_size = t->sas_ss_size;
4148  		oss->ss_flags = sas_ss_flags(sp) |
4149  			(current->sas_ss_flags & SS_FLAG_BITS);
4150  	}
4151  
4152  	if (ss) {
4153  		void __user *ss_sp = ss->ss_sp;
4154  		size_t ss_size = ss->ss_size;
4155  		unsigned ss_flags = ss->ss_flags;
4156  		int ss_mode;
4157  
4158  		if (unlikely(on_sig_stack(sp)))
4159  			return -EPERM;
4160  
4161  		ss_mode = ss_flags & ~SS_FLAG_BITS;
4162  		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4163  				ss_mode != 0))
4164  			return -EINVAL;
4165  
4166  		sigaltstack_lock();
4167  		if (ss_mode == SS_DISABLE) {
4168  			ss_size = 0;
4169  			ss_sp = NULL;
4170  		} else {
4171  			if (unlikely(ss_size < min_ss_size))
4172  				ret = -ENOMEM;
4173  			if (!sigaltstack_size_valid(ss_size))
4174  				ret = -ENOMEM;
4175  		}
4176  		if (!ret) {
4177  			t->sas_ss_sp = (unsigned long) ss_sp;
4178  			t->sas_ss_size = ss_size;
4179  			t->sas_ss_flags = ss_flags;
4180  		}
4181  		sigaltstack_unlock();
4182  	}
4183  	return ret;
4184  }
4185  
4186  SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4187  {
4188  	stack_t new, old;
4189  	int err;
4190  	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4191  		return -EFAULT;
4192  	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4193  			      current_user_stack_pointer(),
4194  			      MINSIGSTKSZ);
4195  	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4196  		err = -EFAULT;
4197  	return err;
4198  }
4199  
4200  int restore_altstack(const stack_t __user *uss)
4201  {
4202  	stack_t new;
4203  	if (copy_from_user(&new, uss, sizeof(stack_t)))
4204  		return -EFAULT;
4205  	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4206  			     MINSIGSTKSZ);
4207  	/* squash all but EFAULT for now */
4208  	return 0;
4209  }
4210  
4211  int __save_altstack(stack_t __user *uss, unsigned long sp)
4212  {
4213  	struct task_struct *t = current;
4214  	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4215  		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4216  		__put_user(t->sas_ss_size, &uss->ss_size);
4217  	return err;
4218  }
4219  
4220  #ifdef CONFIG_COMPAT
4221  static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4222  				 compat_stack_t __user *uoss_ptr)
4223  {
4224  	stack_t uss, uoss;
4225  	int ret;
4226  
4227  	if (uss_ptr) {
4228  		compat_stack_t uss32;
4229  		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4230  			return -EFAULT;
4231  		uss.ss_sp = compat_ptr(uss32.ss_sp);
4232  		uss.ss_flags = uss32.ss_flags;
4233  		uss.ss_size = uss32.ss_size;
4234  	}
4235  	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4236  			     compat_user_stack_pointer(),
4237  			     COMPAT_MINSIGSTKSZ);
4238  	if (ret >= 0 && uoss_ptr)  {
4239  		compat_stack_t old;
4240  		memset(&old, 0, sizeof(old));
4241  		old.ss_sp = ptr_to_compat(uoss.ss_sp);
4242  		old.ss_flags = uoss.ss_flags;
4243  		old.ss_size = uoss.ss_size;
4244  		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4245  			ret = -EFAULT;
4246  	}
4247  	return ret;
4248  }
4249  
4250  COMPAT_SYSCALL_DEFINE2(sigaltstack,
4251  			const compat_stack_t __user *, uss_ptr,
4252  			compat_stack_t __user *, uoss_ptr)
4253  {
4254  	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4255  }
4256  
4257  int compat_restore_altstack(const compat_stack_t __user *uss)
4258  {
4259  	int err = do_compat_sigaltstack(uss, NULL);
4260  	/* squash all but -EFAULT for now */
4261  	return err == -EFAULT ? err : 0;
4262  }
4263  
4264  int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4265  {
4266  	int err;
4267  	struct task_struct *t = current;
4268  	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4269  			 &uss->ss_sp) |
4270  		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4271  		__put_user(t->sas_ss_size, &uss->ss_size);
4272  	return err;
4273  }
4274  #endif
4275  
4276  #ifdef __ARCH_WANT_SYS_SIGPENDING
4277  
4278  /**
4279   *  sys_sigpending - examine pending signals
4280   *  @uset: where mask of pending signal is returned
4281   */
4282  SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4283  {
4284  	sigset_t set;
4285  
4286  	if (sizeof(old_sigset_t) > sizeof(*uset))
4287  		return -EINVAL;
4288  
4289  	do_sigpending(&set);
4290  
4291  	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4292  		return -EFAULT;
4293  
4294  	return 0;
4295  }
4296  
4297  #ifdef CONFIG_COMPAT
4298  COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4299  {
4300  	sigset_t set;
4301  
4302  	do_sigpending(&set);
4303  
4304  	return put_user(set.sig[0], set32);
4305  }
4306  #endif
4307  
4308  #endif
4309  
4310  #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4311  /**
4312   *  sys_sigprocmask - examine and change blocked signals
4313   *  @how: whether to add, remove, or set signals
4314   *  @nset: signals to add or remove (if non-null)
4315   *  @oset: previous value of signal mask if non-null
4316   *
4317   * Some platforms have their own version with special arguments;
4318   * others support only sys_rt_sigprocmask.
4319   */
4320  
4321  SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4322  		old_sigset_t __user *, oset)
4323  {
4324  	old_sigset_t old_set, new_set;
4325  	sigset_t new_blocked;
4326  
4327  	old_set = current->blocked.sig[0];
4328  
4329  	if (nset) {
4330  		if (copy_from_user(&new_set, nset, sizeof(*nset)))
4331  			return -EFAULT;
4332  
4333  		new_blocked = current->blocked;
4334  
4335  		switch (how) {
4336  		case SIG_BLOCK:
4337  			sigaddsetmask(&new_blocked, new_set);
4338  			break;
4339  		case SIG_UNBLOCK:
4340  			sigdelsetmask(&new_blocked, new_set);
4341  			break;
4342  		case SIG_SETMASK:
4343  			new_blocked.sig[0] = new_set;
4344  			break;
4345  		default:
4346  			return -EINVAL;
4347  		}
4348  
4349  		set_current_blocked(&new_blocked);
4350  	}
4351  
4352  	if (oset) {
4353  		if (copy_to_user(oset, &old_set, sizeof(*oset)))
4354  			return -EFAULT;
4355  	}
4356  
4357  	return 0;
4358  }
4359  #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4360  
4361  #ifndef CONFIG_ODD_RT_SIGACTION
4362  /**
4363   *  sys_rt_sigaction - alter an action taken by a process
4364   *  @sig: signal to be sent
4365   *  @act: new sigaction
4366   *  @oact: used to save the previous sigaction
4367   *  @sigsetsize: size of sigset_t type
4368   */
4369  SYSCALL_DEFINE4(rt_sigaction, int, sig,
4370  		const struct sigaction __user *, act,
4371  		struct sigaction __user *, oact,
4372  		size_t, sigsetsize)
4373  {
4374  	struct k_sigaction new_sa, old_sa;
4375  	int ret;
4376  
4377  	/* XXX: Don't preclude handling different sized sigset_t's.  */
4378  	if (sigsetsize != sizeof(sigset_t))
4379  		return -EINVAL;
4380  
4381  	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4382  		return -EFAULT;
4383  
4384  	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4385  	if (ret)
4386  		return ret;
4387  
4388  	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4389  		return -EFAULT;
4390  
4391  	return 0;
4392  }
4393  #ifdef CONFIG_COMPAT
4394  COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4395  		const struct compat_sigaction __user *, act,
4396  		struct compat_sigaction __user *, oact,
4397  		compat_size_t, sigsetsize)
4398  {
4399  	struct k_sigaction new_ka, old_ka;
4400  #ifdef __ARCH_HAS_SA_RESTORER
4401  	compat_uptr_t restorer;
4402  #endif
4403  	int ret;
4404  
4405  	/* XXX: Don't preclude handling different sized sigset_t's.  */
4406  	if (sigsetsize != sizeof(compat_sigset_t))
4407  		return -EINVAL;
4408  
4409  	if (act) {
4410  		compat_uptr_t handler;
4411  		ret = get_user(handler, &act->sa_handler);
4412  		new_ka.sa.sa_handler = compat_ptr(handler);
4413  #ifdef __ARCH_HAS_SA_RESTORER
4414  		ret |= get_user(restorer, &act->sa_restorer);
4415  		new_ka.sa.sa_restorer = compat_ptr(restorer);
4416  #endif
4417  		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4418  		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4419  		if (ret)
4420  			return -EFAULT;
4421  	}
4422  
4423  	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4424  	if (!ret && oact) {
4425  		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4426  			       &oact->sa_handler);
4427  		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4428  					 sizeof(oact->sa_mask));
4429  		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4430  #ifdef __ARCH_HAS_SA_RESTORER
4431  		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4432  				&oact->sa_restorer);
4433  #endif
4434  	}
4435  	return ret;
4436  }
4437  #endif
4438  #endif /* !CONFIG_ODD_RT_SIGACTION */
4439  
4440  #ifdef CONFIG_OLD_SIGACTION
4441  SYSCALL_DEFINE3(sigaction, int, sig,
4442  		const struct old_sigaction __user *, act,
4443  	        struct old_sigaction __user *, oact)
4444  {
4445  	struct k_sigaction new_ka, old_ka;
4446  	int ret;
4447  
4448  	if (act) {
4449  		old_sigset_t mask;
4450  		if (!access_ok(act, sizeof(*act)) ||
4451  		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4452  		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4453  		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4454  		    __get_user(mask, &act->sa_mask))
4455  			return -EFAULT;
4456  #ifdef __ARCH_HAS_KA_RESTORER
4457  		new_ka.ka_restorer = NULL;
4458  #endif
4459  		siginitset(&new_ka.sa.sa_mask, mask);
4460  	}
4461  
4462  	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4463  
4464  	if (!ret && oact) {
4465  		if (!access_ok(oact, sizeof(*oact)) ||
4466  		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4467  		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4468  		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4469  		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4470  			return -EFAULT;
4471  	}
4472  
4473  	return ret;
4474  }
4475  #endif
4476  #ifdef CONFIG_COMPAT_OLD_SIGACTION
4477  COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4478  		const struct compat_old_sigaction __user *, act,
4479  	        struct compat_old_sigaction __user *, oact)
4480  {
4481  	struct k_sigaction new_ka, old_ka;
4482  	int ret;
4483  	compat_old_sigset_t mask;
4484  	compat_uptr_t handler, restorer;
4485  
4486  	if (act) {
4487  		if (!access_ok(act, sizeof(*act)) ||
4488  		    __get_user(handler, &act->sa_handler) ||
4489  		    __get_user(restorer, &act->sa_restorer) ||
4490  		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4491  		    __get_user(mask, &act->sa_mask))
4492  			return -EFAULT;
4493  
4494  #ifdef __ARCH_HAS_KA_RESTORER
4495  		new_ka.ka_restorer = NULL;
4496  #endif
4497  		new_ka.sa.sa_handler = compat_ptr(handler);
4498  		new_ka.sa.sa_restorer = compat_ptr(restorer);
4499  		siginitset(&new_ka.sa.sa_mask, mask);
4500  	}
4501  
4502  	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4503  
4504  	if (!ret && oact) {
4505  		if (!access_ok(oact, sizeof(*oact)) ||
4506  		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4507  			       &oact->sa_handler) ||
4508  		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4509  			       &oact->sa_restorer) ||
4510  		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4511  		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4512  			return -EFAULT;
4513  	}
4514  	return ret;
4515  }
4516  #endif
4517  
4518  #ifdef CONFIG_SGETMASK_SYSCALL
4519  
4520  /*
4521   * For backwards compatibility.  Functionality superseded by sigprocmask.
4522   */
4523  SYSCALL_DEFINE0(sgetmask)
4524  {
4525  	/* SMP safe */
4526  	return current->blocked.sig[0];
4527  }
4528  
4529  SYSCALL_DEFINE1(ssetmask, int, newmask)
4530  {
4531  	int old = current->blocked.sig[0];
4532  	sigset_t newset;
4533  
4534  	siginitset(&newset, newmask);
4535  	set_current_blocked(&newset);
4536  
4537  	return old;
4538  }
4539  #endif /* CONFIG_SGETMASK_SYSCALL */
4540  
4541  #ifdef __ARCH_WANT_SYS_SIGNAL
4542  /*
4543   * For backwards compatibility.  Functionality superseded by sigaction.
4544   */
4545  SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4546  {
4547  	struct k_sigaction new_sa, old_sa;
4548  	int ret;
4549  
4550  	new_sa.sa.sa_handler = handler;
4551  	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4552  	sigemptyset(&new_sa.sa.sa_mask);
4553  
4554  	ret = do_sigaction(sig, &new_sa, &old_sa);
4555  
4556  	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4557  }
4558  #endif /* __ARCH_WANT_SYS_SIGNAL */
4559  
4560  #ifdef __ARCH_WANT_SYS_PAUSE
4561  
4562  SYSCALL_DEFINE0(pause)
4563  {
4564  	while (!signal_pending(current)) {
4565  		__set_current_state(TASK_INTERRUPTIBLE);
4566  		schedule();
4567  	}
4568  	return -ERESTARTNOHAND;
4569  }
4570  
4571  #endif
4572  
4573  static int sigsuspend(sigset_t *set)
4574  {
4575  	current->saved_sigmask = current->blocked;
4576  	set_current_blocked(set);
4577  
4578  	while (!signal_pending(current)) {
4579  		__set_current_state(TASK_INTERRUPTIBLE);
4580  		schedule();
4581  	}
4582  	set_restore_sigmask();
4583  	return -ERESTARTNOHAND;
4584  }
4585  
4586  /**
4587   *  sys_rt_sigsuspend - replace the signal mask for a value with the
4588   *	@unewset value until a signal is received
4589   *  @unewset: new signal mask value
4590   *  @sigsetsize: size of sigset_t type
4591   */
4592  SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4593  {
4594  	sigset_t newset;
4595  
4596  	/* XXX: Don't preclude handling different sized sigset_t's.  */
4597  	if (sigsetsize != sizeof(sigset_t))
4598  		return -EINVAL;
4599  
4600  	if (copy_from_user(&newset, unewset, sizeof(newset)))
4601  		return -EFAULT;
4602  	return sigsuspend(&newset);
4603  }
4604  
4605  #ifdef CONFIG_COMPAT
4606  COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4607  {
4608  	sigset_t newset;
4609  
4610  	/* XXX: Don't preclude handling different sized sigset_t's.  */
4611  	if (sigsetsize != sizeof(sigset_t))
4612  		return -EINVAL;
4613  
4614  	if (get_compat_sigset(&newset, unewset))
4615  		return -EFAULT;
4616  	return sigsuspend(&newset);
4617  }
4618  #endif
4619  
4620  #ifdef CONFIG_OLD_SIGSUSPEND
4621  SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4622  {
4623  	sigset_t blocked;
4624  	siginitset(&blocked, mask);
4625  	return sigsuspend(&blocked);
4626  }
4627  #endif
4628  #ifdef CONFIG_OLD_SIGSUSPEND3
4629  SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4630  {
4631  	sigset_t blocked;
4632  	siginitset(&blocked, mask);
4633  	return sigsuspend(&blocked);
4634  }
4635  #endif
4636  
4637  __weak const char *arch_vma_name(struct vm_area_struct *vma)
4638  {
4639  	return NULL;
4640  }
4641  
4642  static inline void siginfo_buildtime_checks(void)
4643  {
4644  	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4645  
4646  	/* Verify the offsets in the two siginfos match */
4647  #define CHECK_OFFSET(field) \
4648  	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4649  
4650  	/* kill */
4651  	CHECK_OFFSET(si_pid);
4652  	CHECK_OFFSET(si_uid);
4653  
4654  	/* timer */
4655  	CHECK_OFFSET(si_tid);
4656  	CHECK_OFFSET(si_overrun);
4657  	CHECK_OFFSET(si_value);
4658  
4659  	/* rt */
4660  	CHECK_OFFSET(si_pid);
4661  	CHECK_OFFSET(si_uid);
4662  	CHECK_OFFSET(si_value);
4663  
4664  	/* sigchld */
4665  	CHECK_OFFSET(si_pid);
4666  	CHECK_OFFSET(si_uid);
4667  	CHECK_OFFSET(si_status);
4668  	CHECK_OFFSET(si_utime);
4669  	CHECK_OFFSET(si_stime);
4670  
4671  	/* sigfault */
4672  	CHECK_OFFSET(si_addr);
4673  	CHECK_OFFSET(si_trapno);
4674  	CHECK_OFFSET(si_addr_lsb);
4675  	CHECK_OFFSET(si_lower);
4676  	CHECK_OFFSET(si_upper);
4677  	CHECK_OFFSET(si_pkey);
4678  	CHECK_OFFSET(si_perf_data);
4679  	CHECK_OFFSET(si_perf_type);
4680  
4681  	/* sigpoll */
4682  	CHECK_OFFSET(si_band);
4683  	CHECK_OFFSET(si_fd);
4684  
4685  	/* sigsys */
4686  	CHECK_OFFSET(si_call_addr);
4687  	CHECK_OFFSET(si_syscall);
4688  	CHECK_OFFSET(si_arch);
4689  #undef CHECK_OFFSET
4690  
4691  	/* usb asyncio */
4692  	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4693  		     offsetof(struct siginfo, si_addr));
4694  	if (sizeof(int) == sizeof(void __user *)) {
4695  		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4696  			     sizeof(void __user *));
4697  	} else {
4698  		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4699  			      sizeof_field(struct siginfo, si_uid)) !=
4700  			     sizeof(void __user *));
4701  		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4702  			     offsetof(struct siginfo, si_uid));
4703  	}
4704  #ifdef CONFIG_COMPAT
4705  	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4706  		     offsetof(struct compat_siginfo, si_addr));
4707  	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4708  		     sizeof(compat_uptr_t));
4709  	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4710  		     sizeof_field(struct siginfo, si_pid));
4711  #endif
4712  }
4713  
4714  void __init signals_init(void)
4715  {
4716  	siginfo_buildtime_checks();
4717  
4718  	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
4719  }
4720  
4721  #ifdef CONFIG_KGDB_KDB
4722  #include <linux/kdb.h>
4723  /*
4724   * kdb_send_sig - Allows kdb to send signals without exposing
4725   * signal internals.  This function checks if the required locks are
4726   * available before calling the main signal code, to avoid kdb
4727   * deadlocks.
4728   */
4729  void kdb_send_sig(struct task_struct *t, int sig)
4730  {
4731  	static struct task_struct *kdb_prev_t;
4732  	int new_t, ret;
4733  	if (!spin_trylock(&t->sighand->siglock)) {
4734  		kdb_printf("Can't do kill command now.\n"
4735  			   "The sigmask lock is held somewhere else in "
4736  			   "kernel, try again later\n");
4737  		return;
4738  	}
4739  	new_t = kdb_prev_t != t;
4740  	kdb_prev_t = t;
4741  	if (!task_is_running(t) && new_t) {
4742  		spin_unlock(&t->sighand->siglock);
4743  		kdb_printf("Process is not RUNNING, sending a signal from "
4744  			   "kdb risks deadlock\n"
4745  			   "on the run queue locks. "
4746  			   "The signal has _not_ been sent.\n"
4747  			   "Reissue the kill command if you want to risk "
4748  			   "the deadlock.\n");
4749  		return;
4750  	}
4751  	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4752  	spin_unlock(&t->sighand->siglock);
4753  	if (ret)
4754  		kdb_printf("Fail to deliver Signal %d to process %d.\n",
4755  			   sig, t->pid);
4756  	else
4757  		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4758  }
4759  #endif	/* CONFIG_KGDB_KDB */
4760