xref: /linux/kernel/signal.c (revision e5c5d22e8dcf7c2d430336cbf8e180bd38e8daf1)
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *		Changes to use preallocated sigqueue structures
10  *		to allow signals to be sent reliably.
11  */
12 
13 #include <linux/slab.h>
14 #include <linux/export.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/coredump.h>
21 #include <linux/security.h>
22 #include <linux/syscalls.h>
23 #include <linux/ptrace.h>
24 #include <linux/signal.h>
25 #include <linux/signalfd.h>
26 #include <linux/ratelimit.h>
27 #include <linux/tracehook.h>
28 #include <linux/capability.h>
29 #include <linux/freezer.h>
30 #include <linux/pid_namespace.h>
31 #include <linux/nsproxy.h>
32 #include <linux/user_namespace.h>
33 #include <linux/uprobes.h>
34 #include <linux/compat.h>
35 #include <linux/cn_proc.h>
36 #define CREATE_TRACE_POINTS
37 #include <trace/events/signal.h>
38 
39 #include <asm/param.h>
40 #include <asm/uaccess.h>
41 #include <asm/unistd.h>
42 #include <asm/siginfo.h>
43 #include <asm/cacheflush.h>
44 #include "audit.h"	/* audit_signal_info() */
45 
46 /*
47  * SLAB caches for signal bits.
48  */
49 
50 static struct kmem_cache *sigqueue_cachep;
51 
52 int print_fatal_signals __read_mostly;
53 
54 static void __user *sig_handler(struct task_struct *t, int sig)
55 {
56 	return t->sighand->action[sig - 1].sa.sa_handler;
57 }
58 
59 static int sig_handler_ignored(void __user *handler, int sig)
60 {
61 	/* Is it explicitly or implicitly ignored? */
62 	return handler == SIG_IGN ||
63 		(handler == SIG_DFL && sig_kernel_ignore(sig));
64 }
65 
66 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
67 {
68 	void __user *handler;
69 
70 	handler = sig_handler(t, sig);
71 
72 	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
73 			handler == SIG_DFL && !force)
74 		return 1;
75 
76 	return sig_handler_ignored(handler, sig);
77 }
78 
79 static int sig_ignored(struct task_struct *t, int sig, bool force)
80 {
81 	/*
82 	 * Blocked signals are never ignored, since the
83 	 * signal handler may change by the time it is
84 	 * unblocked.
85 	 */
86 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
87 		return 0;
88 
89 	if (!sig_task_ignored(t, sig, force))
90 		return 0;
91 
92 	/*
93 	 * Tracers may want to know about even ignored signals.
94 	 */
95 	return !t->ptrace;
96 }
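
/*
 * Example (illustrative, not part of the original file): SIGCHLD with
 * SIG_DFL is in sig_kernel_ignore() and so counts as implicitly ignored
 * above, yet the same signal is never reported as ignored while it is
 * blocked, since the task may install a real handler before unblocking.
 */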
97 
98 /*
99  * Re-calculate pending state from the set of locally pending
100  * signals, globally pending signals, and blocked signals.
101  */
102 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
103 {
104 	unsigned long ready;
105 	long i;
106 
107 	switch (_NSIG_WORDS) {
108 	default:
109 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
110 			ready |= signal->sig[i] &~ blocked->sig[i];
111 		break;
112 
113 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
114 		ready |= signal->sig[2] &~ blocked->sig[2];
115 		ready |= signal->sig[1] &~ blocked->sig[1];
116 		ready |= signal->sig[0] &~ blocked->sig[0];
117 		break;
118 
119 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
120 		ready |= signal->sig[0] &~ blocked->sig[0];
121 		break;
122 
123 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
124 	}
125 	return ready != 0;
126 }
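
/*
 * Illustrative model (not kernel code): the unrolled cases above are
 * all equivalent to OR-ing sig & ~blocked across every word of the
 * set.  A minimal standalone sketch of the same computation:
 */
static inline int model_has_pending(const unsigned long *sig,
				    const unsigned long *blk, int words)
{
	unsigned long ready = 0;
	int i;

	for (i = 0; i < words; i++)
		ready |= sig[i] & ~blk[i];	/* pending and not blocked */
	return ready != 0;
}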
127 
128 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
129 
130 static int recalc_sigpending_tsk(struct task_struct *t)
131 {
132 	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
133 	    PENDING(&t->pending, &t->blocked) ||
134 	    PENDING(&t->signal->shared_pending, &t->blocked)) {
135 		set_tsk_thread_flag(t, TIF_SIGPENDING);
136 		return 1;
137 	}
138 	/*
139 	 * We must never clear the flag in another thread, or in current
140 	 * when it's possible the current syscall is returning -ERESTART*.
141 	 * So we don't clear it here; only callers who know they should clear it do so.
142 	 */
143 	return 0;
144 }
145 
146 /*
147  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
148  * This is superfluous when called on current; the wakeup is a harmless no-op.
149  */
150 void recalc_sigpending_and_wake(struct task_struct *t)
151 {
152 	if (recalc_sigpending_tsk(t))
153 		signal_wake_up(t, 0);
154 }
155 
156 void recalc_sigpending(void)
157 {
158 	if (!recalc_sigpending_tsk(current) && !freezing(current))
159 		clear_thread_flag(TIF_SIGPENDING);
160 
161 }
162 
163 /* Given the mask, find the first available signal that should be serviced. */
164 
165 #define SYNCHRONOUS_MASK \
166 	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
167 	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
168 
169 int next_signal(struct sigpending *pending, sigset_t *mask)
170 {
171 	unsigned long i, *s, *m, x;
172 	int sig = 0;
173 
174 	s = pending->signal.sig;
175 	m = mask->sig;
176 
177 	/*
178 	 * Handle the first word specially: it contains the
179 	 * synchronous signals that need to be dequeued first.
180 	 */
181 	x = *s &~ *m;
182 	if (x) {
183 		if (x & SYNCHRONOUS_MASK)
184 			x &= SYNCHRONOUS_MASK;
185 		sig = ffz(~x) + 1;
186 		return sig;
187 	}
188 
189 	switch (_NSIG_WORDS) {
190 	default:
191 		for (i = 1; i < _NSIG_WORDS; ++i) {
192 			x = *++s &~ *++m;
193 			if (!x)
194 				continue;
195 			sig = ffz(~x) + i*_NSIG_BPW + 1;
196 			break;
197 		}
198 		break;
199 
200 	case 2:
201 		x = s[1] &~ m[1];
202 		if (!x)
203 			break;
204 		sig = ffz(~x) + _NSIG_BPW + 1;
205 		break;
206 
207 	case 1:
208 		/* Nothing to do */
209 		break;
210 	}
211 
212 	return sig;
213 }
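
/*
 * Example (illustrative, not kernel code): if SIGUSR1 (10) and SIGSEGV
 * (11) are both pending and unblocked, narrowing to SYNCHRONOUS_MASK
 * makes the synchronous SIGSEGV win even though SIGUSR1 has the lower
 * number.  A standalone model of the first-word rule above:
 */
static int model_first_word_signal(unsigned long pending, unsigned long blocked)
{
	unsigned long x = pending & ~blocked;

	if (x & SYNCHRONOUS_MASK)
		x &= SYNCHRONOUS_MASK;		/* sync signals jump the queue */
	return x ? ffz(~x) + 1 : 0;		/* lowest set bit, 1-based */
}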
214 
215 static inline void print_dropped_signal(int sig)
216 {
217 	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
218 
219 	if (!print_fatal_signals)
220 		return;
221 
222 	if (!__ratelimit(&ratelimit_state))
223 		return;
224 
225 	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
226 				current->comm, current->pid, sig);
227 }
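
/*
 * Example (illustrative): with DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 10)
 * as above, __ratelimit() grants at most 10 messages per 5-second
 * window, so no more than 10 "dropped signal" lines are printed per
 * window no matter how many signals are actually dropped.
 */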
228 
229 /**
230  * task_set_jobctl_pending - set jobctl pending bits
231  * @task: target task
232  * @mask: pending bits to set
233  *
234  * Set @mask on @task->jobctl.  @mask must be a subset of
235  * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
236  * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
237  * cleared.  If @task is already being killed or exiting, this function
238  * becomes a no-op.
239  *
240  * CONTEXT:
241  * Must be called with @task->sighand->siglock held.
242  *
243  * RETURNS:
244  * %true if @mask is set, %false if it became a no-op because @task was dying.
245  */
246 bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
247 {
248 	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
249 			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
250 	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
251 
252 	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
253 		return false;
254 
255 	if (mask & JOBCTL_STOP_SIGMASK)
256 		task->jobctl &= ~JOBCTL_STOP_SIGMASK;
257 
258 	task->jobctl |= mask;
259 	return true;
260 }
261 
262 /**
263  * task_clear_jobctl_trapping - clear jobctl trapping bit
264  * @task: target task
265  *
266  * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
267  * Clear it and wake up the ptracer.  Note that we don't need any further
268  * locking.  @task->sighand->siglock guarantees that @task->parent points to the
269  * ptracer.
270  *
271  * CONTEXT:
272  * Must be called with @task->sighand->siglock held.
273  */
274 void task_clear_jobctl_trapping(struct task_struct *task)
275 {
276 	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
277 		task->jobctl &= ~JOBCTL_TRAPPING;
278 		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
279 	}
280 }
281 
282 /**
283  * task_clear_jobctl_pending - clear jobctl pending bits
284  * @task: target task
285  * @mask: pending bits to clear
286  *
287  * Clear @mask from @task->jobctl.  @mask must be a subset of
288  * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
289  * STOP bits are cleared together.
290  *
291  * If clearing of @mask leaves no stop or trap pending, this function calls
292  * task_clear_jobctl_trapping().
293  *
294  * CONTEXT:
295  * Must be called with @task->sighand->siglock held.
296  */
297 void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
298 {
299 	BUG_ON(mask & ~JOBCTL_PENDING_MASK);
300 
301 	if (mask & JOBCTL_STOP_PENDING)
302 		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
303 
304 	task->jobctl &= ~mask;
305 
306 	if (!(task->jobctl & JOBCTL_PENDING_MASK))
307 		task_clear_jobctl_trapping(task);
308 }
309 
310 /**
311  * task_participate_group_stop - participate in a group stop
312  * @task: task participating in a group stop
313  *
314  * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
315  * Group stop states are cleared and the group stop count is consumed if
316  * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
317  * stop, the appropriate %SIGNAL_* flags are set.
318  *
319  * CONTEXT:
320  * Must be called with @task->sighand->siglock held.
321  *
322  * RETURNS:
323  * %true if group stop completion should be notified to the parent, %false
324  * otherwise.
325  */
326 static bool task_participate_group_stop(struct task_struct *task)
327 {
328 	struct signal_struct *sig = task->signal;
329 	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
330 
331 	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
332 
333 	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
334 
335 	if (!consume)
336 		return false;
337 
338 	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
339 		sig->group_stop_count--;
340 
341 	/*
342 	 * Tell the caller to notify completion iff we are entering a
343 	 * fresh group stop.  See the comment in do_signal_stop() for details.
344 	 */
345 	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
346 		sig->flags = SIGNAL_STOP_STOPPED;
347 		return true;
348 	}
349 	return false;
350 }
351 
352 /*
353  * allocate a new signal queue record
354  * - this may be called without locks if and only if t == current, otherwise an
355  *   appropriate lock must be held to stop the target task from exiting
356  */
357 static struct sigqueue *
358 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
359 {
360 	struct sigqueue *q = NULL;
361 	struct user_struct *user;
362 
363 	/*
364 	 * Protect access to @t credentials. This can go away when all
365 	 * callers hold rcu read lock.
366 	 */
367 	rcu_read_lock();
368 	user = get_uid(__task_cred(t)->user);
369 	atomic_inc(&user->sigpending);
370 	rcu_read_unlock();
371 
372 	if (override_rlimit ||
373 	    atomic_read(&user->sigpending) <=
374 			task_rlimit(t, RLIMIT_SIGPENDING)) {
375 		q = kmem_cache_alloc(sigqueue_cachep, flags);
376 	} else {
377 		print_dropped_signal(sig);
378 	}
379 
380 	if (unlikely(q == NULL)) {
381 		atomic_dec(&user->sigpending);
382 		free_uid(user);
383 	} else {
384 		INIT_LIST_HEAD(&q->list);
385 		q->flags = 0;
386 		q->user = user;
387 	}
388 
389 	return q;
390 }
391 
392 static void __sigqueue_free(struct sigqueue *q)
393 {
394 	if (q->flags & SIGQUEUE_PREALLOC)
395 		return;
396 	atomic_dec(&q->user->sigpending);
397 	free_uid(q->user);
398 	kmem_cache_free(sigqueue_cachep, q);
399 }
400 
401 void flush_sigqueue(struct sigpending *queue)
402 {
403 	struct sigqueue *q;
404 
405 	sigemptyset(&queue->signal);
406 	while (!list_empty(&queue->list)) {
407 		q = list_entry(queue->list.next, struct sigqueue , list);
408 		list_del_init(&q->list);
409 		__sigqueue_free(q);
410 	}
411 }
412 
413 /*
414  * Flush all pending signals for a task.
415  */
416 void __flush_signals(struct task_struct *t)
417 {
418 	clear_tsk_thread_flag(t, TIF_SIGPENDING);
419 	flush_sigqueue(&t->pending);
420 	flush_sigqueue(&t->signal->shared_pending);
421 }
422 
423 void flush_signals(struct task_struct *t)
424 {
425 	unsigned long flags;
426 
427 	spin_lock_irqsave(&t->sighand->siglock, flags);
428 	__flush_signals(t);
429 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
430 }
431 
432 static void __flush_itimer_signals(struct sigpending *pending)
433 {
434 	sigset_t signal, retain;
435 	struct sigqueue *q, *n;
436 
437 	signal = pending->signal;
438 	sigemptyset(&retain);
439 
440 	list_for_each_entry_safe(q, n, &pending->list, list) {
441 		int sig = q->info.si_signo;
442 
443 		if (likely(q->info.si_code != SI_TIMER)) {
444 			sigaddset(&retain, sig);
445 		} else {
446 			sigdelset(&signal, sig);
447 			list_del_init(&q->list);
448 			__sigqueue_free(q);
449 		}
450 	}
451 
452 	sigorsets(&pending->signal, &signal, &retain);
453 }
454 
455 void flush_itimer_signals(void)
456 {
457 	struct task_struct *tsk = current;
458 	unsigned long flags;
459 
460 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
461 	__flush_itimer_signals(&tsk->pending);
462 	__flush_itimer_signals(&tsk->signal->shared_pending);
463 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
464 }
465 
466 void ignore_signals(struct task_struct *t)
467 {
468 	int i;
469 
470 	for (i = 0; i < _NSIG; ++i)
471 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
472 
473 	flush_signals(t);
474 }
475 
476 /*
477  * Flush all handlers for a task.
478  */
479 
480 void
481 flush_signal_handlers(struct task_struct *t, int force_default)
482 {
483 	int i;
484 	struct k_sigaction *ka = &t->sighand->action[0];
485 	for (i = _NSIG ; i != 0 ; i--) {
486 		if (force_default || ka->sa.sa_handler != SIG_IGN)
487 			ka->sa.sa_handler = SIG_DFL;
488 		ka->sa.sa_flags = 0;
489 #ifdef __ARCH_HAS_SA_RESTORER
490 		ka->sa.sa_restorer = NULL;
491 #endif
492 		sigemptyset(&ka->sa.sa_mask);
493 		ka++;
494 	}
495 }
496 
497 int unhandled_signal(struct task_struct *tsk, int sig)
498 {
499 	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
500 	if (is_global_init(tsk))
501 		return 1;
502 	if (handler != SIG_IGN && handler != SIG_DFL)
503 		return 0;
504 	/* if ptraced, let the tracer determine */
505 	return !tsk->ptrace;
506 }
507 
508 /*
509  * Notify the system that a driver wants to block all signals for this
510  * process, and wants to be notified if any signals at all were to be
511  * sent/acted upon.  If the notifier routine returns non-zero, then the
512  * signal will be acted upon after all.  If the notifier routine returns 0,
513  * then the signal will be blocked.  Only one block per process is
514  * allowed.  priv is a pointer to private data that the notifier routine
515  * can use to determine if the signal should be blocked or not.
516  */
517 void
518 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
519 {
520 	unsigned long flags;
521 
522 	spin_lock_irqsave(&current->sighand->siglock, flags);
523 	current->notifier_mask = mask;
524 	current->notifier_data = priv;
525 	current->notifier = notifier;
526 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
527 }
528 
529 /* Notify the system that blocking has ended. */
530 
531 void
532 unblock_all_signals(void)
533 {
534 	unsigned long flags;
535 
536 	spin_lock_irqsave(&current->sighand->siglock, flags);
537 	current->notifier = NULL;
538 	current->notifier_data = NULL;
539 	recalc_sigpending();
540 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
541 }
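
/*
 * Usage sketch (hypothetical driver, not part of this file): the
 * notifier returns 0 to keep a signal in @mask blocked and non-zero to
 * let it be acted upon after all; see the check in __dequeue_signal()
 * below.
 */
struct example_dev { int hw_busy; };		/* hypothetical device state */

static int example_notifier(void *priv)
{
	struct example_dev *dev = priv;

	return !dev->hw_busy;			/* 0: keep blocking the signal */
}

static void example_critical_section(struct example_dev *dev, sigset_t *mask)
{
	block_all_signals(example_notifier, dev, mask);
	/* ... hardware sequence that must not be disturbed by signals ... */
	unblock_all_signals();
}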
542 
543 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
544 {
545 	struct sigqueue *q, *first = NULL;
546 
547 	/*
548 	 * Collect the siginfo appropriate to this signal.  Check if
549 	 * there is another siginfo for the same signal.
550 	 */
551 	list_for_each_entry(q, &list->list, list) {
552 		if (q->info.si_signo == sig) {
553 			if (first)
554 				goto still_pending;
555 			first = q;
556 		}
557 	}
558 
559 	sigdelset(&list->signal, sig);
560 
561 	if (first) {
562 still_pending:
563 		list_del_init(&first->list);
564 		copy_siginfo(info, &first->info);
565 		__sigqueue_free(first);
566 	} else {
567 		/*
568 		 * Ok, it wasn't in the queue.  This must be
569 		 * a fast-pathed signal or we must have been
570 		 * out of queue space.  So zero out the info.
571 		 */
572 		info->si_signo = sig;
573 		info->si_errno = 0;
574 		info->si_code = SI_USER;
575 		info->si_pid = 0;
576 		info->si_uid = 0;
577 	}
578 }
579 
580 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
581 			siginfo_t *info)
582 {
583 	int sig = next_signal(pending, mask);
584 
585 	if (sig) {
586 		if (current->notifier) {
587 			if (sigismember(current->notifier_mask, sig)) {
588 				if (!(current->notifier)(current->notifier_data)) {
589 					clear_thread_flag(TIF_SIGPENDING);
590 					return 0;
591 				}
592 			}
593 		}
594 
595 		collect_signal(sig, pending, info);
596 	}
597 
598 	return sig;
599 }
600 
601 /*
602  * Dequeue a signal and return the element to the caller, which is
603  * expected to free it.
604  *
605  * All callers have to hold the siglock.
606  */
607 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
608 {
609 	int signr;
610 
611 	/* We only dequeue private signals from ourselves; we don't let
612 	 * signalfd steal them.
613 	 */
614 	signr = __dequeue_signal(&tsk->pending, mask, info);
615 	if (!signr) {
616 		signr = __dequeue_signal(&tsk->signal->shared_pending,
617 					 mask, info);
618 		/*
619 		 * Is this an itimer signal?
620 		 *
621 		 * itimers are process shared and we restart periodic
622 		 * itimers in the signal delivery path to prevent DoS
623 		 * attacks in the high resolution timer case. This is
624 		 * compliant with the old way of self-restarting
625 		 * itimers, as SIGALRM is a legacy signal and only
626 		 * queued once. Changing the restart behaviour to
627 		 * restart the timer in the signal dequeue path also
628 		 * reduces the timer noise on heavily loaded !highres
629 		 * systems.
630 		 */
631 		if (unlikely(signr == SIGALRM)) {
632 			struct hrtimer *tmr = &tsk->signal->real_timer;
633 
634 			if (!hrtimer_is_queued(tmr) &&
635 			    tsk->signal->it_real_incr.tv64 != 0) {
636 				hrtimer_forward(tmr, tmr->base->get_time(),
637 						tsk->signal->it_real_incr);
638 				hrtimer_restart(tmr);
639 			}
640 		}
641 	}
642 
643 	recalc_sigpending();
644 	if (!signr)
645 		return 0;
646 
647 	if (unlikely(sig_kernel_stop(signr))) {
648 		/*
649 		 * Set a marker that we have dequeued a stop signal.  Our
650 		 * caller might release the siglock and then the pending
651 		 * stop signal it is about to process is no longer in the
652 		 * pending bitmasks, but must still be cleared by a SIGCONT
653 		 * (and overruled by a SIGKILL).  So those cases clear this
654 		 * shared flag after we've set it.  Note that this flag may
655 		 * remain set after the signal we return is ignored or
656 		 * handled.  That doesn't matter because its only purpose
657 		 * is to alert stop-signal processing code when another
658 		 * processor has come along and cleared the flag.
659 		 */
660 		current->jobctl |= JOBCTL_STOP_DEQUEUED;
661 	}
662 	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
663 		/*
664 		 * Release the siglock to ensure proper locking order
665 		 * of timer locks outside of siglocks.  Note, we leave
666 		 * irqs disabled here, since the posix-timers code is
667 		 * about to disable them again anyway.
668 		 */
669 		spin_unlock(&tsk->sighand->siglock);
670 		do_schedule_next_timer(info);
671 		spin_lock(&tsk->sighand->siglock);
672 	}
673 	return signr;
674 }
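
/*
 * Caller sketch (illustrative, mirrors get_signal_to_deliver()): the
 * siglock must be held across the call; dequeue_signal() itself may
 * drop and retake it for the posix-timer case above.
 */
static int example_dequeue_one(siginfo_t *info)
{
	int signr;

	spin_lock_irq(&current->sighand->siglock);
	signr = dequeue_signal(current, &current->blocked, info);
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}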
675 
676 /*
677  * Tell a process that it has a new active signal.
678  *
679  * NOTE! we rely on the previous spin_lock to
680  * lock interrupts for us! We can only be called with
681  * "siglock" held, and the local interrupt must
682  * have been disabled when that got acquired!
683  *
684  * No need to set need_resched since signal event passing
685  * goes through ->blocked
686  */
687 void signal_wake_up_state(struct task_struct *t, unsigned int state)
688 {
689 	set_tsk_thread_flag(t, TIF_SIGPENDING);
690 	/*
691 	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
692 	 * case. We don't check t->state here because there is a race with it
693 	 * executing on another processor and just now entering stopped state.
694 	 * By using wake_up_state, we ensure the process will wake up and
695 	 * handle its death signal.
696 	 */
697 	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
698 		kick_process(t);
699 }
700 
701 /*
702  * Remove signals in mask from the pending set and queue.
703  * Returns 1 if any signals were found.
704  *
705  * All callers must be holding the siglock.
706  *
707  * This version takes a sigset mask and looks at all signals,
708  * not just those in the first mask word.
709  */
710 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
711 {
712 	struct sigqueue *q, *n;
713 	sigset_t m;
714 
715 	sigandsets(&m, mask, &s->signal);
716 	if (sigisemptyset(&m))
717 		return 0;
718 
719 	sigandnsets(&s->signal, &s->signal, mask);
720 	list_for_each_entry_safe(q, n, &s->list, list) {
721 		if (sigismember(mask, q->info.si_signo)) {
722 			list_del_init(&q->list);
723 			__sigqueue_free(q);
724 		}
725 	}
726 	return 1;
727 }
728 /*
729  * Remove signals in mask from the pending set and queue.
730  * Returns 1 if any signals were found.
731  *
732  * All callers must be holding the siglock.
733  */
734 static int rm_from_queue(unsigned long mask, struct sigpending *s)
735 {
736 	struct sigqueue *q, *n;
737 
738 	if (!sigtestsetmask(&s->signal, mask))
739 		return 0;
740 
741 	sigdelsetmask(&s->signal, mask);
742 	list_for_each_entry_safe(q, n, &s->list, list) {
743 		if (q->info.si_signo < SIGRTMIN &&
744 		    (mask & sigmask(q->info.si_signo))) {
745 			list_del_init(&q->list);
746 			__sigqueue_free(q);
747 		}
748 	}
749 	return 1;
750 }
751 
752 static inline int is_si_special(const struct siginfo *info)
753 {
754 	return info <= SEND_SIG_FORCED;
755 }
756 
757 static inline bool si_fromuser(const struct siginfo *info)
758 {
759 	return info == SEND_SIG_NOINFO ||
760 		(!is_si_special(info) && SI_FROMUSER(info));
761 }
762 
763 /*
764  * called with RCU read lock from check_kill_permission()
765  */
766 static int kill_ok_by_cred(struct task_struct *t)
767 {
768 	const struct cred *cred = current_cred();
769 	const struct cred *tcred = __task_cred(t);
770 
771 	if (uid_eq(cred->euid, tcred->suid) ||
772 	    uid_eq(cred->euid, tcred->uid)  ||
773 	    uid_eq(cred->uid,  tcred->suid) ||
774 	    uid_eq(cred->uid,  tcred->uid))
775 		return 1;
776 
777 	if (ns_capable(tcred->user_ns, CAP_KILL))
778 		return 1;
779 
780 	return 0;
781 }
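
/*
 * Example (illustrative): a setuid-root program started by uid 1000
 * keeps tcred->uid == 1000 even though its euid/suid are 0, so an
 * unprivileged sender with uid 1000 passes the uid_eq(cred->uid,
 * tcred->uid) check and may signal it, matching kill(2) semantics.
 */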
782 
783 /*
784  * Bad permissions for sending the signal
785  * - the caller must hold the RCU read lock
786  */
787 static int check_kill_permission(int sig, struct siginfo *info,
788 				 struct task_struct *t)
789 {
790 	struct pid *sid;
791 	int error;
792 
793 	if (!valid_signal(sig))
794 		return -EINVAL;
795 
796 	if (!si_fromuser(info))
797 		return 0;
798 
799 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
800 	if (error)
801 		return error;
802 
803 	if (!same_thread_group(current, t) &&
804 	    !kill_ok_by_cred(t)) {
805 		switch (sig) {
806 		case SIGCONT:
807 			sid = task_session(t);
808 			/*
809 			 * We don't return the error if sid == NULL. The
810 			 * task was unhashed, the caller must notice this.
811 			 */
812 			if (!sid || sid == task_session(current))
813 				break;
814 		default:
815 			return -EPERM;
816 		}
817 	}
818 
819 	return security_task_kill(t, info, sig, 0);
820 }
821 
822 /**
823  * ptrace_trap_notify - schedule trap to notify ptracer
824  * @t: tracee wanting to notify tracer
825  *
826  * This function schedules a sticky ptrace trap which is cleared on the
827  * next TRAP_STOP to notify the ptracer of an event.  @t must have been
828  * seized by the ptracer.
829  *
830  * If @t is running, STOP trap will be taken.  If trapped for STOP and
831  * ptracer is listening for events, tracee is woken up so that it can
832  * re-trap for the new event.  If trapped otherwise, STOP trap will be
833  * eventually taken without returning to userland after the existing traps
834  * are finished by PTRACE_CONT.
835  *
836  * CONTEXT:
837  * Must be called with @t->sighand->siglock held.
838  */
839 static void ptrace_trap_notify(struct task_struct *t)
840 {
841 	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
842 	assert_spin_locked(&t->sighand->siglock);
843 
844 	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
845 	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
846 }
847 
848 /*
849  * Handle magic process-wide effects of stop/continue signals. Unlike
850  * the signal actions, these happen immediately at signal-generation
851  * time regardless of blocking, ignoring, or handling.  This does the
852  * actual continuing for SIGCONT, but not the actual stopping for stop
853  * signals. The process stop is done as a signal action for SIG_DFL.
854  *
855  * Returns true if the signal should actually be delivered, otherwise
856  * it should be dropped.
857  */
858 static int prepare_signal(int sig, struct task_struct *p, bool force)
859 {
860 	struct signal_struct *signal = p->signal;
861 	struct task_struct *t;
862 
863 	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
864 		/*
865 		 * The process is in the middle of dying, nothing to do.
866 		 */
867 	} else if (sig_kernel_stop(sig)) {
868 		/*
869 		 * This is a stop signal.  Remove SIGCONT from all queues.
870 		 */
871 		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
872 		t = p;
873 		do {
874 			rm_from_queue(sigmask(SIGCONT), &t->pending);
875 		} while_each_thread(p, t);
876 	} else if (sig == SIGCONT) {
877 		unsigned int why;
878 		/*
879 		 * Remove all stop signals from all queues, wake all threads.
880 		 */
881 		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
882 		t = p;
883 		do {
884 			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
885 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
886 			if (likely(!(t->ptrace & PT_SEIZED)))
887 				wake_up_state(t, __TASK_STOPPED);
888 			else
889 				ptrace_trap_notify(t);
890 		} while_each_thread(p, t);
891 
892 		/*
893 		 * Notify the parent with CLD_CONTINUED if we were stopped.
894 		 *
895 		 * If we were in the middle of a group stop, we pretend it
896 		 * was already finished, and then continued. Since SIGCHLD
897 		 * doesn't queue we report only CLD_STOPPED, as if the next
898 		 * CLD_CONTINUED was dropped.
899 		 */
900 		why = 0;
901 		if (signal->flags & SIGNAL_STOP_STOPPED)
902 			why |= SIGNAL_CLD_CONTINUED;
903 		else if (signal->group_stop_count)
904 			why |= SIGNAL_CLD_STOPPED;
905 
906 		if (why) {
907 			/*
908 			 * The first thread which returns from do_signal_stop()
909 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
910 			 * notify its parent. See get_signal_to_deliver().
911 			 */
912 			signal->flags = why | SIGNAL_STOP_CONTINUED;
913 			signal->group_stop_count = 0;
914 			signal->group_exit_code = 0;
915 		}
916 	}
917 
918 	return !sig_ignored(p, sig, force);
919 }
920 
921 /*
922  * Test if P wants to take SIG.  After we've checked all threads with this,
923  * it's equivalent to finding no threads not blocking SIG.  Any threads not
924  * blocking SIG were ruled out because they are not running and already
925  * have pending signals.  Such threads will dequeue from the shared queue
926  * as soon as they're available, so putting the signal on the shared queue
927  * will be equivalent to sending it to one such thread.
928  */
929 static inline int wants_signal(int sig, struct task_struct *p)
930 {
931 	if (sigismember(&p->blocked, sig))
932 		return 0;
933 	if (p->flags & PF_EXITING)
934 		return 0;
935 	if (sig == SIGKILL)
936 		return 1;
937 	if (task_is_stopped_or_traced(p))
938 		return 0;
939 	return task_curr(p) || !signal_pending(p);
940 }
941 
942 static void complete_signal(int sig, struct task_struct *p, int group)
943 {
944 	struct signal_struct *signal = p->signal;
945 	struct task_struct *t;
946 
947 	/*
948 	 * Now find a thread we can wake up to take the signal off the queue.
949 	 *
950 	 * If the main thread wants the signal, it gets first crack.
951 	 * Probably the least surprising to the average bear.
952 	 */
953 	if (wants_signal(sig, p))
954 		t = p;
955 	else if (!group || thread_group_empty(p))
956 		/*
957 		 * There is just one thread and it does not need to be woken.
958 		 * It will dequeue unblocked signals before it runs again.
959 		 */
960 		return;
961 	else {
962 		/*
963 		 * Otherwise try to find a suitable thread.
964 		 */
965 		t = signal->curr_target;
966 		while (!wants_signal(sig, t)) {
967 			t = next_thread(t);
968 			if (t == signal->curr_target)
969 				/*
970 				 * No thread needs to be woken.
971 				 * Any eligible threads will see
972 				 * the signal in the queue soon.
973 				 */
974 				return;
975 		}
976 		signal->curr_target = t;
977 	}
978 
979 	/*
980 	 * Found a killable thread.  If the signal will be fatal,
981 	 * then start taking the whole group down immediately.
982 	 */
983 	if (sig_fatal(p, sig) &&
984 	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
985 	    !sigismember(&t->real_blocked, sig) &&
986 	    (sig == SIGKILL || !t->ptrace)) {
987 		/*
988 		 * This signal will be fatal to the whole group.
989 		 */
990 		if (!sig_kernel_coredump(sig)) {
991 			/*
992 			 * Start a group exit and wake everybody up.
993 			 * This way we don't have other threads
994 			 * running and doing things after a slower
995 			 * thread has the fatal signal pending.
996 			 */
997 			signal->flags = SIGNAL_GROUP_EXIT;
998 			signal->group_exit_code = sig;
999 			signal->group_stop_count = 0;
1000 			t = p;
1001 			do {
1002 				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1003 				sigaddset(&t->pending.signal, SIGKILL);
1004 				signal_wake_up(t, 1);
1005 			} while_each_thread(p, t);
1006 			return;
1007 		}
1008 	}
1009 
1010 	/*
1011 	 * The signal is already in the shared-pending queue.
1012 	 * Tell the chosen thread to wake up and dequeue it.
1013 	 */
1014 	signal_wake_up(t, sig == SIGKILL);
1015 	return;
1016 }
1017 
1018 static inline int legacy_queue(struct sigpending *signals, int sig)
1019 {
1020 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1021 }
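
/*
 * Example (illustrative): two SIGTERMs sent while the first is still
 * pending coalesce into a single delivery, because legacy_queue() sees
 * the bit already set, whereas two SIGRTMIN signals are each queued
 * and each delivered.
 */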
1022 
1023 #ifdef CONFIG_USER_NS
1024 static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1025 {
1026 	if (current_user_ns() == task_cred_xxx(t, user_ns))
1027 		return;
1028 
1029 	if (SI_FROMKERNEL(info))
1030 		return;
1031 
1032 	rcu_read_lock();
1033 	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
1034 					make_kuid(current_user_ns(), info->si_uid));
1035 	rcu_read_unlock();
1036 }
1037 #else
1038 static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1039 {
1040 	return;
1041 }
1042 #endif
1043 
1044 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
1045 			int group, int from_ancestor_ns)
1046 {
1047 	struct sigpending *pending;
1048 	struct sigqueue *q;
1049 	int override_rlimit;
1050 	int ret = 0, result;
1051 
1052 	assert_spin_locked(&t->sighand->siglock);
1053 
1054 	result = TRACE_SIGNAL_IGNORED;
1055 	if (!prepare_signal(sig, t,
1056 			from_ancestor_ns || (info == SEND_SIG_FORCED)))
1057 		goto ret;
1058 
1059 	pending = group ? &t->signal->shared_pending : &t->pending;
1060 	/*
1061 	 * Short-circuit ignored signals and support queuing
1062 	 * exactly one non-rt signal, so that we can get more
1063 	 * detailed information about the cause of the signal.
1064 	 */
1065 	result = TRACE_SIGNAL_ALREADY_PENDING;
1066 	if (legacy_queue(pending, sig))
1067 		goto ret;
1068 
1069 	result = TRACE_SIGNAL_DELIVERED;
1070 	/*
1071 	 * fast-pathed signals for kernel-internal things like SIGSTOP
1072 	 * or SIGKILL.
1073 	 */
1074 	if (info == SEND_SIG_FORCED)
1075 		goto out_set;
1076 
1077 	/*
1078 	 * Real-time signals must be queued if sent by sigqueue, or
1079 	 * some other real-time mechanism.  It is implementation
1080 	 * defined whether kill() does so.  We attempt to do so, on
1081 	 * the principle of least surprise, but since kill is not
1082 	 * allowed to fail with EAGAIN when low on memory we just
1083 	 * make sure at least one signal gets delivered and don't
1084 	 * pass on the info struct.
1085 	 */
1086 	if (sig < SIGRTMIN)
1087 		override_rlimit = (is_si_special(info) || info->si_code >= 0);
1088 	else
1089 		override_rlimit = 0;
1090 
1091 	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
1092 		override_rlimit);
1093 	if (q) {
1094 		list_add_tail(&q->list, &pending->list);
1095 		switch ((unsigned long) info) {
1096 		case (unsigned long) SEND_SIG_NOINFO:
1097 			q->info.si_signo = sig;
1098 			q->info.si_errno = 0;
1099 			q->info.si_code = SI_USER;
1100 			q->info.si_pid = task_tgid_nr_ns(current,
1101 							task_active_pid_ns(t));
1102 			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1103 			break;
1104 		case (unsigned long) SEND_SIG_PRIV:
1105 			q->info.si_signo = sig;
1106 			q->info.si_errno = 0;
1107 			q->info.si_code = SI_KERNEL;
1108 			q->info.si_pid = 0;
1109 			q->info.si_uid = 0;
1110 			break;
1111 		default:
1112 			copy_siginfo(&q->info, info);
1113 			if (from_ancestor_ns)
1114 				q->info.si_pid = 0;
1115 			break;
1116 		}
1117 
1118 		userns_fixup_signal_uid(&q->info, t);
1119 
1120 	} else if (!is_si_special(info)) {
1121 		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1122 			/*
1123 			 * Queue overflow, abort.  We may abort if the
1124 			 * signal was rt and sent by user using something
1125 			 * other than kill().
1126 			 */
1127 			result = TRACE_SIGNAL_OVERFLOW_FAIL;
1128 			ret = -EAGAIN;
1129 			goto ret;
1130 		} else {
1131 			/*
1132 			 * This is a silent loss of information.  We still
1133 			 * send the signal, but the *info bits are lost.
1134 			 */
1135 			result = TRACE_SIGNAL_LOSE_INFO;
1136 		}
1137 	}
1138 
1139 out_set:
1140 	signalfd_notify(t, sig);
1141 	sigaddset(&pending->signal, sig);
1142 	complete_signal(sig, t, group);
1143 ret:
1144 	trace_signal_generate(sig, info, t, group, result);
1145 	return ret;
1146 }
1147 
1148 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1149 			int group)
1150 {
1151 	int from_ancestor_ns = 0;
1152 
1153 #ifdef CONFIG_PID_NS
1154 	from_ancestor_ns = si_fromuser(info) &&
1155 			   !task_pid_nr_ns(current, task_active_pid_ns(t));
1156 #endif
1157 
1158 	return __send_signal(sig, info, t, group, from_ancestor_ns);
1159 }
1160 
1161 static void print_fatal_signal(int signr)
1162 {
1163 	struct pt_regs *regs = signal_pt_regs();
1164 	printk(KERN_INFO "%s/%d: potentially unexpected fatal signal %d.\n",
1165 		current->comm, task_pid_nr(current), signr);
1166 
1167 #if defined(__i386__) && !defined(__arch_um__)
1168 	printk(KERN_INFO "code at %08lx: ", regs->ip);
1169 	{
1170 		int i;
1171 		for (i = 0; i < 16; i++) {
1172 			unsigned char insn;
1173 
1174 			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1175 				break;
1176 			printk(KERN_CONT "%02x ", insn);
1177 		}
1178 	}
1179 	printk(KERN_CONT "\n");
1180 #endif
1181 	preempt_disable();
1182 	show_regs(regs);
1183 	preempt_enable();
1184 }
1185 
1186 static int __init setup_print_fatal_signals(char *str)
1187 {
1188 	get_option(&str, &print_fatal_signals);
1189 
1190 	return 1;
1191 }
1192 
1193 __setup("print-fatal-signals=", setup_print_fatal_signals);
1194 
1195 int
1196 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1197 {
1198 	return send_signal(sig, info, p, 1);
1199 }
1200 
1201 static int
1202 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1203 {
1204 	return send_signal(sig, info, t, 0);
1205 }
1206 
1207 int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1208 			bool group)
1209 {
1210 	unsigned long flags;
1211 	int ret = -ESRCH;
1212 
1213 	if (lock_task_sighand(p, &flags)) {
1214 		ret = send_signal(sig, info, p, group);
1215 		unlock_task_sighand(p, &flags);
1216 	}
1217 
1218 	return ret;
1219 }
1220 
1221 /*
1222  * Force a signal that the process can't ignore: if necessary
1223  * we unblock the signal and change any SIG_IGN to SIG_DFL.
1224  *
1225  * Note: If we unblock the signal, we always reset it to SIG_DFL,
1226  * since we do not want to have a signal handler that was blocked
1227  * be invoked when user space had explicitly blocked it.
1228  *
1229  * We don't want to have recursive SIGSEGV's etc, for example,
1230  * that is why we also clear SIGNAL_UNKILLABLE.
1231  */
1232 int
1233 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1234 {
1235 	unsigned long int flags;
1236 	int ret, blocked, ignored;
1237 	struct k_sigaction *action;
1238 
1239 	spin_lock_irqsave(&t->sighand->siglock, flags);
1240 	action = &t->sighand->action[sig-1];
1241 	ignored = action->sa.sa_handler == SIG_IGN;
1242 	blocked = sigismember(&t->blocked, sig);
1243 	if (blocked || ignored) {
1244 		action->sa.sa_handler = SIG_DFL;
1245 		if (blocked) {
1246 			sigdelset(&t->blocked, sig);
1247 			recalc_sigpending_and_wake(t);
1248 		}
1249 	}
1250 	if (action->sa.sa_handler == SIG_DFL)
1251 		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1252 	ret = specific_send_sig_info(sig, info, t);
1253 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1254 
1255 	return ret;
1256 }
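
/*
 * Caller sketch (hypothetical arch fault handler, not in this file):
 * synchronous faults are typically reported through force_sig_info()
 * so the task cannot block or ignore its own SIGSEGV; the fields shown
 * follow the usual SIGSEGV/SEGV_MAPERR convention.
 */
static void example_report_fault(unsigned long addr)
{
	siginfo_t info;

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_MAPERR;		/* address not mapped */
	info.si_addr  = (void __user *)addr;
	force_sig_info(SIGSEGV, &info, current);
}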
1257 
1258 /*
1259  * Nuke all other threads in the group.
1260  */
1261 int zap_other_threads(struct task_struct *p)
1262 {
1263 	struct task_struct *t = p;
1264 	int count = 0;
1265 
1266 	p->signal->group_stop_count = 0;
1267 
1268 	while_each_thread(p, t) {
1269 		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1270 		count++;
1271 
1272 		/* Don't bother with already dead threads */
1273 		if (t->exit_state)
1274 			continue;
1275 		sigaddset(&t->pending.signal, SIGKILL);
1276 		signal_wake_up(t, 1);
1277 	}
1278 
1279 	return count;
1280 }
1281 
1282 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1283 					   unsigned long *flags)
1284 {
1285 	struct sighand_struct *sighand;
1286 
1287 	for (;;) {
1288 		local_irq_save(*flags);
1289 		rcu_read_lock();
1290 		sighand = rcu_dereference(tsk->sighand);
1291 		if (unlikely(sighand == NULL)) {
1292 			rcu_read_unlock();
1293 			local_irq_restore(*flags);
1294 			break;
1295 		}
1296 
1297 		spin_lock(&sighand->siglock);
1298 		if (likely(sighand == tsk->sighand)) {
1299 			rcu_read_unlock();
1300 			break;
1301 		}
1302 		spin_unlock(&sighand->siglock);
1303 		rcu_read_unlock();
1304 		local_irq_restore(*flags);
1305 	}
1306 
1307 	return sighand;
1308 }
1309 
1310 /*
1311  * send signal info to all the members of a group
1312  */
1313 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1314 {
1315 	int ret;
1316 
1317 	rcu_read_lock();
1318 	ret = check_kill_permission(sig, info, p);
1319 	rcu_read_unlock();
1320 
1321 	if (!ret && sig)
1322 		ret = do_send_sig_info(sig, info, p, true);
1323 
1324 	return ret;
1325 }
1326 
1327 /*
1328  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1329  * control characters do (^C, ^Z etc)
1330  * - the caller must hold at least a readlock on tasklist_lock
1331  */
1332 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1333 {
1334 	struct task_struct *p = NULL;
1335 	int retval, success;
1336 
1337 	success = 0;
1338 	retval = -ESRCH;
1339 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1340 		int err = group_send_sig_info(sig, info, p);
1341 		success |= !err;
1342 		retval = err;
1343 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1344 	return success ? 0 : retval;
1345 }
1346 
1347 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1348 {
1349 	int error = -ESRCH;
1350 	struct task_struct *p;
1351 
1352 	rcu_read_lock();
1353 retry:
1354 	p = pid_task(pid, PIDTYPE_PID);
1355 	if (p) {
1356 		error = group_send_sig_info(sig, info, p);
1357 		if (unlikely(error == -ESRCH))
1358 			/*
1359 			 * The task was unhashed in between, try again.
1360 			 * If it is dead, pid_task() will return NULL,
1361 			 * if we race with de_thread() it will find the
1362 			 * new leader.
1363 			 */
1364 			goto retry;
1365 	}
1366 	rcu_read_unlock();
1367 
1368 	return error;
1369 }
1370 
1371 int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1372 {
1373 	int error;
1374 	rcu_read_lock();
1375 	error = kill_pid_info(sig, info, find_vpid(pid));
1376 	rcu_read_unlock();
1377 	return error;
1378 }
1379 
1380 static int kill_as_cred_perm(const struct cred *cred,
1381 			     struct task_struct *target)
1382 {
1383 	const struct cred *pcred = __task_cred(target);
1384 	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
1385 	    !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
1386 		return 0;
1387 	return 1;
1388 }
1389 
1390 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1391 int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
1392 			 const struct cred *cred, u32 secid)
1393 {
1394 	int ret = -EINVAL;
1395 	struct task_struct *p;
1396 	unsigned long flags;
1397 
1398 	if (!valid_signal(sig))
1399 		return ret;
1400 
1401 	rcu_read_lock();
1402 	p = pid_task(pid, PIDTYPE_PID);
1403 	if (!p) {
1404 		ret = -ESRCH;
1405 		goto out_unlock;
1406 	}
1407 	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
1408 		ret = -EPERM;
1409 		goto out_unlock;
1410 	}
1411 	ret = security_task_kill(p, info, sig, secid);
1412 	if (ret)
1413 		goto out_unlock;
1414 
1415 	if (sig) {
1416 		if (lock_task_sighand(p, &flags)) {
1417 			ret = __send_signal(sig, info, p, 1, 0);
1418 			unlock_task_sighand(p, &flags);
1419 		} else
1420 			ret = -ESRCH;
1421 	}
1422 out_unlock:
1423 	rcu_read_unlock();
1424 	return ret;
1425 }
1426 EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
1427 
1428 /*
1429  * kill_something_info() interprets pid in interesting ways just like kill(2).
1430  *
1431  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1432  * is probably wrong.  Should make it like BSD or SYSV.
1433  */
1434 
1435 static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1436 {
1437 	int ret;
1438 
1439 	if (pid > 0) {
1440 		rcu_read_lock();
1441 		ret = kill_pid_info(sig, info, find_vpid(pid));
1442 		rcu_read_unlock();
1443 		return ret;
1444 	}
1445 
1446 	read_lock(&tasklist_lock);
1447 	if (pid != -1) {
1448 		ret = __kill_pgrp_info(sig, info,
1449 				pid ? find_vpid(-pid) : task_pgrp(current));
1450 	} else {
1451 		int retval = 0, count = 0;
1452 		struct task_struct * p;
1453 
1454 		for_each_process(p) {
1455 			if (task_pid_vnr(p) > 1 &&
1456 					!same_thread_group(p, current)) {
1457 				int err = group_send_sig_info(sig, info, p);
1458 				++count;
1459 				if (err != -EPERM)
1460 					retval = err;
1461 			}
1462 		}
1463 		ret = count ? retval : -ESRCH;
1464 	}
1465 	read_unlock(&tasklist_lock);
1466 
1467 	return ret;
1468 }
1469 
1470 /*
1471  * These are for backward compatibility with the rest of the kernel source.
1472  */
1473 
1474 int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1475 {
1476 	/*
1477 	 * Make sure legacy kernel users don't send in bad values
1478 	 * (normal paths check this in check_kill_permission).
1479 	 */
1480 	if (!valid_signal(sig))
1481 		return -EINVAL;
1482 
1483 	return do_send_sig_info(sig, info, p, false);
1484 }
1485 
1486 #define __si_special(priv) \
1487 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1488 
1489 int
1490 send_sig(int sig, struct task_struct *p, int priv)
1491 {
1492 	return send_sig_info(sig, __si_special(priv), p);
1493 }
1494 
1495 void
1496 force_sig(int sig, struct task_struct *p)
1497 {
1498 	force_sig_info(sig, SEND_SIG_PRIV, p);
1499 }
1500 
1501 /*
1502  * When things go south during signal handling, we
1503  * will force a SIGSEGV. And if the signal that caused
1504  * the problem was already a SIGSEGV, we'll want to
1505  * make sure we don't even try to deliver the signal.
1506  */
1507 int
1508 force_sigsegv(int sig, struct task_struct *p)
1509 {
1510 	if (sig == SIGSEGV) {
1511 		unsigned long flags;
1512 		spin_lock_irqsave(&p->sighand->siglock, flags);
1513 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1514 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1515 	}
1516 	force_sig(SIGSEGV, p);
1517 	return 0;
1518 }
1519 
1520 int kill_pgrp(struct pid *pid, int sig, int priv)
1521 {
1522 	int ret;
1523 
1524 	read_lock(&tasklist_lock);
1525 	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1526 	read_unlock(&tasklist_lock);
1527 
1528 	return ret;
1529 }
1530 EXPORT_SYMBOL(kill_pgrp);
1531 
1532 int kill_pid(struct pid *pid, int sig, int priv)
1533 {
1534 	return kill_pid_info(sig, __si_special(priv), pid);
1535 }
1536 EXPORT_SYMBOL(kill_pid);
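
/*
 * Usage sketch (hypothetical driver): deliver SIGIO to a process whose
 * struct pid was saved earlier, e.g. with get_pid(task_pid(current)).
 * priv == 1 selects SEND_SIG_PRIV, so the signal appears to come from
 * the kernel rather than from another user process.
 */
static void example_notify_owner(struct pid *owner)
{
	if (owner)
		kill_pid(owner, SIGIO, 1);
}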
1537 
1538 /*
1539  * These functions support sending signals using preallocated sigqueue
1540  * structures.  This is needed "because realtime applications cannot
1541  * afford to lose notifications of asynchronous events, like timer
1542  * expirations or I/O completions".  In the case of POSIX Timers
1543  * we allocate the sigqueue structure from the timer_create.  If this
1544  * allocation fails we are able to report the failure to the application
1545  * with an EAGAIN error.
1546  */
1547 struct sigqueue *sigqueue_alloc(void)
1548 {
1549 	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1550 
1551 	if (q)
1552 		q->flags |= SIGQUEUE_PREALLOC;
1553 
1554 	return q;
1555 }
1556 
1557 void sigqueue_free(struct sigqueue *q)
1558 {
1559 	unsigned long flags;
1560 	spinlock_t *lock = &current->sighand->siglock;
1561 
1562 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1563 	/*
1564 	 * We must hold ->siglock while testing q->list
1565 	 * to serialize with collect_signal() or with
1566 	 * __exit_signal()->flush_sigqueue().
1567 	 */
1568 	spin_lock_irqsave(lock, flags);
1569 	q->flags &= ~SIGQUEUE_PREALLOC;
1570 	/*
1571 	 * If it is queued it will be freed when dequeued,
1572 	 * like the "regular" sigqueue.
1573 	 */
1574 	if (!list_empty(&q->list))
1575 		q = NULL;
1576 	spin_unlock_irqrestore(lock, flags);
1577 
1578 	if (q)
1579 		__sigqueue_free(q);
1580 }
1581 
1582 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1583 {
1584 	int sig = q->info.si_signo;
1585 	struct sigpending *pending;
1586 	unsigned long flags;
1587 	int ret, result;
1588 
1589 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1590 
1591 	ret = -1;
1592 	if (!likely(lock_task_sighand(t, &flags)))
1593 		goto ret;
1594 
1595 	ret = 1; /* the signal is ignored */
1596 	result = TRACE_SIGNAL_IGNORED;
1597 	if (!prepare_signal(sig, t, false))
1598 		goto out;
1599 
1600 	ret = 0;
1601 	if (unlikely(!list_empty(&q->list))) {
1602 		/*
1603 		 * If an SI_TIMER entry is already queued, just increment
1604 		 * the overrun count.
1605 		 */
1606 		BUG_ON(q->info.si_code != SI_TIMER);
1607 		q->info.si_overrun++;
1608 		result = TRACE_SIGNAL_ALREADY_PENDING;
1609 		goto out;
1610 	}
1611 	q->info.si_overrun = 0;
1612 
1613 	signalfd_notify(t, sig);
1614 	pending = group ? &t->signal->shared_pending : &t->pending;
1615 	list_add_tail(&q->list, &pending->list);
1616 	sigaddset(&pending->signal, sig);
1617 	complete_signal(sig, t, group);
1618 	result = TRACE_SIGNAL_DELIVERED;
1619 out:
1620 	trace_signal_generate(sig, &q->info, t, group, result);
1621 	unlock_task_sighand(t, &flags);
1622 ret:
1623 	return ret;
1624 }
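
/*
 * Lifecycle sketch (illustrative, mirrors the posix-timers usage): the
 * sigqueue is preallocated when the timer is created, so expiry can
 * never fail with -EAGAIN, and the same entry is reused for every
 * expiry via send_sigqueue().
 */
static struct sigqueue *example_timer_create(void)
{
	return sigqueue_alloc();	/* NULL here -> report -EAGAIN now */
}

static void example_timer_fire(struct sigqueue *q, struct task_struct *t)
{
	send_sigqueue(q, t, 0);		/* no allocation, safe at expiry */
}

static void example_timer_destroy(struct sigqueue *q)
{
	sigqueue_free(q);		/* freed now, or when finally dequeued */
}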
1625 
1626 /*
1627  * Let a parent know about the death of a child.
1628  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1629  *
1630  * Returns true if our parent ignored us and so we've switched to
1631  * self-reaping.
1632  */
1633 bool do_notify_parent(struct task_struct *tsk, int sig)
1634 {
1635 	struct siginfo info;
1636 	unsigned long flags;
1637 	struct sighand_struct *psig;
1638 	bool autoreap = false;
1639 	cputime_t utime, stime;
1640 
1641 	BUG_ON(sig == -1);
1642 
1643 	/* do_notify_parent_cldstop should have been called instead.  */
1644 	BUG_ON(task_is_stopped_or_traced(tsk));
1645 
1646 	BUG_ON(!tsk->ptrace &&
1647 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1648 
1649 	if (sig != SIGCHLD) {
1650 		/*
1651 		 * This is only possible if parent == real_parent.
1652 		 * Check if it has changed security domain.
1653 		 */
1654 		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
1655 			sig = SIGCHLD;
1656 	}
1657 
1658 	info.si_signo = sig;
1659 	info.si_errno = 0;
1660 	/*
1661 	 * We are under tasklist_lock here so our parent is tied to
1662 	 * us and cannot change.
1663 	 *
1664 	 * task_active_pid_ns will always return the same pid namespace
1665 	 * until a task passes through release_task.
1666 	 *
1667 	 * write_lock() currently calls preempt_disable() which is the
1668 	 * same as rcu_read_lock(), but according to Oleg, this is not
1669 	 * correct to rely on this
1670 	 */
1671 	rcu_read_lock();
1672 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1673 	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1674 				       task_uid(tsk));
1675 	rcu_read_unlock();
1676 
1677 	task_cputime(tsk, &utime, &stime);
1678 	info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
1679 	info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);
1680 
1681 	info.si_status = tsk->exit_code & 0x7f;
1682 	if (tsk->exit_code & 0x80)
1683 		info.si_code = CLD_DUMPED;
1684 	else if (tsk->exit_code & 0x7f)
1685 		info.si_code = CLD_KILLED;
1686 	else {
1687 		info.si_code = CLD_EXITED;
1688 		info.si_status = tsk->exit_code >> 8;
1689 	}
1690 
1691 	psig = tsk->parent->sighand;
1692 	spin_lock_irqsave(&psig->siglock, flags);
1693 	if (!tsk->ptrace && sig == SIGCHLD &&
1694 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1695 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1696 		/*
1697 		 * We are exiting and our parent doesn't care.  POSIX.1
1698 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1699 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1700 		 * automatically and not left for our parent's wait4 call.
1701 		 * Rather than having the parent do it as a magic kind of
1702 		 * signal handler, we just set this to tell do_exit that we
1703 		 * can be cleaned up without becoming a zombie.  Note that
1704 		 * we still call __wake_up_parent in this case, because a
1705 		 * blocked sys_wait4 might now return -ECHILD.
1706 		 *
1707 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1708 		 * is implementation-defined: we do (if you don't want
1709 		 * it, just use SIG_IGN instead).
1710 		 */
1711 		autoreap = true;
1712 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1713 			sig = 0;
1714 	}
1715 	if (valid_signal(sig) && sig)
1716 		__group_send_sig_info(sig, &info, tsk->parent);
1717 	__wake_up_parent(tsk, tsk->parent);
1718 	spin_unlock_irqrestore(&psig->siglock, flags);
1719 
1720 	return autoreap;
1721 }
1722 
1723 /**
1724  * do_notify_parent_cldstop - notify parent of stopped/continued state change
1725  * @tsk: task reporting the state change
1726  * @for_ptracer: the notification is for ptracer
1727  * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1728  *
1729  * Notify @tsk's parent that the stopped/continued state has changed.  If
1730  * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
1731  * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1732  *
1733  * CONTEXT:
1734  * Must be called with tasklist_lock at least read locked.
1735  */
1736 static void do_notify_parent_cldstop(struct task_struct *tsk,
1737 				     bool for_ptracer, int why)
1738 {
1739 	struct siginfo info;
1740 	unsigned long flags;
1741 	struct task_struct *parent;
1742 	struct sighand_struct *sighand;
1743 	cputime_t utime, stime;
1744 
1745 	if (for_ptracer) {
1746 		parent = tsk->parent;
1747 	} else {
1748 		tsk = tsk->group_leader;
1749 		parent = tsk->real_parent;
1750 	}
1751 
1752 	info.si_signo = SIGCHLD;
1753 	info.si_errno = 0;
1754 	/*
1755 	 * see comment in do_notify_parent() about the following 4 lines
1756 	 */
1757 	rcu_read_lock();
1758 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
1759 	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
1760 	rcu_read_unlock();
1761 
1762 	task_cputime(tsk, &utime, &stime);
1763 	info.si_utime = cputime_to_clock_t(utime);
1764 	info.si_stime = cputime_to_clock_t(stime);
1765 
1766 	info.si_code = why;
1767 	switch (why) {
1768 	case CLD_CONTINUED:
1769 		info.si_status = SIGCONT;
1770 		break;
1771 	case CLD_STOPPED:
1772 		info.si_status = tsk->signal->group_exit_code & 0x7f;
1773 		break;
1774 	case CLD_TRAPPED:
1775 		info.si_status = tsk->exit_code & 0x7f;
1776 		break;
1777 	default:
1778 		BUG();
1779 	}
1780 
1781 	sighand = parent->sighand;
1782 	spin_lock_irqsave(&sighand->siglock, flags);
1783 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1784 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1785 		__group_send_sig_info(SIGCHLD, &info, parent);
1786 	/*
1787 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1788 	 */
1789 	__wake_up_parent(tsk, parent);
1790 	spin_unlock_irqrestore(&sighand->siglock, flags);
1791 }
1792 
1793 static inline int may_ptrace_stop(void)
1794 {
1795 	if (!likely(current->ptrace))
1796 		return 0;
1797 	/*
1798 	 * Are we in the middle of do_coredump?
1799 	 * If so and our tracer is also part of the coredump, stopping
1800 	 * is a deadlock situation, and pointless because our tracer
1801 	 * is dead, so don't allow us to stop.
1802 	 * If SIGKILL was already sent before the caller unlocked
1803 	 * ->siglock we must see ->core_state != NULL. Otherwise it
1804 	 * is safe to enter schedule().
1805 	 *
1806 	 * This is almost outdated: a task with a pending SIGKILL can't
1807 	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
1808 	 * after SIGKILL was already dequeued.
1809 	 */
1810 	if (unlikely(current->mm->core_state) &&
1811 	    unlikely(current->mm == current->parent->mm))
1812 		return 0;
1813 
1814 	return 1;
1815 }
1816 
1817 /*
1818  * Return non-zero if there is a SIGKILL that should be waking us up.
1819  * Called with the siglock held.
1820  */
1821 static int sigkill_pending(struct task_struct *tsk)
1822 {
1823 	return	sigismember(&tsk->pending.signal, SIGKILL) ||
1824 		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1825 }
1826 
1827 /*
1828  * This must be called with current->sighand->siglock held.
1829  *
1830  * This should be the path for all ptrace stops.
1831  * We always set current->last_siginfo while stopped here.
1832  * That makes it a way to test a stopped process for
1833  * being ptrace-stopped vs being job-control-stopped.
1834  *
1835  * If we actually decide not to stop at all because the tracer
1836  * is gone, we keep current->exit_code unless clear_code.
1837  */
1838 static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1839 	__releases(&current->sighand->siglock)
1840 	__acquires(&current->sighand->siglock)
1841 {
1842 	bool gstop_done = false;
1843 
1844 	if (arch_ptrace_stop_needed(exit_code, info)) {
1845 		/*
1846 		 * The arch code has something special to do before a
1847 		 * ptrace stop.  This is allowed to block, e.g. for faults
1848 		 * on user stack pages.  We can't keep the siglock while
1849 		 * calling arch_ptrace_stop, so we must release it now.
1850 		 * To preserve proper semantics, we must do this before
1851 		 * any signal bookkeeping like checking group_stop_count.
1852 		 * Meanwhile, a SIGKILL could come in before we retake the
1853 		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
1854 		 * So after regaining the lock, we must check for SIGKILL.
1855 		 */
1856 		spin_unlock_irq(&current->sighand->siglock);
1857 		arch_ptrace_stop(exit_code, info);
1858 		spin_lock_irq(&current->sighand->siglock);
1859 		if (sigkill_pending(current))
1860 			return;
1861 	}
1862 
1863 	/*
1864 	 * We're committing to trapping.  TRACED should be visible before
1865 	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
1866 	 * Also, transition to TRACED and updates to ->jobctl should be
1867 	 * atomic with respect to siglock and should be done after the arch
1868 	 * hook as siglock is released and regrabbed across it.
1869 	 */
1870 	set_current_state(TASK_TRACED);
1871 
1872 	current->last_siginfo = info;
1873 	current->exit_code = exit_code;
1874 
1875 	/*
1876 	 * If @why is CLD_STOPPED, we're trapping to participate in a group
1877 	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
1878 	 * across siglock relocks since INTERRUPT was scheduled, PENDING
1879 	 * could be clear now.  We act as if SIGCONT is received after
1880 	 * TASK_TRACED is entered - ignore it.
1881 	 */
1882 	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
1883 		gstop_done = task_participate_group_stop(current);
1884 
1885 	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
1886 	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
1887 	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
1888 		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
1889 
1890 	/* entering a trap, clear TRAPPING */
1891 	task_clear_jobctl_trapping(current);
1892 
1893 	spin_unlock_irq(&current->sighand->siglock);
1894 	read_lock(&tasklist_lock);
1895 	if (may_ptrace_stop()) {
1896 		/*
1897 		 * Notify parents of the stop.
1898 		 *
1899 		 * While ptraced, there are two parents - the ptracer and
1900 		 * the real_parent of the group_leader.  The ptracer should
1901 		 * know about every stop while the real parent is only
1902 		 * interested in the completion of group stop.  The states
1903 		 * for the two don't interact with each other.  Notify
1904 		 * separately unless they're gonna be duplicates.
1905 		 */
1906 		do_notify_parent_cldstop(current, true, why);
1907 		if (gstop_done && ptrace_reparented(current))
1908 			do_notify_parent_cldstop(current, false, why);
1909 
1910 		/*
1911 		 * Don't want to allow preemption here, because
1912 		 * sys_ptrace() needs this task to be inactive.
1913 		 *
1914 		 * XXX: implement read_unlock_no_resched().
1915 		 */
1916 		preempt_disable();
1917 		read_unlock(&tasklist_lock);
1918 		preempt_enable_no_resched();
1919 		freezable_schedule();
1920 	} else {
1921 		/*
1922 		 * By the time we got the lock, our tracer went away.
1923 		 * Don't drop the lock yet, another tracer may come.
1924 		 *
1925 		 * If @gstop_done, the ptracer went away between group stop
1926 		 * completion and here.  During detach, it would have set
1927 		 * JOBCTL_STOP_PENDING on us and we'll re-enter
1928 		 * TASK_STOPPED in do_signal_stop() on return, so notifying
1929 		 * the real parent of the group stop completion is enough.
1930 		 */
1931 		if (gstop_done)
1932 			do_notify_parent_cldstop(current, false, why);
1933 
1934 		/* tasklist protects us from ptrace_freeze_traced() */
1935 		__set_current_state(TASK_RUNNING);
1936 		if (clear_code)
1937 			current->exit_code = 0;
1938 		read_unlock(&tasklist_lock);
1939 	}
1940 
1941 	/*
1942 	 * We are back.  Now reacquire the siglock before touching
1943 	 * last_siginfo, so that we are sure to have synchronized with
1944 	 * any signal-sending on another CPU that wants to examine it.
1945 	 */
1946 	spin_lock_irq(&current->sighand->siglock);
1947 	current->last_siginfo = NULL;
1948 
1949 	/* LISTENING can be set only during STOP traps, clear it */
1950 	current->jobctl &= ~JOBCTL_LISTENING;
1951 
1952 	/*
1953 	 * Queued signals ignored us while we were stopped for tracing.
1954 	 * So check for any that we should take before resuming user mode.
1955 	 * This sets TIF_SIGPENDING, but never clears it.
1956 	 */
1957 	recalc_sigpending_tsk(current);
1958 }
1959 
1960 static void ptrace_do_notify(int signr, int exit_code, int why)
1961 {
1962 	siginfo_t info;
1963 
1964 	memset(&info, 0, sizeof info);
1965 	info.si_signo = signr;
1966 	info.si_code = exit_code;
1967 	info.si_pid = task_pid_vnr(current);
1968 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1969 
1970 	/* Let the debugger run.  */
1971 	ptrace_stop(exit_code, why, 1, &info);
1972 }
1973 
1974 void ptrace_notify(int exit_code)
1975 {
1976 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1977 	if (unlikely(current->task_works))
1978 		task_work_run();
1979 
1980 	spin_lock_irq(&current->sighand->siglock);
1981 	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
1982 	spin_unlock_irq(&current->sighand->siglock);
1983 }
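
/*
 * Illustrative user-space sketch (not part of this file): the stops
 * reported through ptrace_stop()/ptrace_notify() are what a tracer
 * observes via waitpid(2).  A minimal, hedged example -- assumes a
 * target `pid` the caller is allowed to trace:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/wait.h>
 *	#include <stdio.h>
 *
 *	void trace_once(pid_t pid)
 *	{
 *		int status;
 *
 *		ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *		waitpid(pid, &status, 0);	// tracee is now in TASK_TRACED
 *		if (WIFSTOPPED(status))
 *			printf("stopped by signal %d\n", WSTOPSIG(status));
 *		ptrace(PTRACE_DETACH, pid, NULL, 0);	// let it run again
 *	}
 */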
1984 
1985 /**
1986  * do_signal_stop - handle group stop for SIGSTOP and other stop signals
1987  * @signr: signr causing group stop if initiating
1988  *
1989  * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
1990  * and participate in it.  If already set, participate in the existing
1991  * group stop.  If participated in a group stop (and thus slept), %true is
1992  * returned with siglock released.
1993  *
1994  * If ptraced, this function doesn't handle stop itself.  Instead,
1995  * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
1996  * untouched.  The caller must ensure that INTERRUPT trap handling takes
1997  * place afterwards.
1998  *
1999  * CONTEXT:
2000  * Must be called with @current->sighand->siglock held, which is released
2001  * on %true return.
2002  *
2003  * RETURNS:
2004  * %false if group stop is already cancelled or ptrace trap is scheduled.
2005  * %true if participated in group stop.
2006  */
2007 static bool do_signal_stop(int signr)
2008 	__releases(&current->sighand->siglock)
2009 {
2010 	struct signal_struct *sig = current->signal;
2011 
2012 	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2013 		unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2014 		struct task_struct *t;
2015 
2016 		/* signr will be recorded in task->jobctl for retries */
2017 		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2018 
2019 		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2020 		    unlikely(signal_group_exit(sig)))
2021 			return false;
2022 		/*
2023 		 * There is no group stop already in progress.  We must
2024 		 * initiate one now.
2025 		 *
2026 		 * While ptraced, a task may be resumed while group stop is
2027 		 * still in effect and then receive a stop signal and
2028 		 * initiate another group stop.  This deviates from the
2029 		 * usual behavior as two consecutive stop signals can't
2030 		 * cause two group stops when !ptraced.  That is why we
2031 		 * also check !task_is_stopped(t) below.
2032 		 *
2033 		 * The condition can be distinguished by testing whether
2034 		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
2035 		 * group_exit_code in such case.
2036 		 *
2037 		 * This is not necessary for SIGNAL_STOP_CONTINUED because
2038 		 * an intervening stop signal is required to cause two
2039 		 * continued events regardless of ptrace.
2040 		 */
2041 		if (!(sig->flags & SIGNAL_STOP_STOPPED))
2042 			sig->group_exit_code = signr;
2043 
2044 		sig->group_stop_count = 0;
2045 
2046 		if (task_set_jobctl_pending(current, signr | gstop))
2047 			sig->group_stop_count++;
2048 
2049 		for (t = next_thread(current); t != current;
2050 		     t = next_thread(t)) {
2051 			/*
2052 			 * Setting state to TASK_STOPPED for a group
2053 			 * stop is always done with the siglock held,
2054 			 * so this check has no races.
2055 			 */
2056 			if (!task_is_stopped(t) &&
2057 			    task_set_jobctl_pending(t, signr | gstop)) {
2058 				sig->group_stop_count++;
2059 				if (likely(!(t->ptrace & PT_SEIZED)))
2060 					signal_wake_up(t, 0);
2061 				else
2062 					ptrace_trap_notify(t);
2063 			}
2064 		}
2065 	}
2066 
2067 	if (likely(!current->ptrace)) {
2068 		int notify = 0;
2069 
2070 		/*
2071 		 * If there are no other threads in the group, or if there
2072 		 * is a group stop in progress and we are the last to stop,
2073 		 * report to the parent.
2074 		 */
2075 		if (task_participate_group_stop(current))
2076 			notify = CLD_STOPPED;
2077 
2078 		__set_current_state(TASK_STOPPED);
2079 		spin_unlock_irq(&current->sighand->siglock);
2080 
2081 		/*
2082 		 * Notify the parent of the group stop completion.  Because
2083 		 * we're not holding either the siglock or tasklist_lock
2084 	 * here, a ptracer may attach in between; however, this is for
2085 		 * group stop and should always be delivered to the real
2086 		 * parent of the group leader.  The new ptracer will get
2087 		 * its notification when this task transitions into
2088 		 * TASK_TRACED.
2089 		 */
2090 		if (notify) {
2091 			read_lock(&tasklist_lock);
2092 			do_notify_parent_cldstop(current, false, notify);
2093 			read_unlock(&tasklist_lock);
2094 		}
2095 
2096 		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2097 		freezable_schedule();
2098 		return true;
2099 	} else {
2100 		/*
2101 		 * While ptraced, group stop is handled by STOP trap.
2102 		 * Schedule it and let the caller deal with it.
2103 		 */
2104 		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2105 		return false;
2106 	}
2107 }
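
/*
 * Illustrative user-space sketch (not part of this file): the group
 * stop machinery above is what plain kill(2) with SIGSTOP/SIGCONT
 * drives.  Assumes a process group `pgid` the caller may signal:
 *
 *	#include <sys/types.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	void stop_and_resume(pid_t pgid)
 *	{
 *		kill(-pgid, SIGSTOP);	// all threads end up in TASK_STOPPED
 *		sleep(1);
 *		kill(-pgid, SIGCONT);	// wakes the stopped threads back up
 *	}
 */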
2108 
2109 /**
2110  * do_jobctl_trap - take care of ptrace jobctl traps
2111  *
2112  * When PT_SEIZED, it's used for both group stop and explicit
2113  * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2114  * accompanying siginfo.  If stopped, the lower eight bits of exit_code contain
2115  * the stop signal; otherwise, %SIGTRAP.
2116  *
2117  * When !PT_SEIZED, it's used only for group stop trap with stop signal
2118  * number as exit_code and no siginfo.
2119  *
2120  * CONTEXT:
2121  * Must be called with @current->sighand->siglock held, which may be
2122  * released and re-acquired before returning with intervening sleep.
2123  */
2124 static void do_jobctl_trap(void)
2125 {
2126 	struct signal_struct *signal = current->signal;
2127 	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2128 
2129 	if (current->ptrace & PT_SEIZED) {
2130 		if (!signal->group_stop_count &&
2131 		    !(signal->flags & SIGNAL_STOP_STOPPED))
2132 			signr = SIGTRAP;
2133 		WARN_ON_ONCE(!signr);
2134 		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2135 				 CLD_STOPPED);
2136 	} else {
2137 		WARN_ON_ONCE(!signr);
2138 		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2139 		current->exit_code = 0;
2140 	}
2141 }
2142 
2143 static int ptrace_signal(int signr, siginfo_t *info)
2144 {
2145 	ptrace_signal_deliver();
2146 	/*
2147 	 * We do not check sig_kernel_stop(signr) but set this marker
2148 	 * unconditionally because we do not know whether debugger will
2149 	 * change signr. This flag has no meaning unless we are going
2150 	 * to stop after return from ptrace_stop(). In this case it will
2151 	 * be checked in do_signal_stop(), we should only stop if it was
2152 	 * not cleared by SIGCONT while we were sleeping. See also the
2153 	 * comment in dequeue_signal().
2154 	 */
2155 	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2156 	ptrace_stop(signr, CLD_TRAPPED, 0, info);
2157 
2158 	/* We're back.  Did the debugger cancel the sig?  */
2159 	signr = current->exit_code;
2160 	if (signr == 0)
2161 		return signr;
2162 
2163 	current->exit_code = 0;
2164 
2165 	/*
2166 	 * Update the siginfo structure if the signal has
2167 	 * changed.  If the debugger wanted something
2168 	 * specific in the siginfo structure then it should
2169 	 * have updated *info via PTRACE_SETSIGINFO.
2170 	 */
2171 	if (signr != info->si_signo) {
2172 		info->si_signo = signr;
2173 		info->si_errno = 0;
2174 		info->si_code = SI_USER;
2175 		rcu_read_lock();
2176 		info->si_pid = task_pid_vnr(current->parent);
2177 		info->si_uid = from_kuid_munged(current_user_ns(),
2178 						task_uid(current->parent));
2179 		rcu_read_unlock();
2180 	}
2181 
2182 	/* If the (new) signal is now blocked, requeue it.  */
2183 	if (sigismember(&current->blocked, signr)) {
2184 		specific_send_sig_info(signr, info, current);
2185 		signr = 0;
2186 	}
2187 
2188 	return signr;
2189 }
2190 
2191 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
2192 			  struct pt_regs *regs, void *cookie)
2193 {
2194 	struct sighand_struct *sighand = current->sighand;
2195 	struct signal_struct *signal = current->signal;
2196 	int signr;
2197 
2198 	if (unlikely(current->task_works))
2199 		task_work_run();
2200 
2201 	if (unlikely(uprobe_deny_signal()))
2202 		return 0;
2203 
2204 	/*
2205 	 * Do this once, we can't return to user-mode if freezing() == T.
2206 	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2207 	 * thus do not need another check after return.
2208 	 */
2209 	try_to_freeze();
2210 
2211 relock:
2212 	spin_lock_irq(&sighand->siglock);
2213 	/*
2214 	 * Every stopped thread goes here after wakeup. Check to see if
2215 	 * we should notify the parent; prepare_signal(SIGCONT) encodes
2216 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2217 	 */
2218 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2219 		int why;
2220 
2221 		if (signal->flags & SIGNAL_CLD_CONTINUED)
2222 			why = CLD_CONTINUED;
2223 		else
2224 			why = CLD_STOPPED;
2225 
2226 		signal->flags &= ~SIGNAL_CLD_MASK;
2227 
2228 		spin_unlock_irq(&sighand->siglock);
2229 
2230 		/*
2231 		 * Notify the parent that we're continuing.  This event is
2232 	 * always per-process and doesn't make a whole lot of sense
2233 		 * for ptracers, who shouldn't consume the state via
2234 		 * wait(2) either, but, for backward compatibility, notify
2235 		 * the ptracer of the group leader too unless it's gonna be
2236 		 * a duplicate.
2237 		 */
2238 		read_lock(&tasklist_lock);
2239 		do_notify_parent_cldstop(current, false, why);
2240 
2241 		if (ptrace_reparented(current->group_leader))
2242 			do_notify_parent_cldstop(current->group_leader,
2243 						true, why);
2244 		read_unlock(&tasklist_lock);
2245 
2246 		goto relock;
2247 	}
2248 
2249 	for (;;) {
2250 		struct k_sigaction *ka;
2251 
2252 		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2253 		    do_signal_stop(0))
2254 			goto relock;
2255 
2256 		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2257 			do_jobctl_trap();
2258 			spin_unlock_irq(&sighand->siglock);
2259 			goto relock;
2260 		}
2261 
2262 		signr = dequeue_signal(current, &current->blocked, info);
2263 
2264 		if (!signr)
2265 			break; /* will return 0 */
2266 
2267 		if (unlikely(current->ptrace) && signr != SIGKILL) {
2268 			signr = ptrace_signal(signr, info);
2269 			if (!signr)
2270 				continue;
2271 		}
2272 
2273 		ka = &sighand->action[signr-1];
2274 
2275 		/* Trace actually delivered signals. */
2276 		trace_signal_deliver(signr, info, ka);
2277 
2278 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2279 			continue;
2280 		if (ka->sa.sa_handler != SIG_DFL) {
2281 			/* Run the handler.  */
2282 			*return_ka = *ka;
2283 
2284 			if (ka->sa.sa_flags & SA_ONESHOT)
2285 				ka->sa.sa_handler = SIG_DFL;
2286 
2287 			break; /* will return non-zero "signr" value */
2288 		}
2289 
2290 		/*
2291 		 * Now we are doing the default action for this signal.
2292 		 */
2293 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2294 			continue;
2295 
2296 		/*
2297 		 * Global init gets no signals it doesn't want.
2298 		 * Container-init gets no signals it doesn't want from same
2299 	 * Container-init gets no signals it doesn't want from the same
2300 		 *
2301 		 * Note that if global/container-init sees a sig_kernel_only()
2302 		 * signal here, the signal must have been generated internally
2303 		 * or must have come from an ancestor namespace. In either
2304 		 * case, the signal cannot be dropped.
2305 		 */
2306 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2307 				!sig_kernel_only(signr))
2308 			continue;
2309 
2310 		if (sig_kernel_stop(signr)) {
2311 			/*
2312 			 * The default action is to stop all threads in
2313 			 * the thread group.  The job control signals
2314 			 * do nothing in an orphaned pgrp, but SIGSTOP
2315 			 * always works.  Note that siglock needs to be
2316 			 * dropped during the call to is_orphaned_pgrp()
2317 			 * because of lock ordering with tasklist_lock.
2318 			 * This allows an intervening SIGCONT to be posted.
2319 			 * We need to check for that and bail out if necessary.
2320 			 */
2321 			if (signr != SIGSTOP) {
2322 				spin_unlock_irq(&sighand->siglock);
2323 
2324 				/* signals can be posted during this window */
2325 
2326 				if (is_current_pgrp_orphaned())
2327 					goto relock;
2328 
2329 				spin_lock_irq(&sighand->siglock);
2330 			}
2331 
2332 			if (likely(do_signal_stop(info->si_signo))) {
2333 				/* It released the siglock.  */
2334 				goto relock;
2335 			}
2336 
2337 			/*
2338 			 * We didn't actually stop, due to a race
2339 			 * with SIGCONT or something like that.
2340 			 */
2341 			continue;
2342 		}
2343 
2344 		spin_unlock_irq(&sighand->siglock);
2345 
2346 		/*
2347 		 * Anything else is fatal, maybe with a core dump.
2348 		 */
2349 		current->flags |= PF_SIGNALED;
2350 
2351 		if (sig_kernel_coredump(signr)) {
2352 			if (print_fatal_signals)
2353 				print_fatal_signal(info->si_signo);
2354 			proc_coredump_connector(current);
2355 			/*
2356 			 * If it was able to dump core, this kills all
2357 			 * other threads in the group and synchronizes with
2358 			 * their demise.  If we lost the race with another
2359 			 * thread getting here, it set group_exit_code
2360 			 * first and our do_group_exit call below will use
2361 			 * that value and ignore the one we pass it.
2362 			 */
2363 			do_coredump(info);
2364 		}
2365 
2366 		/*
2367 		 * Death signals, no core dump.
2368 		 */
2369 		do_group_exit(info->si_signo);
2370 		/* NOTREACHED */
2371 	}
2372 	spin_unlock_irq(&sighand->siglock);
2373 	return signr;
2374 }
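
/*
 * Schematic of the arch-side caller (illustrative only; each arch's
 * real do_signal() also handles syscall restart and its own frame
 * layout, and handle_signal() here stands in for the arch helper):
 *
 *	static void do_signal(struct pt_regs *regs)
 *	{
 *		struct k_sigaction ka;
 *		siginfo_t info;
 *		int signr;
 *
 *		signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 *		if (signr > 0)
 *			handle_signal(signr, &info, &ka, regs);
 *	}
 */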
2375 
2376 /**
2377  * signal_delivered - bookkeeping after a signal has been delivered
2378  * @sig:		number of signal being delivered
2379  * @info:		siginfo_t of signal being delivered
2380  * @ka:			sigaction setting that chose the handler
2381  * @regs:		user register state
2382  * @stepping:		nonzero if debugger single-step or block-step in use
2383  *
2384  * This function should be called when a signal has successfully been
2385  * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask
2386  * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2387  * is set in @ka->sa.sa_flags).  Tracing is notified.
2388  */
2389 void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka,
2390 			struct pt_regs *regs, int stepping)
2391 {
2392 	sigset_t blocked;
2393 
2394 	/* A signal was successfully delivered, and the
2395 	   saved sigmask was stored on the signal frame,
2396 	   and will be restored by sigreturn.  So we can
2397 	   simply clear the restore sigmask flag.  */
2398 	clear_restore_sigmask();
2399 
2400 	sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
2401 	if (!(ka->sa.sa_flags & SA_NODEFER))
2402 		sigaddset(&blocked, sig);
2403 	set_current_blocked(&blocked);
2404 	tracehook_signal_handler(sig, info, ka, regs, stepping);
2405 }
2406 
2407 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2408 {
2409 	if (failed)
2410 		force_sigsegv(ksig->sig, current);
2411 	else
2412 		signal_delivered(ksig->sig, &ksig->info, &ksig->ka,
2413 			signal_pt_regs(), stepping);
2414 }
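
/*
 * Schematic of how an arch is expected to use signal_setup_done()
 * (illustrative; setup_rt_frame() stands in for the arch-specific
 * frame builder, and the single-step test varies per arch):
 *
 *	static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 *	{
 *		int failed = setup_rt_frame(ksig, regs);
 *
 *		signal_setup_done(failed, ksig, test_thread_flag(TIF_SINGLESTEP));
 *	}
 */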
2415 
2416 /*
2417  * It could be that complete_signal() picked us to notify about the
2418  * group-wide signal. Other threads should be notified now to take
2419  * the shared signals in @which since we will not.
2420  */
2421 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2422 {
2423 	sigset_t retarget;
2424 	struct task_struct *t;
2425 
2426 	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2427 	if (sigisemptyset(&retarget))
2428 		return;
2429 
2430 	t = tsk;
2431 	while_each_thread(tsk, t) {
2432 		if (t->flags & PF_EXITING)
2433 			continue;
2434 
2435 		if (!has_pending_signals(&retarget, &t->blocked))
2436 			continue;
2437 		/* Remove the signals this thread can handle. */
2438 		sigandsets(&retarget, &retarget, &t->blocked);
2439 
2440 		if (!signal_pending(t))
2441 			signal_wake_up(t, 0);
2442 
2443 		if (sigisemptyset(&retarget))
2444 			break;
2445 	}
2446 }
2447 
2448 void exit_signals(struct task_struct *tsk)
2449 {
2450 	int group_stop = 0;
2451 	sigset_t unblocked;
2452 
2453 	/*
2454 	 * @tsk is about to have PF_EXITING set - lock out users which
2455 	 * expect stable threadgroup.
2456 	 */
2457 	threadgroup_change_begin(tsk);
2458 
2459 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2460 		tsk->flags |= PF_EXITING;
2461 		threadgroup_change_end(tsk);
2462 		return;
2463 	}
2464 
2465 	spin_lock_irq(&tsk->sighand->siglock);
2466 	/*
2467 	 * From now this task is not visible for group-wide signals,
2468 	 * see wants_signal(), do_signal_stop().
2469 	 */
2470 	tsk->flags |= PF_EXITING;
2471 
2472 	threadgroup_change_end(tsk);
2473 
2474 	if (!signal_pending(tsk))
2475 		goto out;
2476 
2477 	unblocked = tsk->blocked;
2478 	signotset(&unblocked);
2479 	retarget_shared_pending(tsk, &unblocked);
2480 
2481 	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2482 	    task_participate_group_stop(tsk))
2483 		group_stop = CLD_STOPPED;
2484 out:
2485 	spin_unlock_irq(&tsk->sighand->siglock);
2486 
2487 	/*
2488 	 * If group stop has completed, deliver the notification.  This
2489 	 * should always go to the real parent of the group leader.
2490 	 */
2491 	if (unlikely(group_stop)) {
2492 		read_lock(&tasklist_lock);
2493 		do_notify_parent_cldstop(tsk, false, group_stop);
2494 		read_unlock(&tasklist_lock);
2495 	}
2496 }
2497 
2498 EXPORT_SYMBOL(recalc_sigpending);
2499 EXPORT_SYMBOL_GPL(dequeue_signal);
2500 EXPORT_SYMBOL(flush_signals);
2501 EXPORT_SYMBOL(force_sig);
2502 EXPORT_SYMBOL(send_sig);
2503 EXPORT_SYMBOL(send_sig_info);
2504 EXPORT_SYMBOL(sigprocmask);
2505 EXPORT_SYMBOL(block_all_signals);
2506 EXPORT_SYMBOL(unblock_all_signals);
2507 
2508 
2509 /*
2510  * System call entry points.
2511  */
2512 
2513 /**
2514  *  sys_restart_syscall - restart a system call
2515  */
2516 SYSCALL_DEFINE0(restart_syscall)
2517 {
2518 	struct restart_block *restart = &current_thread_info()->restart_block;
2519 	return restart->fn(restart);
2520 }
2521 
2522 long do_no_restart_syscall(struct restart_block *param)
2523 {
2524 	return -EINTR;
2525 }
2526 
2527 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2528 {
2529 	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2530 		sigset_t newblocked;
2531 		/* A set of now blocked but previously unblocked signals. */
2532 		sigandnsets(&newblocked, newset, &current->blocked);
2533 		retarget_shared_pending(tsk, &newblocked);
2534 	}
2535 	tsk->blocked = *newset;
2536 	recalc_sigpending();
2537 }
2538 
2539 /**
2540  * set_current_blocked - change current->blocked mask
2541  * @newset: new mask
2542  *
2543  * It is wrong to change ->blocked directly; this helper should be used
2544  * to ensure the process can't miss a shared signal we are going to block.
2545  */
2546 void set_current_blocked(sigset_t *newset)
2547 {
2548 	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2549 	__set_current_blocked(newset);
2550 }
2551 
2552 void __set_current_blocked(const sigset_t *newset)
2553 {
2554 	struct task_struct *tsk = current;
2555 
2556 	spin_lock_irq(&tsk->sighand->siglock);
2557 	__set_task_blocked(tsk, newset);
2558 	spin_unlock_irq(&tsk->sighand->siglock);
2559 }
2560 
2561 /*
2562  * This is also useful for kernel threads that want to temporarily
2563  * (or permanently) block certain signals.
2564  *
2565  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2566  * interface happily blocks "unblockable" signals like SIGKILL
2567  * and friends.
2568  */
2569 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2570 {
2571 	struct task_struct *tsk = current;
2572 	sigset_t newset;
2573 
2574 	/* Lockless, only current can change ->blocked, never from irq */
2575 	if (oldset)
2576 		*oldset = tsk->blocked;
2577 
2578 	switch (how) {
2579 	case SIG_BLOCK:
2580 		sigorsets(&newset, &tsk->blocked, set);
2581 		break;
2582 	case SIG_UNBLOCK:
2583 		sigandnsets(&newset, &tsk->blocked, set);
2584 		break;
2585 	case SIG_SETMASK:
2586 		newset = *set;
2587 		break;
2588 	default:
2589 		return -EINVAL;
2590 	}
2591 
2592 	__set_current_blocked(&newset);
2593 	return 0;
2594 }
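
/*
 * Minimal in-kernel sketch (illustrative): a kernel thread blocking
 * everything except SIGKILL before doing interruptible work.  Note
 * that, per the comment above, this interface would also happily
 * block SIGKILL if asked to.
 *
 *	sigset_t set;
 *
 *	siginitsetinv(&set, sigmask(SIGKILL));
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 */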
2595 
2596 /**
2597  *  sys_rt_sigprocmask - change the list of currently blocked signals
2598  *  @how: whether to add, remove, or set signals
2599  *  @nset: new set of blocked signals (if non-null)
2600  *  @oset: previous value of signal mask if non-null
2601  *  @sigsetsize: size of sigset_t type
2602  */
2603 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2604 		sigset_t __user *, oset, size_t, sigsetsize)
2605 {
2606 	sigset_t old_set, new_set;
2607 	int error;
2608 
2609 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2610 	if (sigsetsize != sizeof(sigset_t))
2611 		return -EINVAL;
2612 
2613 	old_set = current->blocked;
2614 
2615 	if (nset) {
2616 		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2617 			return -EFAULT;
2618 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2619 
2620 		error = sigprocmask(how, &new_set, NULL);
2621 		if (error)
2622 			return error;
2623 	}
2624 
2625 	if (oset) {
2626 		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2627 			return -EFAULT;
2628 	}
2629 
2630 	return 0;
2631 }
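
/*
 * Illustrative user-space sketch (not part of this file): the glibc
 * sigprocmask(3) wrapper is the usual route into this syscall.
 *
 *	#include <signal.h>
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);	// block SIGINT
 *	// ... critical section ...
 *	sigprocmask(SIG_SETMASK, &old, NULL);	// restore the old mask
 */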
2632 
2633 #ifdef CONFIG_COMPAT
2634 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2635 		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2636 {
2637 #ifdef __BIG_ENDIAN
2638 	sigset_t old_set = current->blocked;
2639 
2640 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2641 	if (sigsetsize != sizeof(sigset_t))
2642 		return -EINVAL;
2643 
2644 	if (nset) {
2645 		compat_sigset_t new32;
2646 		sigset_t new_set;
2647 		int error;
2648 		if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
2649 			return -EFAULT;
2650 
2651 		sigset_from_compat(&new_set, &new32);
2652 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2653 
2654 		error = sigprocmask(how, &new_set, NULL);
2655 		if (error)
2656 			return error;
2657 	}
2658 	if (oset) {
2659 		compat_sigset_t old32;
2660 		sigset_to_compat(&old32, &old_set);
2661 		if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
2662 			return -EFAULT;
2663 	}
2664 	return 0;
2665 #else
2666 	return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
2667 				  (sigset_t __user *)oset, sigsetsize);
2668 #endif
2669 }
2670 #endif
2671 
2672 static int do_sigpending(void *set, unsigned long sigsetsize)
2673 {
2674 	if (sigsetsize > sizeof(sigset_t))
2675 		return -EINVAL;
2676 
2677 	spin_lock_irq(&current->sighand->siglock);
2678 	sigorsets(set, &current->pending.signal,
2679 		  &current->signal->shared_pending.signal);
2680 	spin_unlock_irq(&current->sighand->siglock);
2681 
2682 	/* Outside the lock because only this thread touches it.  */
2683 	sigandsets(set, &current->blocked, set);
2684 	return 0;
2685 }
2686 
2687 /**
2688  *  sys_rt_sigpending - examine signals pending delivery (raised while
2689  *			blocked)
2690  *  @uset: where the set of pending signals is returned
2691  *  @sigsetsize: size of sigset_t type, at most sizeof(sigset_t)
2692  */
2693 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2694 {
2695 	sigset_t set;
2696 	int err = do_sigpending(&set, sigsetsize);
2697 	if (!err && copy_to_user(uset, &set, sigsetsize))
2698 		err = -EFAULT;
2699 	return err;
2700 }
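
/*
 * Illustrative user-space sketch (not part of this file): checking
 * whether a blocked SIGINT was raised while we were busy.
 *
 *	#include <signal.h>
 *
 *	sigset_t pending;
 *
 *	sigpending(&pending);
 *	if (sigismember(&pending, SIGINT))
 *		;	// SIGINT was raised but is still blocked
 */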
2701 
2702 #ifdef CONFIG_COMPAT
2703 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2704 		compat_size_t, sigsetsize)
2705 {
2706 #ifdef __BIG_ENDIAN
2707 	sigset_t set;
2708 	int err = do_sigpending(&set, sigsetsize);
2709 	if (!err) {
2710 		compat_sigset_t set32;
2711 		sigset_to_compat(&set32, &set);
2712 		/* we can get here only if sigsetsize <= sizeof(set) */
2713 		if (copy_to_user(uset, &set32, sigsetsize))
2714 			err = -EFAULT;
2715 	}
2716 	return err;
2717 #else
2718 	return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
2719 #endif
2720 }
2721 #endif
2722 
2723 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2724 
2725 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2726 {
2727 	int err;
2728 
2729 	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
2730 		return -EFAULT;
2731 	if (from->si_code < 0)
2732 		return __copy_to_user(to, from, sizeof(siginfo_t))
2733 			? -EFAULT : 0;
2734 	/*
2735 	 * If you change siginfo_t structure, please be sure
2736 	 * this code is fixed accordingly.
2737 	 * Please remember to update the signalfd_copyinfo() function
2738 	 * inside fs/signalfd.c too, in case siginfo_t changes.
2739 	 * It should never copy any pad contained in the structure
2740 	 * to avoid security leaks, but must copy the generic
2741 	 * 3 ints plus the relevant union member.
2742 	 */
2743 	err = __put_user(from->si_signo, &to->si_signo);
2744 	err |= __put_user(from->si_errno, &to->si_errno);
2745 	err |= __put_user((short)from->si_code, &to->si_code);
2746 	switch (from->si_code & __SI_MASK) {
2747 	case __SI_KILL:
2748 		err |= __put_user(from->si_pid, &to->si_pid);
2749 		err |= __put_user(from->si_uid, &to->si_uid);
2750 		break;
2751 	case __SI_TIMER:
2752 		err |= __put_user(from->si_tid, &to->si_tid);
2753 		err |= __put_user(from->si_overrun, &to->si_overrun);
2754 		err |= __put_user(from->si_ptr, &to->si_ptr);
2755 		break;
2756 	case __SI_POLL:
2757 		err |= __put_user(from->si_band, &to->si_band);
2758 		err |= __put_user(from->si_fd, &to->si_fd);
2759 		break;
2760 	case __SI_FAULT:
2761 		err |= __put_user(from->si_addr, &to->si_addr);
2762 #ifdef __ARCH_SI_TRAPNO
2763 		err |= __put_user(from->si_trapno, &to->si_trapno);
2764 #endif
2765 #ifdef BUS_MCEERR_AO
2766 		/*
2767 		 * Other callers might not initialize the si_lsb field,
2768 		 * so check explicitly for the right codes here.
2769 		 */
2770 		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
2771 			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2772 #endif
2773 		break;
2774 	case __SI_CHLD:
2775 		err |= __put_user(from->si_pid, &to->si_pid);
2776 		err |= __put_user(from->si_uid, &to->si_uid);
2777 		err |= __put_user(from->si_status, &to->si_status);
2778 		err |= __put_user(from->si_utime, &to->si_utime);
2779 		err |= __put_user(from->si_stime, &to->si_stime);
2780 		break;
2781 	case __SI_RT: /* This is not generated by the kernel as of now. */
2782 	case __SI_MESGQ: /* But this is */
2783 		err |= __put_user(from->si_pid, &to->si_pid);
2784 		err |= __put_user(from->si_uid, &to->si_uid);
2785 		err |= __put_user(from->si_ptr, &to->si_ptr);
2786 		break;
2787 #ifdef __ARCH_SIGSYS
2788 	case __SI_SYS:
2789 		err |= __put_user(from->si_call_addr, &to->si_call_addr);
2790 		err |= __put_user(from->si_syscall, &to->si_syscall);
2791 		err |= __put_user(from->si_arch, &to->si_arch);
2792 		break;
2793 #endif
2794 	default: /* this is just in case for now ... */
2795 		err |= __put_user(from->si_pid, &to->si_pid);
2796 		err |= __put_user(from->si_uid, &to->si_uid);
2797 		break;
2798 	}
2799 	return err;
2800 }
2801 
2802 #endif
2803 
2804 /**
2805  *  do_sigtimedwait - wait for queued signals specified in @which
2806  *  @which: queued signals to wait for
2807  *  @info: if non-null, the signal's siginfo is returned here
2808  *  @ts: upper bound on process time suspension
2809  */
2810 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2811 			const struct timespec *ts)
2812 {
2813 	struct task_struct *tsk = current;
2814 	long timeout = MAX_SCHEDULE_TIMEOUT;
2815 	sigset_t mask = *which;
2816 	int sig;
2817 
2818 	if (ts) {
2819 		if (!timespec_valid(ts))
2820 			return -EINVAL;
2821 		timeout = timespec_to_jiffies(ts);
2822 		/*
2823 		 * We can be close to the next tick, add another one
2824 		 * to ensure we will wait at least the time asked for.
2825 		 */
2826 		if (ts->tv_sec || ts->tv_nsec)
2827 			timeout++;
2828 	}
2829 
2830 	/*
2831 	 * Invert the set of allowed signals to get those we want to block.
2832 	 */
2833 	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2834 	signotset(&mask);
2835 
2836 	spin_lock_irq(&tsk->sighand->siglock);
2837 	sig = dequeue_signal(tsk, &mask, info);
2838 	if (!sig && timeout) {
2839 		/*
2840 		 * None ready, temporarily unblock those we're interested in
2841 		 * while we are sleeping so that we'll be awakened when
2842 		 * they arrive. Unblocking is always fine; we can avoid
2843 		 * set_current_blocked().
2844 		 */
2845 		tsk->real_blocked = tsk->blocked;
2846 		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2847 		recalc_sigpending();
2848 		spin_unlock_irq(&tsk->sighand->siglock);
2849 
2850 		timeout = schedule_timeout_interruptible(timeout);
2851 
2852 		spin_lock_irq(&tsk->sighand->siglock);
2853 		__set_task_blocked(tsk, &tsk->real_blocked);
2854 		siginitset(&tsk->real_blocked, 0);
2855 		sig = dequeue_signal(tsk, &mask, info);
2856 	}
2857 	spin_unlock_irq(&tsk->sighand->siglock);
2858 
2859 	if (sig)
2860 		return sig;
2861 	return timeout ? -EINTR : -EAGAIN;
2862 }
2863 
2864 /**
2865  *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
2866  *			in @uthese
2867  *  @uthese: queued signals to wait for
2868  *  @uinfo: if non-null, the signal's siginfo is returned here
2869  *  @uts: upper bound on process time suspension
2870  *  @sigsetsize: size of sigset_t type
2871  */
2872 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2873 		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2874 		size_t, sigsetsize)
2875 {
2876 	sigset_t these;
2877 	struct timespec ts;
2878 	siginfo_t info;
2879 	int ret;
2880 
2881 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2882 	if (sigsetsize != sizeof(sigset_t))
2883 		return -EINVAL;
2884 
2885 	if (copy_from_user(&these, uthese, sizeof(these)))
2886 		return -EFAULT;
2887 
2888 	if (uts) {
2889 		if (copy_from_user(&ts, uts, sizeof(ts)))
2890 			return -EFAULT;
2891 	}
2892 
2893 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2894 
2895 	if (ret > 0 && uinfo) {
2896 		if (copy_siginfo_to_user(uinfo, &info))
2897 			ret = -EFAULT;
2898 	}
2899 
2900 	return ret;
2901 }
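
/*
 * Illustrative user-space sketch (not part of this file): synchronous
 * signal handling with sigtimedwait(2).  The signal must be blocked
 * first, or it may be delivered asynchronously instead of staying
 * queued for us.  handle_usr1() is hypothetical.
 *
 *	#include <signal.h>
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &info, &ts) == SIGUSR1)
 *		handle_usr1(info.si_pid);
 */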
2902 
2903 /**
2904  *  sys_kill - send a signal to a process
2905  *  @pid: the PID of the process
2906  *  @sig: signal to be sent
2907  */
2908 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2909 {
2910 	struct siginfo info;
2911 
2912 	info.si_signo = sig;
2913 	info.si_errno = 0;
2914 	info.si_code = SI_USER;
2915 	info.si_pid = task_tgid_vnr(current);
2916 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2917 
2918 	return kill_something_info(sig, &info, pid);
2919 }
2920 
2921 static int
2922 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2923 {
2924 	struct task_struct *p;
2925 	int error = -ESRCH;
2926 
2927 	rcu_read_lock();
2928 	p = find_task_by_vpid(pid);
2929 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2930 		error = check_kill_permission(sig, info, p);
2931 		/*
2932 		 * The null signal is a permissions and process existence
2933 		 * probe.  No signal is actually delivered.
2934 		 */
2935 		if (!error && sig) {
2936 			error = do_send_sig_info(sig, info, p, false);
2937 			/*
2938 			 * If lock_task_sighand() failed we pretend the task
2939 			 * dies after receiving the signal. The window is tiny,
2940 			 * and the signal is private anyway.
2941 			 */
2942 			if (unlikely(error == -ESRCH))
2943 				error = 0;
2944 		}
2945 	}
2946 	rcu_read_unlock();
2947 
2948 	return error;
2949 }
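
/*
 * Illustrative user-space sketch (not part of this file): the "null
 * signal" probe described above -- kill(2) with sig == 0 checks
 * existence and permission without delivering anything.
 *
 *	#include <signal.h>
 *	#include <errno.h>
 *
 *	if (kill(pid, 0) == 0)
 *		;	// process exists and we may signal it
 *	else if (errno == ESRCH)
 *		;	// no such process
 *	else if (errno == EPERM)
 *		;	// it exists, but we lack permission
 */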
2950 
2951 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2952 {
2953 	struct siginfo info;
2954 
2955 	info.si_signo = sig;
2956 	info.si_errno = 0;
2957 	info.si_code = SI_TKILL;
2958 	info.si_pid = task_tgid_vnr(current);
2959 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2960 
2961 	return do_send_specific(tgid, pid, sig, &info);
2962 }
2963 
2964 /**
2965  *  sys_tgkill - send signal to one specific thread
2966  *  @tgid: the thread group ID of the thread
2967  *  @pid: the PID of the thread
2968  *  @sig: signal to be sent
2969  *
2970  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2971  *  exists but no longer belongs to the target process. This
2972  *  method solves the problem of threads exiting and PIDs getting reused.
2973  */
2974 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2975 {
2976 	/* This is only valid for single tasks */
2977 	if (pid <= 0 || tgid <= 0)
2978 		return -EINVAL;
2979 
2980 	return do_tkill(tgid, pid, sig);
2981 }
2982 
2983 /**
2984  *  sys_tkill - send signal to one specific task
2985  *  @pid: the PID of the task
2986  *  @sig: signal to be sent
2987  *
2988  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2989  */
2990 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2991 {
2992 	/* This is only valid for single tasks */
2993 	if (pid <= 0)
2994 		return -EINVAL;
2995 
2996 	return do_tkill(0, pid, sig);
2997 }
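
/*
 * Illustrative user-space sketch (not part of this file): directing a
 * signal at one specific thread.  glibc historically ships no tgkill
 * wrapper, so syscall(2) is the common route.
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <signal.h>
 *
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */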
2998 
2999 static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
3000 {
3001 	/* Not even root can pretend to send signals from the kernel.
3002 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3003 	 */
3004 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3005 	    (task_pid_vnr(current) != pid)) {
3006 		/* We used to allow any < 0 si_code */
3007 		WARN_ON_ONCE(info->si_code < 0);
3008 		return -EPERM;
3009 	}
3010 	info->si_signo = sig;
3011 
3012 	/* POSIX.1b doesn't mention process groups.  */
3013 	return kill_proc_info(sig, info, pid);
3014 }
3015 
3016 /**
3017  *  sys_rt_sigqueueinfo - queue a signal and supplied siginfo to a process
3018  *  @pid: the PID of the process
3019  *  @sig: signal to be sent
3020  *  @uinfo: signal info to be sent
3021  */
3022 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3023 		siginfo_t __user *, uinfo)
3024 {
3025 	siginfo_t info;
3026 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3027 		return -EFAULT;
3028 	return do_rt_sigqueueinfo(pid, sig, &info);
3029 }
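
/*
 * Illustrative user-space sketch (not part of this file): the usual
 * entry point here is glibc's sigqueue(3), which builds the siginfo
 * with SI_QUEUE and the caller's payload.  The receiver needs an
 * SA_SIGINFO handler to see the value.
 *
 *	#include <signal.h>
 *
 *	union sigval val = { .sival_int = 42 };
 *
 *	sigqueue(pid, SIGUSR1, val);	// receiver: info->si_value.sival_int
 */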
3030 
3031 #ifdef CONFIG_COMPAT
3032 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3033 			compat_pid_t, pid,
3034 			int, sig,
3035 			struct compat_siginfo __user *, uinfo)
3036 {
3037 	siginfo_t info;
3038 	int ret = copy_siginfo_from_user32(&info, uinfo);
3039 	if (unlikely(ret))
3040 		return ret;
3041 	return do_rt_sigqueueinfo(pid, sig, &info);
3042 }
3043 #endif
3044 
3045 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3046 {
3047 	/* This is only valid for single tasks */
3048 	if (pid <= 0 || tgid <= 0)
3049 		return -EINVAL;
3050 
3051 	/* Not even root can pretend to send signals from the kernel.
3052 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3053 	 */
3054 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3055 	    (task_pid_vnr(current) != pid)) {
3056 		/* We used to allow any < 0 si_code */
3057 		WARN_ON_ONCE(info->si_code < 0);
3058 		return -EPERM;
3059 	}
3060 	info->si_signo = sig;
3061 
3062 	return do_send_specific(tgid, pid, sig, info);
3063 }
3064 
3065 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3066 		siginfo_t __user *, uinfo)
3067 {
3068 	siginfo_t info;
3069 
3070 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3071 		return -EFAULT;
3072 
3073 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3074 }
3075 
3076 #ifdef CONFIG_COMPAT
3077 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3078 			compat_pid_t, tgid,
3079 			compat_pid_t, pid,
3080 			int, sig,
3081 			struct compat_siginfo __user *, uinfo)
3082 {
3083 	siginfo_t info;
3084 
3085 	if (copy_siginfo_from_user32(&info, uinfo))
3086 		return -EFAULT;
3087 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3088 }
3089 #endif
3090 
3091 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3092 {
3093 	struct task_struct *t = current;
3094 	struct k_sigaction *k;
3095 	sigset_t mask;
3096 
3097 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3098 		return -EINVAL;
3099 
3100 	k = &t->sighand->action[sig-1];
3101 
3102 	spin_lock_irq(&current->sighand->siglock);
3103 	if (oact)
3104 		*oact = *k;
3105 
3106 	if (act) {
3107 		sigdelsetmask(&act->sa.sa_mask,
3108 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
3109 		*k = *act;
3110 		/*
3111 		 * POSIX 3.3.1.3:
3112 		 *  "Setting a signal action to SIG_IGN for a signal that is
3113 		 *   pending shall cause the pending signal to be discarded,
3114 		 *   whether or not it is blocked."
3115 		 *
3116 		 *  "Setting a signal action to SIG_DFL for a signal that is
3117 		 *   pending and whose default action is to ignore the signal
3118 		 *   (for example, SIGCHLD), shall cause the pending signal to
3119 		 *   be discarded, whether or not it is blocked"
3120 		 */
3121 		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
3122 			sigemptyset(&mask);
3123 			sigaddset(&mask, sig);
3124 			rm_from_queue_full(&mask, &t->signal->shared_pending);
3125 			do {
3126 				rm_from_queue_full(&mask, &t->pending);
3127 				t = next_thread(t);
3128 			} while (t != current);
3129 		}
3130 	}
3131 
3132 	spin_unlock_irq(&current->sighand->siglock);
3133 	return 0;
3134 }
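
/*
 * Illustrative user-space sketch (not part of this file): installing
 * a handler through sigaction(2), which ends up in do_sigaction()
 * above.
 *
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	static void on_usr1(int sig) { }
 *
 *	struct sigaction sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = on_usr1;
 *	sa.sa_flags = SA_RESTART;	// restart interrupted syscalls
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGUSR1, &sa, NULL);
 */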
3135 
3136 static int
3137 do_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
3138 {
3139 	stack_t oss;
3140 	int error;
3141 
3142 	oss.ss_sp = (void __user *) current->sas_ss_sp;
3143 	oss.ss_size = current->sas_ss_size;
3144 	oss.ss_flags = sas_ss_flags(sp);
3145 
3146 	if (uss) {
3147 		void __user *ss_sp;
3148 		size_t ss_size;
3149 		int ss_flags;
3150 
3151 		error = -EFAULT;
3152 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
3153 			goto out;
3154 		error = __get_user(ss_sp, &uss->ss_sp) |
3155 			__get_user(ss_flags, &uss->ss_flags) |
3156 			__get_user(ss_size, &uss->ss_size);
3157 		if (error)
3158 			goto out;
3159 
3160 		error = -EPERM;
3161 		if (on_sig_stack(sp))
3162 			goto out;
3163 
3164 		error = -EINVAL;
3165 		/*
3166 		 * Note - this code used to test ss_flags incorrectly:
3167 		 *  	  old code may have been written using ss_flags==0
3168 		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
3169 		 *	  way that worked) - this fix preserves that older
3170 		 *	  mechanism.
3171 		 */
3172 		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
3173 			goto out;
3174 
3175 		if (ss_flags == SS_DISABLE) {
3176 			ss_size = 0;
3177 			ss_sp = NULL;
3178 		} else {
3179 			error = -ENOMEM;
3180 			if (ss_size < MINSIGSTKSZ)
3181 				goto out;
3182 		}
3183 
3184 		current->sas_ss_sp = (unsigned long) ss_sp;
3185 		current->sas_ss_size = ss_size;
3186 	}
3187 
3188 	error = 0;
3189 	if (uoss) {
3190 		error = -EFAULT;
3191 		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
3192 			goto out;
3193 		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
3194 			__put_user(oss.ss_size, &uoss->ss_size) |
3195 			__put_user(oss.ss_flags, &uoss->ss_flags);
3196 	}
3197 
3198 out:
3199 	return error;
3200 }
3201 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
3202 {
3203 	return do_sigaltstack(uss, uoss, current_user_stack_pointer());
3204 }
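
/*
 * Illustrative user-space sketch (not part of this file): giving
 * signal handlers their own stack.  sigaltstack(2) only registers
 * the stack; a handler actually runs on it only when installed with
 * SA_ONSTACK.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *
 *	sigaltstack(&ss, NULL);
 */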
3205 
3206 int restore_altstack(const stack_t __user *uss)
3207 {
3208 	int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
3209 	/* squash all but -EFAULT for now */
3210 	return err == -EFAULT ? err : 0;
3211 }
3212 
3213 int __save_altstack(stack_t __user *uss, unsigned long sp)
3214 {
3215 	struct task_struct *t = current;
3216 	return  __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3217 		__put_user(sas_ss_flags(sp), &uss->ss_flags) |
3218 		__put_user(t->sas_ss_size, &uss->ss_size);
3219 }
3220 
3221 #ifdef CONFIG_COMPAT
3222 COMPAT_SYSCALL_DEFINE2(sigaltstack,
3223 			const compat_stack_t __user *, uss_ptr,
3224 			compat_stack_t __user *, uoss_ptr)
3225 {
3226 	stack_t uss, uoss;
3227 	int ret;
3228 	mm_segment_t seg;
3229 
3230 	if (uss_ptr) {
3231 		compat_stack_t uss32;
3232 
3233 		memset(&uss, 0, sizeof(stack_t));
3234 		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3235 			return -EFAULT;
3236 		uss.ss_sp = compat_ptr(uss32.ss_sp);
3237 		uss.ss_flags = uss32.ss_flags;
3238 		uss.ss_size = uss32.ss_size;
3239 	}
3240 	seg = get_fs();
3241 	set_fs(KERNEL_DS);
3242 	ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
3243 			     (stack_t __force __user *) &uoss,
3244 			     compat_user_stack_pointer());
3245 	set_fs(seg);
3246 	if (ret >= 0 && uoss_ptr)  {
3247 		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
3248 		    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
3249 		    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
3250 		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
3251 			ret = -EFAULT;
3252 	}
3253 	return ret;
3254 }
3255 
3256 int compat_restore_altstack(const compat_stack_t __user *uss)
3257 {
3258 	int err = compat_sys_sigaltstack(uss, NULL);
3259 	/* squash all but -EFAULT for now */
3260 	return err == -EFAULT ? err : 0;
3261 }
3262 
3263 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3264 {
3265 	struct task_struct *t = current;
3266 	return  __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) |
3267 		__put_user(sas_ss_flags(sp), &uss->ss_flags) |
3268 		__put_user(t->sas_ss_size, &uss->ss_size);
3269 }
3270 #endif
3271 
3272 #ifdef __ARCH_WANT_SYS_SIGPENDING
3273 
3274 /**
3275  *  sys_sigpending - examine pending signals
3276  *  @set: where mask of pending signal is returned
3277  */
3278 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
3279 {
3280 	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
3281 }
3282 
3283 #endif
3284 
3285 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3286 /**
3287  *  sys_sigprocmask - examine and change blocked signals
3288  *  @how: whether to add, remove, or set signals
3289  *  @nset: signals to add or remove (if non-null)
3290  *  @oset: previous value of signal mask if non-null
3291  *
3292  * Some platforms have their own version with special arguments;
3293  * others support only sys_rt_sigprocmask.
3294  */
3295 
3296 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3297 		old_sigset_t __user *, oset)
3298 {
3299 	old_sigset_t old_set, new_set;
3300 	sigset_t new_blocked;
3301 
3302 	old_set = current->blocked.sig[0];
3303 
3304 	if (nset) {
3305 		if (copy_from_user(&new_set, nset, sizeof(*nset)))
3306 			return -EFAULT;
3307 
3308 		new_blocked = current->blocked;
3309 
3310 		switch (how) {
3311 		case SIG_BLOCK:
3312 			sigaddsetmask(&new_blocked, new_set);
3313 			break;
3314 		case SIG_UNBLOCK:
3315 			sigdelsetmask(&new_blocked, new_set);
3316 			break;
3317 		case SIG_SETMASK:
3318 			new_blocked.sig[0] = new_set;
3319 			break;
3320 		default:
3321 			return -EINVAL;
3322 		}
3323 
3324 		set_current_blocked(&new_blocked);
3325 	}
3326 
3327 	if (oset) {
3328 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
3329 			return -EFAULT;
3330 	}
3331 
3332 	return 0;
3333 }
3334 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3335 
3336 #ifndef CONFIG_ODD_RT_SIGACTION
3337 /**
3338  *  sys_rt_sigaction - alter an action taken by a process
3339  *  @sig: signal to be sent
3340  *  @act: new sigaction
3341  *  @oact: used to save the previous sigaction
3342  *  @sigsetsize: size of sigset_t type
3343  */
3344 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3345 		const struct sigaction __user *, act,
3346 		struct sigaction __user *, oact,
3347 		size_t, sigsetsize)
3348 {
3349 	struct k_sigaction new_sa, old_sa;
3350 	int ret = -EINVAL;
3351 
3352 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3353 	if (sigsetsize != sizeof(sigset_t))
3354 		goto out;
3355 
3356 	if (act) {
3357 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3358 			return -EFAULT;
3359 	}
3360 
3361 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3362 
3363 	if (!ret && oact) {
3364 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3365 			return -EFAULT;
3366 	}
3367 out:
3368 	return ret;
3369 }
3370 #ifdef CONFIG_COMPAT
3371 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3372 		const struct compat_sigaction __user *, act,
3373 		struct compat_sigaction __user *, oact,
3374 		compat_size_t, sigsetsize)
3375 {
3376 	struct k_sigaction new_ka, old_ka;
3377 	compat_sigset_t mask;
3378 #ifdef __ARCH_HAS_SA_RESTORER
3379 	compat_uptr_t restorer;
3380 #endif
3381 	int ret;
3382 
3383 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3384 	if (sigsetsize != sizeof(compat_sigset_t))
3385 		return -EINVAL;
3386 
3387 	if (act) {
3388 		compat_uptr_t handler;
3389 		ret = get_user(handler, &act->sa_handler);
3390 		new_ka.sa.sa_handler = compat_ptr(handler);
3391 #ifdef __ARCH_HAS_SA_RESTORER
3392 		ret |= get_user(restorer, &act->sa_restorer);
3393 		new_ka.sa.sa_restorer = compat_ptr(restorer);
3394 #endif
3395 		ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
3396 		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
3397 		if (ret)
3398 			return -EFAULT;
3399 		sigset_from_compat(&new_ka.sa.sa_mask, &mask);
3400 	}
3401 
3402 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3403 	if (!ret && oact) {
3404 		sigset_to_compat(&mask, &old_ka.sa.sa_mask);
3405 		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3406 			       &oact->sa_handler);
3407 		ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
3408 		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3409 #ifdef __ARCH_HAS_SA_RESTORER
3410 		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3411 				&oact->sa_restorer);
3412 #endif
3413 	}
3414 	return ret;
3415 }
3416 #endif
3417 #endif /* !CONFIG_ODD_RT_SIGACTION */
3418 
3419 #ifdef CONFIG_OLD_SIGACTION
3420 SYSCALL_DEFINE3(sigaction, int, sig,
3421 		const struct old_sigaction __user *, act,
3422 	        struct old_sigaction __user *, oact)
3423 {
3424 	struct k_sigaction new_ka, old_ka;
3425 	int ret;
3426 
3427 	if (act) {
3428 		old_sigset_t mask;
3429 		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3430 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3431 		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3432 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3433 		    __get_user(mask, &act->sa_mask))
3434 			return -EFAULT;
3435 #ifdef __ARCH_HAS_KA_RESTORER
3436 		new_ka.ka_restorer = NULL;
3437 #endif
3438 		siginitset(&new_ka.sa.sa_mask, mask);
3439 	}
3440 
3441 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3442 
3443 	if (!ret && oact) {
3444 		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3445 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3446 		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3447 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3448 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3449 			return -EFAULT;
3450 	}
3451 
3452 	return ret;
3453 }
3454 #endif
3455 #ifdef CONFIG_COMPAT_OLD_SIGACTION
3456 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3457 		const struct compat_old_sigaction __user *, act,
3458 	        struct compat_old_sigaction __user *, oact)
3459 {
3460 	struct k_sigaction new_ka, old_ka;
3461 	int ret;
3462 	compat_old_sigset_t mask;
3463 	compat_uptr_t handler, restorer;
3464 
3465 	if (act) {
3466 		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3467 		    __get_user(handler, &act->sa_handler) ||
3468 		    __get_user(restorer, &act->sa_restorer) ||
3469 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3470 		    __get_user(mask, &act->sa_mask))
3471 			return -EFAULT;
3472 
3473 #ifdef __ARCH_HAS_KA_RESTORER
3474 		new_ka.ka_restorer = NULL;
3475 #endif
3476 		new_ka.sa.sa_handler = compat_ptr(handler);
3477 		new_ka.sa.sa_restorer = compat_ptr(restorer);
3478 		siginitset(&new_ka.sa.sa_mask, mask);
3479 	}
3480 
3481 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3482 
3483 	if (!ret && oact) {
3484 		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3485 		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3486 			       &oact->sa_handler) ||
3487 		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3488 			       &oact->sa_restorer) ||
3489 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3490 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3491 			return -EFAULT;
3492 	}
3493 	return ret;
3494 }
3495 #endif
3496 
3497 #ifdef __ARCH_WANT_SYS_SGETMASK
3498 
3499 /*
3500  * For backwards compatibility.  Functionality superseded by sigprocmask.
3501  */
3502 SYSCALL_DEFINE0(sgetmask)
3503 {
3504 	/* SMP safe */
3505 	return current->blocked.sig[0];
3506 }
3507 
3508 SYSCALL_DEFINE1(ssetmask, int, newmask)
3509 {
3510 	int old = current->blocked.sig[0];
3511 	sigset_t newset;
3512 
3513 	siginitset(&newset, newmask);
3514 	set_current_blocked(&newset);
3515 
3516 	return old;
3517 }
3518 #endif /* __ARCH_WANT_SYS_SGETMASK */
3519 
3520 #ifdef __ARCH_WANT_SYS_SIGNAL
3521 /*
3522  * For backwards compatibility.  Functionality superseded by sigaction.
3523  */
3524 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3525 {
3526 	struct k_sigaction new_sa, old_sa;
3527 	int ret;
3528 
3529 	new_sa.sa.sa_handler = handler;
3530 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3531 	sigemptyset(&new_sa.sa.sa_mask);
3532 
3533 	ret = do_sigaction(sig, &new_sa, &old_sa);
3534 
3535 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3536 }
3537 #endif /* __ARCH_WANT_SYS_SIGNAL */
3538 
3539 #ifdef __ARCH_WANT_SYS_PAUSE
3540 
3541 SYSCALL_DEFINE0(pause)
3542 {
3543 	while (!signal_pending(current)) {
3544 		current->state = TASK_INTERRUPTIBLE;
3545 		schedule();
3546 	}
3547 	return -ERESTARTNOHAND;
3548 }
3549 
3550 #endif
3551 
3552 int sigsuspend(sigset_t *set)
3553 {
3554 	current->saved_sigmask = current->blocked;
3555 	set_current_blocked(set);
3556 
3557 	current->state = TASK_INTERRUPTIBLE;
3558 	schedule();
3559 	set_restore_sigmask();
3560 	return -ERESTARTNOHAND;
3561 }
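
/*
 * Illustrative user-space sketch (not part of this file): the classic
 * race-free wait that sigsuspend exists for -- block the signal, test
 * the condition, then atomically unblock and sleep.  `cond` would be
 * a volatile sig_atomic_t set by the handler.
 *
 *	#include <signal.h>
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!cond)
 *		sigsuspend(&old);	// unblock + sleep, atomically
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */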
3562 
3563 /**
3564  *  sys_rt_sigsuspend - replace the signal mask with the @unewset
3565  *	value until a signal is received
3566  *  @unewset: new signal mask value
3567  *  @sigsetsize: size of sigset_t type
3568  */
3569 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3570 {
3571 	sigset_t newset;
3572 
3573 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3574 	if (sigsetsize != sizeof(sigset_t))
3575 		return -EINVAL;
3576 
3577 	if (copy_from_user(&newset, unewset, sizeof(newset)))
3578 		return -EFAULT;
3579 	return sigsuspend(&newset);
3580 }
3581 
3582 #ifdef CONFIG_COMPAT
3583 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3584 {
3585 #ifdef __BIG_ENDIAN
3586 	sigset_t newset;
3587 	compat_sigset_t newset32;
3588 
3589 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3590 	if (sigsetsize != sizeof(sigset_t))
3591 		return -EINVAL;
3592 
3593 	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
3594 		return -EFAULT;
3595 	sigset_from_compat(&newset, &newset32);
3596 	return sigsuspend(&newset);
3597 #else
3598 	/* on little-endian, bitmaps don't care about granularity */
3599 	return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
3600 #endif
3601 }
3602 #endif
3603 
3604 #ifdef CONFIG_OLD_SIGSUSPEND
3605 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3606 {
3607 	sigset_t blocked;
3608 	siginitset(&blocked, mask);
3609 	return sigsuspend(&blocked);
3610 }
3611 #endif
3612 #ifdef CONFIG_OLD_SIGSUSPEND3
3613 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3614 {
3615 	sigset_t blocked;
3616 	siginitset(&blocked, mask);
3617 	return sigsuspend(&blocked);
3618 }
3619 #endif
3620 
3621 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
3622 {
3623 	return NULL;
3624 }
3625 
3626 void __init signals_init(void)
3627 {
3628 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3629 }
3630 
3631 #ifdef CONFIG_KGDB_KDB
3632 #include <linux/kdb.h>
3633 /*
3634  * kdb_send_sig_info - Allows kdb to send signals without exposing
3635  * signal internals.  This function checks if the required locks are
3636  * available before calling the main signal code, to avoid kdb
3637  * deadlocks.
3638  */
3639 void
3640 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3641 {
3642 	static struct task_struct *kdb_prev_t;
3643 	int sig, new_t;
3644 	if (!spin_trylock(&t->sighand->siglock)) {
3645 		kdb_printf("Can't do kill command now.\n"
3646 			   "The sigmask lock is held somewhere else in "
3647 			   "the kernel, try again later\n");
3648 		return;
3649 	}
3650 	spin_unlock(&t->sighand->siglock);
3651 	new_t = kdb_prev_t != t;
3652 	kdb_prev_t = t;
3653 	if (t->state != TASK_RUNNING && new_t) {
3654 		kdb_printf("Process is not RUNNING, sending a signal from "
3655 			   "kdb risks deadlock\n"
3656 			   "on the run queue locks. "
3657 			   "The signal has _not_ been sent.\n"
3658 			   "Reissue the kill command if you want to risk "
3659 			   "the deadlock.\n");
3660 		return;
3661 	}
3662 	sig = info->si_signo;
3663 	if (send_sig_info(sig, info, t))
3664 		kdb_printf("Failed to deliver signal %d to process %d.\n",
3665 			   sig, t->pid);
3666 	else
3667 		kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
3668 }
3669 #endif	/* CONFIG_KGDB_KDB */
3670