xref: /linux/kernel/signal.c (revision 7fe2f6399a84760a9af8896ac152728250f82adb)
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *		Changes to use preallocated sigqueue structures
10  *		to allow signals to be sent reliably.
11  */
12 
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/security.h>
21 #include <linux/syscalls.h>
22 #include <linux/ptrace.h>
23 #include <linux/signal.h>
24 #include <linux/signalfd.h>
25 #include <linux/ratelimit.h>
26 #include <linux/tracehook.h>
27 #include <linux/capability.h>
28 #include <linux/freezer.h>
29 #include <linux/pid_namespace.h>
30 #include <linux/nsproxy.h>
31 #define CREATE_TRACE_POINTS
32 #include <trace/events/signal.h>
33 
34 #include <asm/param.h>
35 #include <asm/uaccess.h>
36 #include <asm/unistd.h>
37 #include <asm/siginfo.h>
38 #include "audit.h"	/* audit_signal_info() */
39 
40 /*
41  * SLAB caches for signal bits.
42  */
43 
44 static struct kmem_cache *sigqueue_cachep;
45 
46 int print_fatal_signals __read_mostly;
47 
48 static void __user *sig_handler(struct task_struct *t, int sig)
49 {
50 	return t->sighand->action[sig - 1].sa.sa_handler;
51 }
52 
53 static int sig_handler_ignored(void __user *handler, int sig)
54 {
55 	/* Is it explicitly or implicitly ignored? */
56 	return handler == SIG_IGN ||
57 		(handler == SIG_DFL && sig_kernel_ignore(sig));
58 }
59 
60 static int sig_task_ignored(struct task_struct *t, int sig,
61 		int from_ancestor_ns)
62 {
63 	void __user *handler;
64 
65 	handler = sig_handler(t, sig);
66 
67 	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
68 			handler == SIG_DFL && !from_ancestor_ns)
69 		return 1;
70 
71 	return sig_handler_ignored(handler, sig);
72 }
73 
74 static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
75 {
76 	/*
77 	 * Blocked signals are never ignored, since the
78 	 * signal handler may change by the time it is
79 	 * unblocked.
80 	 */
81 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
82 		return 0;
83 
84 	if (!sig_task_ignored(t, sig, from_ancestor_ns))
85 		return 0;
86 
87 	/*
88 	 * Tracers may want to know about even ignored signals.
89 	 */
90 	return !tracehook_consider_ignored_signal(t, sig);
91 }
92 
93 /*
94  * Re-calculate pending state from the set of locally pending
95  * signals, globally pending signals, and blocked signals.
96  */
97 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
98 {
99 	unsigned long ready;
100 	long i;
101 
102 	switch (_NSIG_WORDS) {
103 	default:
104 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
105 			ready |= signal->sig[i] &~ blocked->sig[i];
106 		break;
107 
108 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
109 		ready |= signal->sig[2] &~ blocked->sig[2];
110 		ready |= signal->sig[1] &~ blocked->sig[1];
111 		ready |= signal->sig[0] &~ blocked->sig[0];
112 		break;
113 
114 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
115 		ready |= signal->sig[0] &~ blocked->sig[0];
116 		break;
117 
118 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
119 	}
120 	return ready != 0;
121 }
122 
123 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
124 
125 static int recalc_sigpending_tsk(struct task_struct *t)
126 {
127 	if ((t->group_stop & GROUP_STOP_PENDING) ||
128 	    PENDING(&t->pending, &t->blocked) ||
129 	    PENDING(&t->signal->shared_pending, &t->blocked)) {
130 		set_tsk_thread_flag(t, TIF_SIGPENDING);
131 		return 1;
132 	}
133 	/*
134 	 * We must never clear the flag in another thread, or in current
135 	 * when it's possible the current syscall is returning -ERESTART*.
136 	 * So we don't clear it here; clearing is left to callers that know it is safe.
137 	 */
138 	return 0;
139 }
140 
141 /*
142  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
143  * This is superfluous when called on current, the wakeup is a harmless no-op.
144  */
145 void recalc_sigpending_and_wake(struct task_struct *t)
146 {
147 	if (recalc_sigpending_tsk(t))
148 		signal_wake_up(t, 0);
149 }
150 
151 void recalc_sigpending(void)
152 {
153 	if (unlikely(tracehook_force_sigpending()))
154 		set_thread_flag(TIF_SIGPENDING);
155 	else if (!recalc_sigpending_tsk(current) && !freezing(current))
156 		clear_thread_flag(TIF_SIGPENDING);
157 
158 }
159 
160 /* Given the mask, find the first available signal that should be serviced. */
161 
162 #define SYNCHRONOUS_MASK \
163 	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
164 	 sigmask(SIGTRAP) | sigmask(SIGFPE))
165 
166 int next_signal(struct sigpending *pending, sigset_t *mask)
167 {
168 	unsigned long i, *s, *m, x;
169 	int sig = 0;
170 
171 	s = pending->signal.sig;
172 	m = mask->sig;
173 
174 	/*
175 	 * Handle the first word specially: it contains the
176 	 * synchronous signals that need to be dequeued first.
177 	 */
178 	x = *s &~ *m;
179 	if (x) {
180 		if (x & SYNCHRONOUS_MASK)
181 			x &= SYNCHRONOUS_MASK;
182 		sig = ffz(~x) + 1;
183 		return sig;
184 	}
185 
186 	switch (_NSIG_WORDS) {
187 	default:
188 		for (i = 1; i < _NSIG_WORDS; ++i) {
189 			x = *++s &~ *++m;
190 			if (!x)
191 				continue;
192 			sig = ffz(~x) + i*_NSIG_BPW + 1;
193 			break;
194 		}
195 		break;
196 
197 	case 2:
198 		x = s[1] &~ m[1];
199 		if (!x)
200 			break;
201 		sig = ffz(~x) + _NSIG_BPW + 1;
202 		break;
203 
204 	case 1:
205 		/* Nothing to do */
206 		break;
207 	}
208 
209 	return sig;
210 }
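/*
 * Illustrative example (editorial, not part of the original source):
 * the SYNCHRONOUS_MASK special case above means a pending fault
 * signal is dequeued before a numerically lower asynchronous one.
 * With the siglock held, a sketch of the observable behaviour:
 *
 *	sigaddset(&pending->signal, SIGUSR1);	// 10 on x86
 *	sigaddset(&pending->signal, SIGSEGV);	// 11 on x86
 *	sig = next_signal(pending, &current->blocked);
 *	// sig == SIGSEGV: synchronous signals win even though
 *	// SIGUSR1 has the lower signal number.
 */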
211 
212 static inline void print_dropped_signal(int sig)
213 {
214 	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
215 
216 	if (!print_fatal_signals)
217 		return;
218 
219 	if (!__ratelimit(&ratelimit_state))
220 		return;
221 
222 	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
223 				current->comm, current->pid, sig);
224 }
225 
226 /**
227  * task_clear_group_stop_trapping - clear group stop trapping bit
228  * @task: target task
229  *
230  * If GROUP_STOP_TRAPPING is set, a ptracer is waiting for us.  Clear it
231  * and wake up the ptracer.  Note that we don't need any further locking.
232  * @task->siglock guarantees that @task->parent points to the ptracer.
233  *
234  * CONTEXT:
235  * Must be called with @task->sighand->siglock held.
236  */
237 static void task_clear_group_stop_trapping(struct task_struct *task)
238 {
239 	if (unlikely(task->group_stop & GROUP_STOP_TRAPPING)) {
240 		task->group_stop &= ~GROUP_STOP_TRAPPING;
241 		__wake_up_sync_key(&task->parent->signal->wait_chldexit,
242 				   TASK_UNINTERRUPTIBLE, 1, task);
243 	}
244 }
245 
246 /**
247  * task_clear_group_stop_pending - clear pending group stop
248  * @task: target task
249  *
250  * Clear group stop states for @task.
251  *
252  * CONTEXT:
253  * Must be called with @task->sighand->siglock held.
254  */
255 void task_clear_group_stop_pending(struct task_struct *task)
256 {
257 	task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME |
258 			      GROUP_STOP_DEQUEUED);
259 }
260 
261 /**
262  * task_participate_group_stop - participate in a group stop
263  * @task: task participating in a group stop
264  *
265  * @task has GROUP_STOP_PENDING set and is participating in a group stop.
266  * Group stop states are cleared and the group stop count is consumed if
267  * %GROUP_STOP_CONSUME was set.  If the consumption completes the group
268  * stop, the appropriate %SIGNAL_* flags are set.
269  *
270  * CONTEXT:
271  * Must be called with @task->sighand->siglock held.
272  *
273  * RETURNS:
274  * %true if group stop completion should be notified to the parent, %false
275  * otherwise.
276  */
277 static bool task_participate_group_stop(struct task_struct *task)
278 {
279 	struct signal_struct *sig = task->signal;
280 	bool consume = task->group_stop & GROUP_STOP_CONSUME;
281 
282 	WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING));
283 
284 	task_clear_group_stop_pending(task);
285 
286 	if (!consume)
287 		return false;
288 
289 	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
290 		sig->group_stop_count--;
291 
292 	/*
293 	 * Tell the caller to notify completion iff we are entering into a
294 	 * fresh group stop.  Read comment in do_signal_stop() for details.
295 	 */
296 	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
297 		sig->flags = SIGNAL_STOP_STOPPED;
298 		return true;
299 	}
300 	return false;
301 }
302 
303 /*
304  * allocate a new signal queue record
305  * - this may be called without locks if and only if t == current, otherwise an
306  *   appropriate lock must be held to stop the target task from exiting
307  */
308 static struct sigqueue *
309 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
310 {
311 	struct sigqueue *q = NULL;
312 	struct user_struct *user;
313 
314 	/*
315 	 * Protect access to @t credentials. This can go away when all
316 	 * callers hold rcu read lock.
317 	 */
318 	rcu_read_lock();
319 	user = get_uid(__task_cred(t)->user);
320 	atomic_inc(&user->sigpending);
321 	rcu_read_unlock();
322 
323 	if (override_rlimit ||
324 	    atomic_read(&user->sigpending) <=
325 			task_rlimit(t, RLIMIT_SIGPENDING)) {
326 		q = kmem_cache_alloc(sigqueue_cachep, flags);
327 	} else {
328 		print_dropped_signal(sig);
329 	}
330 
331 	if (unlikely(q == NULL)) {
332 		atomic_dec(&user->sigpending);
333 		free_uid(user);
334 	} else {
335 		INIT_LIST_HEAD(&q->list);
336 		q->flags = 0;
337 		q->user = user;
338 	}
339 
340 	return q;
341 }
342 
343 static void __sigqueue_free(struct sigqueue *q)
344 {
345 	if (q->flags & SIGQUEUE_PREALLOC)
346 		return;
347 	atomic_dec(&q->user->sigpending);
348 	free_uid(q->user);
349 	kmem_cache_free(sigqueue_cachep, q);
350 }
351 
352 void flush_sigqueue(struct sigpending *queue)
353 {
354 	struct sigqueue *q;
355 
356 	sigemptyset(&queue->signal);
357 	while (!list_empty(&queue->list)) {
358 		q = list_entry(queue->list.next, struct sigqueue, list);
359 		list_del_init(&q->list);
360 		__sigqueue_free(q);
361 	}
362 }
363 
364 /*
365  * Flush all pending signals for a task.
366  */
367 void __flush_signals(struct task_struct *t)
368 {
369 	clear_tsk_thread_flag(t, TIF_SIGPENDING);
370 	flush_sigqueue(&t->pending);
371 	flush_sigqueue(&t->signal->shared_pending);
372 }
373 
374 void flush_signals(struct task_struct *t)
375 {
376 	unsigned long flags;
377 
378 	spin_lock_irqsave(&t->sighand->siglock, flags);
379 	__flush_signals(t);
380 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
381 }
382 
383 static void __flush_itimer_signals(struct sigpending *pending)
384 {
385 	sigset_t signal, retain;
386 	struct sigqueue *q, *n;
387 
388 	signal = pending->signal;
389 	sigemptyset(&retain);
390 
391 	list_for_each_entry_safe(q, n, &pending->list, list) {
392 		int sig = q->info.si_signo;
393 
394 		if (likely(q->info.si_code != SI_TIMER)) {
395 			sigaddset(&retain, sig);
396 		} else {
397 			sigdelset(&signal, sig);
398 			list_del_init(&q->list);
399 			__sigqueue_free(q);
400 		}
401 	}
402 
403 	sigorsets(&pending->signal, &signal, &retain);
404 }
405 
406 void flush_itimer_signals(void)
407 {
408 	struct task_struct *tsk = current;
409 	unsigned long flags;
410 
411 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
412 	__flush_itimer_signals(&tsk->pending);
413 	__flush_itimer_signals(&tsk->signal->shared_pending);
414 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
415 }
416 
417 void ignore_signals(struct task_struct *t)
418 {
419 	int i;
420 
421 	for (i = 0; i < _NSIG; ++i)
422 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
423 
424 	flush_signals(t);
425 }
426 
427 /*
428  * Flush all handlers for a task.
429  */
430 
431 void
432 flush_signal_handlers(struct task_struct *t, int force_default)
433 {
434 	int i;
435 	struct k_sigaction *ka = &t->sighand->action[0];
436 	for (i = _NSIG ; i != 0 ; i--) {
437 		if (force_default || ka->sa.sa_handler != SIG_IGN)
438 			ka->sa.sa_handler = SIG_DFL;
439 		ka->sa.sa_flags = 0;
440 		sigemptyset(&ka->sa.sa_mask);
441 		ka++;
442 	}
443 }
444 
445 int unhandled_signal(struct task_struct *tsk, int sig)
446 {
447 	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
448 	if (is_global_init(tsk))
449 		return 1;
450 	if (handler != SIG_IGN && handler != SIG_DFL)
451 		return 0;
452 	return !tracehook_consider_fatal_signal(tsk, sig);
453 }
454 
455 /*
456  * Notify the system that a driver wants to block all signals for this
457  * process, and wants to be notified if any signals at all were to be
458  * sent/acted upon.  If the notifier routine returns non-zero, then the
459  * signal will be acted upon after all.  If the notifier routine returns 0,
460  * then the signal will be blocked.  Only one block per process is
461  * allowed.  priv is a pointer to private data that the notifier routine
462  * can use to determine if the signal should be blocked or not.
463  */
464 void
465 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
466 {
467 	unsigned long flags;
468 
469 	spin_lock_irqsave(&current->sighand->siglock, flags);
470 	current->notifier_mask = mask;
471 	current->notifier_data = priv;
472 	current->notifier = notifier;
473 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
474 }
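/*
 * Illustrative usage sketch (editorial; my_notifier and my_dev are
 * hypothetical, not from this file).  Per the semantics documented
 * above, the notifier returns 0 to keep a signal blocked and
 * non-zero to let it be acted upon:
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		return dev->allow_signals;	// 0 == block the signal
 *	}
 *
 *	block_all_signals(my_notifier, dev, &mask);
 *	... section where signals in &mask are gated ...
 *	unblock_all_signals();
 */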
475 
476 /* Notify the system that blocking has ended. */
477 
478 void
479 unblock_all_signals(void)
480 {
481 	unsigned long flags;
482 
483 	spin_lock_irqsave(&current->sighand->siglock, flags);
484 	current->notifier = NULL;
485 	current->notifier_data = NULL;
486 	recalc_sigpending();
487 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
488 }
489 
490 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
491 {
492 	struct sigqueue *q, *first = NULL;
493 
494 	/*
495 	 * Collect the siginfo appropriate to this signal.  Check if
496 	 * there is another siginfo for the same signal.
497 	 */
498 	list_for_each_entry(q, &list->list, list) {
499 		if (q->info.si_signo == sig) {
500 			if (first)
501 				goto still_pending;
502 			first = q;
503 		}
504 	}
505 
506 	sigdelset(&list->signal, sig);
507 
508 	if (first) {
509 still_pending:
510 		list_del_init(&first->list);
511 		copy_siginfo(info, &first->info);
512 		__sigqueue_free(first);
513 	} else {
514 		/*
515 		 * Ok, it wasn't in the queue.  This must be
516 		 * a fast-pathed signal or we must have been
517 		 * out of queue space.  So zero out the info.
518 		 */
519 		info->si_signo = sig;
520 		info->si_errno = 0;
521 		info->si_code = SI_USER;
522 		info->si_pid = 0;
523 		info->si_uid = 0;
524 	}
525 }
526 
527 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
528 			siginfo_t *info)
529 {
530 	int sig = next_signal(pending, mask);
531 
532 	if (sig) {
533 		if (current->notifier) {
534 			if (sigismember(current->notifier_mask, sig)) {
535 				if (!(current->notifier)(current->notifier_data)) {
536 					clear_thread_flag(TIF_SIGPENDING);
537 					return 0;
538 				}
539 			}
540 		}
541 
542 		collect_signal(sig, pending, info);
543 	}
544 
545 	return sig;
546 }
547 
548 /*
549  * Dequeue a signal and return the element to the caller, which is
550  * expected to free it.
551  *
552  * All callers have to hold the siglock.
553  */
554 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
555 {
556 	int signr;
557 
558 	/* We only dequeue private signals from ourselves; we don't let
559 	 * signalfd steal them
560 	 */
561 	signr = __dequeue_signal(&tsk->pending, mask, info);
562 	if (!signr) {
563 		signr = __dequeue_signal(&tsk->signal->shared_pending,
564 					 mask, info);
565 		/*
566 		 * itimer signal?
567 		 *
568 		 * itimers are process shared and we restart periodic
569 		 * itimers in the signal delivery path to prevent DoS
570 		 * attacks in the high resolution timer case. This is
571 		 * compliant with the old way of self-restarting
572 		 * itimers, as the SIGALRM is a legacy signal and only
573 		 * queued once. Changing the restart behaviour to
574 		 * restart the timer in the signal dequeue path
575 		 * also reduces the timer noise on heavily loaded
576 		 * !highres systems.
577 		 */
578 		if (unlikely(signr == SIGALRM)) {
579 			struct hrtimer *tmr = &tsk->signal->real_timer;
580 
581 			if (!hrtimer_is_queued(tmr) &&
582 			    tsk->signal->it_real_incr.tv64 != 0) {
583 				hrtimer_forward(tmr, tmr->base->get_time(),
584 						tsk->signal->it_real_incr);
585 				hrtimer_restart(tmr);
586 			}
587 		}
588 	}
589 
590 	recalc_sigpending();
591 	if (!signr)
592 		return 0;
593 
594 	if (unlikely(sig_kernel_stop(signr))) {
595 		/*
596 		 * Set a marker that we have dequeued a stop signal.  Our
597 		 * caller might release the siglock and then the pending
598 		 * stop signal it is about to process is no longer in the
599 		 * pending bitmasks, but must still be cleared by a SIGCONT
600 		 * (and overruled by a SIGKILL).  So those cases clear this
601 		 * shared flag after we've set it.  Note that this flag may
602 		 * remain set after the signal we return is ignored or
603 		 * handled.  That doesn't matter because its only purpose
604 		 * is to alert stop-signal processing code when another
605 		 * processor has come along and cleared the flag.
606 		 */
607 		current->group_stop |= GROUP_STOP_DEQUEUED;
608 	}
609 	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
610 		/*
611 		 * Release the siglock to ensure proper locking order
612 		 * of timer locks outside of siglocks.  Note, we leave
613 		 * irqs disabled here, since the posix-timers code is
614 		 * about to disable them again anyway.
615 		 */
616 		spin_unlock(&tsk->sighand->siglock);
617 		do_schedule_next_timer(info);
618 		spin_lock(&tsk->sighand->siglock);
619 	}
620 	return signr;
621 }
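/*
 * Editorial sketch of the calling convention (all callers hold the
 * siglock, as noted above):
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 *	// signr == 0 means nothing deliverable was pending;
 *	// otherwise info describes the dequeued signal.
 */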
622 
623 /*
624  * Tell a process that it has a new active signal.
625  *
626  * NOTE! we rely on the previous spin_lock to
627  * lock interrupts for us! We can only be called with
628  * "siglock" held, and local interrupts must
629  * have been disabled when that got acquired!
630  *
631  * No need to set need_resched since signal event passing
632  * goes through ->blocked
633  */
634 void signal_wake_up(struct task_struct *t, int resume)
635 {
636 	unsigned int mask;
637 
638 	set_tsk_thread_flag(t, TIF_SIGPENDING);
639 
640 	/*
641 	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
642 	 * case. We don't check t->state here because there is a race with it
643  * executing on another processor and just now entering stopped state.
644 	 * By using wake_up_state, we ensure the process will wake up and
645 	 * handle its death signal.
646 	 */
647 	mask = TASK_INTERRUPTIBLE;
648 	if (resume)
649 		mask |= TASK_WAKEKILL;
650 	if (!wake_up_state(t, mask))
651 		kick_process(t);
652 }
653 
654 /*
655  * Remove signals in mask from the pending set and queue.
656  * Returns 1 if any signals were found.
657  *
658  * All callers must be holding the siglock.
659  *
660  * This version takes a sigset mask and looks at all signals,
661  * not just those in the first mask word.
662  */
663 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
664 {
665 	struct sigqueue *q, *n;
666 	sigset_t m;
667 
668 	sigandsets(&m, mask, &s->signal);
669 	if (sigisemptyset(&m))
670 		return 0;
671 
672 	sigandnsets(&s->signal, &s->signal, mask);
673 	list_for_each_entry_safe(q, n, &s->list, list) {
674 		if (sigismember(mask, q->info.si_signo)) {
675 			list_del_init(&q->list);
676 			__sigqueue_free(q);
677 		}
678 	}
679 	return 1;
680 }
681 /*
682  * Remove signals in mask from the pending set and queue.
683  * Returns 1 if any signals were found.
684  *
685  * All callers must be holding the siglock.
686  */
687 static int rm_from_queue(unsigned long mask, struct sigpending *s)
688 {
689 	struct sigqueue *q, *n;
690 
691 	if (!sigtestsetmask(&s->signal, mask))
692 		return 0;
693 
694 	sigdelsetmask(&s->signal, mask);
695 	list_for_each_entry_safe(q, n, &s->list, list) {
696 		if (q->info.si_signo < SIGRTMIN &&
697 		    (mask & sigmask(q->info.si_signo))) {
698 			list_del_init(&q->list);
699 			__sigqueue_free(q);
700 		}
701 	}
702 	return 1;
703 }
704 
705 static inline int is_si_special(const struct siginfo *info)
706 {
707 	return info <= SEND_SIG_FORCED;
708 }
709 
710 static inline bool si_fromuser(const struct siginfo *info)
711 {
712 	return info == SEND_SIG_NOINFO ||
713 		(!is_si_special(info) && SI_FROMUSER(info));
714 }
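/*
 * Editorial note: SEND_SIG_NOINFO, SEND_SIG_PRIV and SEND_SIG_FORCED
 * are tiny sentinel pointer values (0, 1 and 2, defined in
 * <linux/sched.h>), which is why "info <= SEND_SIG_FORCED" above is a
 * valid range check rather than a pointer dereference.
 */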
715 
716 /*
717  * called with RCU read lock from check_kill_permission()
718  */
719 static int kill_ok_by_cred(struct task_struct *t)
720 {
721 	const struct cred *cred = current_cred();
722 	const struct cred *tcred = __task_cred(t);
723 
724 	if (cred->user->user_ns == tcred->user->user_ns &&
725 	    (cred->euid == tcred->suid ||
726 	     cred->euid == tcred->uid ||
727 	     cred->uid  == tcred->suid ||
728 	     cred->uid  == tcred->uid))
729 		return 1;
730 
731 	if (ns_capable(tcred->user->user_ns, CAP_KILL))
732 		return 1;
733 
734 	return 0;
735 }
736 
737 /*
738  * Bad permissions for sending the signal
739  * - the caller must hold the RCU read lock
740  */
741 static int check_kill_permission(int sig, struct siginfo *info,
742 				 struct task_struct *t)
743 {
744 	struct pid *sid;
745 	int error;
746 
747 	if (!valid_signal(sig))
748 		return -EINVAL;
749 
750 	if (!si_fromuser(info))
751 		return 0;
752 
753 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
754 	if (error)
755 		return error;
756 
757 	if (!same_thread_group(current, t) &&
758 	    !kill_ok_by_cred(t)) {
759 		switch (sig) {
760 		case SIGCONT:
761 			sid = task_session(t);
762 			/*
763 			 * We don't return the error if sid == NULL. The
764 			 * task was unhashed, the caller must notice this.
765 			 */
766 			if (!sid || sid == task_session(current))
767 				break;
768 		default:
769 			return -EPERM;
770 		}
771 	}
772 
773 	return security_task_kill(t, info, sig, 0);
774 }
775 
776 /*
777  * Handle magic process-wide effects of stop/continue signals. Unlike
778  * the signal actions, these happen immediately at signal-generation
779  * time regardless of blocking, ignoring, or handling.  This does the
780  * actual continuing for SIGCONT, but not the actual stopping for stop
781  * signals. The process stop is done as a signal action for SIG_DFL.
782  *
783  * Returns true if the signal should be actually delivered, otherwise
784  * it should be dropped.
785  */
786 static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
787 {
788 	struct signal_struct *signal = p->signal;
789 	struct task_struct *t;
790 
791 	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
792 		/*
793 		 * The process is in the middle of dying, nothing to do.
794 		 */
795 	} else if (sig_kernel_stop(sig)) {
796 		/*
797 		 * This is a stop signal.  Remove SIGCONT from all queues.
798 		 */
799 		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
800 		t = p;
801 		do {
802 			rm_from_queue(sigmask(SIGCONT), &t->pending);
803 		} while_each_thread(p, t);
804 	} else if (sig == SIGCONT) {
805 		unsigned int why;
806 		/*
807 		 * Remove all stop signals from all queues, wake all threads.
808 		 */
809 		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
810 		t = p;
811 		do {
812 			task_clear_group_stop_pending(t);
813 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
814 			wake_up_state(t, __TASK_STOPPED);
815 		} while_each_thread(p, t);
816 
817 		/*
818 		 * Notify the parent with CLD_CONTINUED if we were stopped.
819 		 *
820 		 * If we were in the middle of a group stop, we pretend it
821 		 * was already finished, and then continued. Since SIGCHLD
822 		 * doesn't queue we report only CLD_STOPPED, as if the next
823 		 * CLD_CONTINUED was dropped.
824 		 */
825 		why = 0;
826 		if (signal->flags & SIGNAL_STOP_STOPPED)
827 			why |= SIGNAL_CLD_CONTINUED;
828 		else if (signal->group_stop_count)
829 			why |= SIGNAL_CLD_STOPPED;
830 
831 		if (why) {
832 			/*
833 			 * The first thread which returns from do_signal_stop()
834 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
835 			 * notify its parent. See get_signal_to_deliver().
836 			 */
837 			signal->flags = why | SIGNAL_STOP_CONTINUED;
838 			signal->group_stop_count = 0;
839 			signal->group_exit_code = 0;
840 		}
841 	}
842 
843 	return !sig_ignored(p, sig, from_ancestor_ns);
844 }
845 
846 /*
847  * Test if P wants to take SIG.  After we've checked all threads with this,
848  * it's equivalent to finding no threads not blocking SIG.  Any threads not
849  * blocking SIG were ruled out because they are not running and already
850  * have pending signals.  Such threads will dequeue from the shared queue
851  * as soon as they're available, so putting the signal on the shared queue
852  * will be equivalent to sending it to one such thread.
853  */
854 static inline int wants_signal(int sig, struct task_struct *p)
855 {
856 	if (sigismember(&p->blocked, sig))
857 		return 0;
858 	if (p->flags & PF_EXITING)
859 		return 0;
860 	if (sig == SIGKILL)
861 		return 1;
862 	if (task_is_stopped_or_traced(p))
863 		return 0;
864 	return task_curr(p) || !signal_pending(p);
865 }
866 
867 static void complete_signal(int sig, struct task_struct *p, int group)
868 {
869 	struct signal_struct *signal = p->signal;
870 	struct task_struct *t;
871 
872 	/*
873 	 * Now find a thread we can wake up to take the signal off the queue.
874 	 *
875 	 * If the main thread wants the signal, it gets first crack.
876 	 * Probably the least surprising to the average bear.
877 	 */
878 	if (wants_signal(sig, p))
879 		t = p;
880 	else if (!group || thread_group_empty(p))
881 		/*
882 		 * There is just one thread and it does not need to be woken.
883 		 * It will dequeue unblocked signals before it runs again.
884 		 */
885 		return;
886 	else {
887 		/*
888 		 * Otherwise try to find a suitable thread.
889 		 */
890 		t = signal->curr_target;
891 		while (!wants_signal(sig, t)) {
892 			t = next_thread(t);
893 			if (t == signal->curr_target)
894 				/*
895 				 * No thread needs to be woken.
896 				 * Any eligible threads will see
897 				 * the signal in the queue soon.
898 				 */
899 				return;
900 		}
901 		signal->curr_target = t;
902 	}
903 
904 	/*
905 	 * Found a killable thread.  If the signal will be fatal,
906 	 * then start taking the whole group down immediately.
907 	 */
908 	if (sig_fatal(p, sig) &&
909 	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
910 	    !sigismember(&t->real_blocked, sig) &&
911 	    (sig == SIGKILL ||
912 	     !tracehook_consider_fatal_signal(t, sig))) {
913 		/*
914 		 * This signal will be fatal to the whole group.
915 		 */
916 		if (!sig_kernel_coredump(sig)) {
917 			/*
918 			 * Start a group exit and wake everybody up.
919 			 * This way we don't have other threads
920 			 * running and doing things after a slower
921 			 * thread has the fatal signal pending.
922 			 */
923 			signal->flags = SIGNAL_GROUP_EXIT;
924 			signal->group_exit_code = sig;
925 			signal->group_stop_count = 0;
926 			t = p;
927 			do {
928 				task_clear_group_stop_pending(t);
929 				sigaddset(&t->pending.signal, SIGKILL);
930 				signal_wake_up(t, 1);
931 			} while_each_thread(p, t);
932 			return;
933 		}
934 	}
935 
936 	/*
937 	 * The signal is already in the pending queue (shared or private).
938 	 * Tell the chosen thread to wake up and dequeue it.
939 	 */
940 	signal_wake_up(t, sig == SIGKILL);
941 	return;
942 }
943 
944 static inline int legacy_queue(struct sigpending *signals, int sig)
945 {
946 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
947 }
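/*
 * Editorial example of the legacy_queue() rule as seen from user
 * space: non-real-time signals coalesce, real-time signals queue.
 *
 *	kill(pid, SIGUSR1);		// now pending
 *	kill(pid, SIGUSR1);		// coalesced, still one instance
 *	sigqueue(pid, SIGRTMIN, val);	// queued
 *	sigqueue(pid, SIGRTMIN, val);	// queued again, two instances
 */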
948 
949 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
950 			int group, int from_ancestor_ns)
951 {
952 	struct sigpending *pending;
953 	struct sigqueue *q;
954 	int override_rlimit;
955 
956 	trace_signal_generate(sig, info, t);
957 
958 	assert_spin_locked(&t->sighand->siglock);
959 
960 	if (!prepare_signal(sig, t, from_ancestor_ns))
961 		return 0;
962 
963 	pending = group ? &t->signal->shared_pending : &t->pending;
964 	/*
965 	 * Short-circuit ignored signals and support queuing
966 	 * exactly one non-rt signal, so that we can get more
967 	 * detailed information about the cause of the signal.
968 	 */
969 	if (legacy_queue(pending, sig))
970 		return 0;
971 	/*
972 	 * fast-pathed signals for kernel-internal things like SIGSTOP
973 	 * or SIGKILL.
974 	 */
975 	if (info == SEND_SIG_FORCED)
976 		goto out_set;
977 
978 	/*
979 	 * Real-time signals must be queued if sent by sigqueue, or
980 	 * some other real-time mechanism.  It is implementation
981 	 * defined whether kill() does so.  We attempt to do so, on
982 	 * the principle of least surprise, but since kill is not
983 	 * allowed to fail with EAGAIN when low on memory we just
984 	 * make sure at least one signal gets delivered and don't
985 	 * pass on the info struct.
986 	 */
987 	if (sig < SIGRTMIN)
988 		override_rlimit = (is_si_special(info) || info->si_code >= 0);
989 	else
990 		override_rlimit = 0;
991 
992 	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
993 		override_rlimit);
994 	if (q) {
995 		list_add_tail(&q->list, &pending->list);
996 		switch ((unsigned long) info) {
997 		case (unsigned long) SEND_SIG_NOINFO:
998 			q->info.si_signo = sig;
999 			q->info.si_errno = 0;
1000 			q->info.si_code = SI_USER;
1001 			q->info.si_pid = task_tgid_nr_ns(current,
1002 							task_active_pid_ns(t));
1003 			q->info.si_uid = current_uid();
1004 			break;
1005 		case (unsigned long) SEND_SIG_PRIV:
1006 			q->info.si_signo = sig;
1007 			q->info.si_errno = 0;
1008 			q->info.si_code = SI_KERNEL;
1009 			q->info.si_pid = 0;
1010 			q->info.si_uid = 0;
1011 			break;
1012 		default:
1013 			copy_siginfo(&q->info, info);
1014 			if (from_ancestor_ns)
1015 				q->info.si_pid = 0;
1016 			break;
1017 		}
1018 	} else if (!is_si_special(info)) {
1019 		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1020 			/*
1021 			 * Queue overflow, abort.  We only abort if the
1022 			 * signal was real-time and sent by user space
1023 			 * using something other than kill().
1024 			 */
1025 			trace_signal_overflow_fail(sig, group, info);
1026 			return -EAGAIN;
1027 		} else {
1028 			/*
1029 			 * This is a silent loss of information.  We still
1030 			 * send the signal, but the *info bits are lost.
1031 			 */
1032 			trace_signal_lose_info(sig, group, info);
1033 		}
1034 	}
1035 
1036 out_set:
1037 	signalfd_notify(t, sig);
1038 	sigaddset(&pending->signal, sig);
1039 	complete_signal(sig, t, group);
1040 	return 0;
1041 }
1042 
1043 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1044 			int group)
1045 {
1046 	int from_ancestor_ns = 0;
1047 
1048 #ifdef CONFIG_PID_NS
1049 	from_ancestor_ns = si_fromuser(info) &&
1050 			   !task_pid_nr_ns(current, task_active_pid_ns(t));
1051 #endif
1052 
1053 	return __send_signal(sig, info, t, group, from_ancestor_ns);
1054 }
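/*
 * Editorial note on the test above: task_pid_nr_ns(current,
 * task_active_pid_ns(t)) returns 0 when the sender has no pid in the
 * target's pid namespace, i.e. the sender lives in an ancestor
 * namespace.  Such signals are flagged so that, among other things,
 * __send_signal() clears si_pid, which would be meaningless in the
 * receiver's namespace.
 */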
1055 
1056 static void print_fatal_signal(struct pt_regs *regs, int signr)
1057 {
1058 	printk("%s/%d: potentially unexpected fatal signal %d.\n",
1059 		current->comm, task_pid_nr(current), signr);
1060 
1061 #if defined(__i386__) && !defined(__arch_um__)
1062 	printk("code at %08lx: ", regs->ip);
1063 	{
1064 		int i;
1065 		for (i = 0; i < 16; i++) {
1066 			unsigned char insn;
1067 
1068 			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1069 				break;
1070 			printk("%02x ", insn);
1071 		}
1072 	}
1073 #endif
1074 	printk("\n");
1075 	preempt_disable();
1076 	show_regs(regs);
1077 	preempt_enable();
1078 }
1079 
1080 static int __init setup_print_fatal_signals(char *str)
1081 {
1082 	get_option(&str, &print_fatal_signals);
1083 
1084 	return 1;
1085 }
1086 
1087 __setup("print-fatal-signals=", setup_print_fatal_signals);
1088 
1089 int
1090 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1091 {
1092 	return send_signal(sig, info, p, 1);
1093 }
1094 
1095 static int
1096 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1097 {
1098 	return send_signal(sig, info, t, 0);
1099 }
1100 
1101 int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1102 			bool group)
1103 {
1104 	unsigned long flags;
1105 	int ret = -ESRCH;
1106 
1107 	if (lock_task_sighand(p, &flags)) {
1108 		ret = send_signal(sig, info, p, group);
1109 		unlock_task_sighand(p, &flags);
1110 	}
1111 
1112 	return ret;
1113 }
1114 
1115 /*
1116  * Force a signal that the process can't ignore: if necessary
1117  * we unblock the signal and change any SIG_IGN to SIG_DFL.
1118  *
1119  * Note: If we unblock the signal, we always reset it to SIG_DFL,
1120  * since we do not want to have a signal handler that was blocked
1121  * be invoked when user space had explicitly blocked it.
1122  *
1123  * We don't want to have recursive SIGSEGV's etc, for example,
1124  * that is why we also clear SIGNAL_UNKILLABLE.
1125  */
1126 int
1127 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1128 {
1129 	unsigned long int flags;
1130 	int ret, blocked, ignored;
1131 	struct k_sigaction *action;
1132 
1133 	spin_lock_irqsave(&t->sighand->siglock, flags);
1134 	action = &t->sighand->action[sig-1];
1135 	ignored = action->sa.sa_handler == SIG_IGN;
1136 	blocked = sigismember(&t->blocked, sig);
1137 	if (blocked || ignored) {
1138 		action->sa.sa_handler = SIG_DFL;
1139 		if (blocked) {
1140 			sigdelset(&t->blocked, sig);
1141 			recalc_sigpending_and_wake(t);
1142 		}
1143 	}
1144 	if (action->sa.sa_handler == SIG_DFL)
1145 		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1146 	ret = specific_send_sig_info(sig, info, t);
1147 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1148 
1149 	return ret;
1150 }
1151 
1152 /*
1153  * Nuke all other threads in the group.
1154  */
1155 int zap_other_threads(struct task_struct *p)
1156 {
1157 	struct task_struct *t = p;
1158 	int count = 0;
1159 
1160 	p->signal->group_stop_count = 0;
1161 
1162 	while_each_thread(p, t) {
1163 		task_clear_group_stop_pending(t);
1164 		count++;
1165 
1166 		/* Don't bother with already dead threads */
1167 		if (t->exit_state)
1168 			continue;
1169 		sigaddset(&t->pending.signal, SIGKILL);
1170 		signal_wake_up(t, 1);
1171 	}
1172 
1173 	return count;
1174 }
1175 
1176 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1177 					   unsigned long *flags)
1178 {
1179 	struct sighand_struct *sighand;
1180 
1181 	for (;;) {
1182 		local_irq_save(*flags);
1183 		rcu_read_lock();
1184 		sighand = rcu_dereference(tsk->sighand);
1185 		if (unlikely(sighand == NULL)) {
1186 			rcu_read_unlock();
1187 			local_irq_restore(*flags);
1188 			break;
1189 		}
1190 
1191 		spin_lock(&sighand->siglock);
1192 		if (likely(sighand == tsk->sighand)) {
1193 			rcu_read_unlock();
1194 			break;
1195 		}
1196 		spin_unlock(&sighand->siglock);
1197 		rcu_read_unlock();
1198 		local_irq_restore(*flags);
1199 	}
1200 
1201 	return sighand;
1202 }
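/*
 * Editorial note: the retry loop above guards against tsk->sighand
 * being freed and replaced while we take the lock.  Callers normally
 * go through the lock_task_sighand()/unlock_task_sighand() wrappers,
 * as in do_send_sig_info() above:
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		... p->sighand->siglock is held here ...
 *		unlock_task_sighand(p, &flags);
 *	}
 */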
1203 
1204 /*
1205  * send signal info to all the members of a group
1206  */
1207 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1208 {
1209 	int ret;
1210 
1211 	rcu_read_lock();
1212 	ret = check_kill_permission(sig, info, p);
1213 	rcu_read_unlock();
1214 
1215 	if (!ret && sig)
1216 		ret = do_send_sig_info(sig, info, p, true);
1217 
1218 	return ret;
1219 }
1220 
1221 /*
1222  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1223  * control characters do (^C, ^Z etc)
1224  * - the caller must hold at least a readlock on tasklist_lock
1225  */
1226 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1227 {
1228 	struct task_struct *p = NULL;
1229 	int retval, success;
1230 
1231 	success = 0;
1232 	retval = -ESRCH;
1233 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1234 		int err = group_send_sig_info(sig, info, p);
1235 		success |= !err;
1236 		retval = err;
1237 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1238 	return success ? 0 : retval;
1239 }
1240 
1241 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1242 {
1243 	int error = -ESRCH;
1244 	struct task_struct *p;
1245 
1246 	rcu_read_lock();
1247 retry:
1248 	p = pid_task(pid, PIDTYPE_PID);
1249 	if (p) {
1250 		error = group_send_sig_info(sig, info, p);
1251 		if (unlikely(error == -ESRCH))
1252 			/*
1253 			 * The task was unhashed in between, try again.
1254 			 * If it is dead, pid_task() will return NULL,
1255 			 * if we race with de_thread() it will find the
1256 			 * new leader.
1257 			 */
1258 			goto retry;
1259 	}
1260 	rcu_read_unlock();
1261 
1262 	return error;
1263 }
1264 
1265 int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1266 {
1267 	int error;
1268 	rcu_read_lock();
1269 	error = kill_pid_info(sig, info, find_vpid(pid));
1270 	rcu_read_unlock();
1271 	return error;
1272 }
1273 
1274 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1275 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1276 		      uid_t uid, uid_t euid, u32 secid)
1277 {
1278 	int ret = -EINVAL;
1279 	struct task_struct *p;
1280 	const struct cred *pcred;
1281 	unsigned long flags;
1282 
1283 	if (!valid_signal(sig))
1284 		return ret;
1285 
1286 	rcu_read_lock();
1287 	p = pid_task(pid, PIDTYPE_PID);
1288 	if (!p) {
1289 		ret = -ESRCH;
1290 		goto out_unlock;
1291 	}
1292 	pcred = __task_cred(p);
1293 	if (si_fromuser(info) &&
1294 	    euid != pcred->suid && euid != pcred->uid &&
1295 	    uid  != pcred->suid && uid  != pcred->uid) {
1296 		ret = -EPERM;
1297 		goto out_unlock;
1298 	}
1299 	ret = security_task_kill(p, info, sig, secid);
1300 	if (ret)
1301 		goto out_unlock;
1302 
1303 	if (sig) {
1304 		if (lock_task_sighand(p, &flags)) {
1305 			ret = __send_signal(sig, info, p, 1, 0);
1306 			unlock_task_sighand(p, &flags);
1307 		} else
1308 			ret = -ESRCH;
1309 	}
1310 out_unlock:
1311 	rcu_read_unlock();
1312 	return ret;
1313 }
1314 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1315 
1316 /*
1317  * kill_something_info() interprets pid in interesting ways just like kill(2).
1318  *
1319  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1320  * is probably wrong.  Should make it like BSD or SYSV.
1321  */
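/*
 * For reference (editorial, derived from the code below):
 *
 *	pid >  0	the single process with that pid
 *	pid == 0	every process in the caller's process group
 *	pid == -1	every process it has permission to signal,
 *			except pid 1 and the caller's own group
 *	pid <  -1	every process in process group -pid
 */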
1322 
1323 static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1324 {
1325 	int ret;
1326 
1327 	if (pid > 0) {
1328 		rcu_read_lock();
1329 		ret = kill_pid_info(sig, info, find_vpid(pid));
1330 		rcu_read_unlock();
1331 		return ret;
1332 	}
1333 
1334 	read_lock(&tasklist_lock);
1335 	if (pid != -1) {
1336 		ret = __kill_pgrp_info(sig, info,
1337 				pid ? find_vpid(-pid) : task_pgrp(current));
1338 	} else {
1339 		int retval = 0, count = 0;
1340 		struct task_struct * p;
1341 
1342 		for_each_process(p) {
1343 			if (task_pid_vnr(p) > 1 &&
1344 					!same_thread_group(p, current)) {
1345 				int err = group_send_sig_info(sig, info, p);
1346 				++count;
1347 				if (err != -EPERM)
1348 					retval = err;
1349 			}
1350 		}
1351 		ret = count ? retval : -ESRCH;
1352 	}
1353 	read_unlock(&tasklist_lock);
1354 
1355 	return ret;
1356 }
1357 
1358 /*
1359  * These are for backward compatibility with the rest of the kernel source.
1360  */
1361 
1362 int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1363 {
1364 	/*
1365 	 * Make sure legacy kernel users don't send in bad values
1366 	 * (normal paths check this in check_kill_permission).
1367 	 */
1368 	if (!valid_signal(sig))
1369 		return -EINVAL;
1370 
1371 	return do_send_sig_info(sig, info, p, false);
1372 }
1373 
1374 #define __si_special(priv) \
1375 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1376 
1377 int
1378 send_sig(int sig, struct task_struct *p, int priv)
1379 {
1380 	return send_sig_info(sig, __si_special(priv), p);
1381 }
1382 
1383 void
1384 force_sig(int sig, struct task_struct *p)
1385 {
1386 	force_sig_info(sig, SEND_SIG_PRIV, p);
1387 }
1388 
1389 /*
1390  * When things go south during signal handling, we
1391  * will force a SIGSEGV. And if the signal that caused
1392  * the problem was already a SIGSEGV, we'll want to
1393  * make sure we don't even try to deliver the signal.
1394  */
1395 int
1396 force_sigsegv(int sig, struct task_struct *p)
1397 {
1398 	if (sig == SIGSEGV) {
1399 		unsigned long flags;
1400 		spin_lock_irqsave(&p->sighand->siglock, flags);
1401 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1402 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1403 	}
1404 	force_sig(SIGSEGV, p);
1405 	return 0;
1406 }
1407 
1408 int kill_pgrp(struct pid *pid, int sig, int priv)
1409 {
1410 	int ret;
1411 
1412 	read_lock(&tasklist_lock);
1413 	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1414 	read_unlock(&tasklist_lock);
1415 
1416 	return ret;
1417 }
1418 EXPORT_SYMBOL(kill_pgrp);
1419 
1420 int kill_pid(struct pid *pid, int sig, int priv)
1421 {
1422 	return kill_pid_info(sig, __si_special(priv), pid);
1423 }
1424 EXPORT_SYMBOL(kill_pid);
1425 
1426 /*
1427  * These functions support sending signals using preallocated sigqueue
1428  * structures.  This is needed "because realtime applications cannot
1429  * afford to lose notifications of asynchronous events, like timer
1430  * expirations or I/O completions".  In the case of POSIX Timers
1431  * we allocate the sigqueue structure from the timer_create.  If this
1432  * allocation fails we are able to report the failure to the application
1433  * with an EAGAIN error.
1434  */
1435 struct sigqueue *sigqueue_alloc(void)
1436 {
1437 	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1438 
1439 	if (q)
1440 		q->flags |= SIGQUEUE_PREALLOC;
1441 
1442 	return q;
1443 }
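/*
 * Illustrative lifecycle sketch (editorial), matching the rationale
 * in the comment above: preallocate at creation so expiry cannot
 * fail for lack of memory.
 *
 *	q = sigqueue_alloc();		// at timer_create(); if this
 *					// fails, report EAGAIN
 *	send_sigqueue(q, tsk, group);	// at each expiry; never needs
 *					// a fresh allocation
 *	sigqueue_free(q);		// at timer_delete()
 */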
1444 
1445 void sigqueue_free(struct sigqueue *q)
1446 {
1447 	unsigned long flags;
1448 	spinlock_t *lock = &current->sighand->siglock;
1449 
1450 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1451 	/*
1452 	 * We must hold ->siglock while testing q->list
1453 	 * to serialize with collect_signal() or with
1454 	 * __exit_signal()->flush_sigqueue().
1455 	 */
1456 	spin_lock_irqsave(lock, flags);
1457 	q->flags &= ~SIGQUEUE_PREALLOC;
1458 	/*
1459 	 * If it is queued it will be freed when dequeued,
1460 	 * like the "regular" sigqueue.
1461 	 */
1462 	if (!list_empty(&q->list))
1463 		q = NULL;
1464 	spin_unlock_irqrestore(lock, flags);
1465 
1466 	if (q)
1467 		__sigqueue_free(q);
1468 }
1469 
1470 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1471 {
1472 	int sig = q->info.si_signo;
1473 	struct sigpending *pending;
1474 	unsigned long flags;
1475 	int ret;
1476 
1477 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1478 
1479 	ret = -1;
1480 	if (!likely(lock_task_sighand(t, &flags)))
1481 		goto ret;
1482 
1483 	ret = 1; /* the signal is ignored */
1484 	if (!prepare_signal(sig, t, 0))
1485 		goto out;
1486 
1487 	ret = 0;
1488 	if (unlikely(!list_empty(&q->list))) {
1489 		/*
1490 		 * If an SI_TIMER entry is already queued, just increment
1491 		 * the overrun count.
1492 		 */
1493 		BUG_ON(q->info.si_code != SI_TIMER);
1494 		q->info.si_overrun++;
1495 		goto out;
1496 	}
1497 	q->info.si_overrun = 0;
1498 
1499 	signalfd_notify(t, sig);
1500 	pending = group ? &t->signal->shared_pending : &t->pending;
1501 	list_add_tail(&q->list, &pending->list);
1502 	sigaddset(&pending->signal, sig);
1503 	complete_signal(sig, t, group);
1504 out:
1505 	unlock_task_sighand(t, &flags);
1506 ret:
1507 	return ret;
1508 }
1509 
1510 /*
1511  * Let a parent know about the death of a child.
1512  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1513  *
1514  * Returns -1 if our parent ignored us and so we've switched to
1515  * self-reaping, or else @sig.
1516  */
1517 int do_notify_parent(struct task_struct *tsk, int sig)
1518 {
1519 	struct siginfo info;
1520 	unsigned long flags;
1521 	struct sighand_struct *psig;
1522 	int ret = sig;
1523 
1524 	BUG_ON(sig == -1);
1525 
1526  	/* do_notify_parent_cldstop should have been called instead.  */
1527  	BUG_ON(task_is_stopped_or_traced(tsk));
1528 
1529 	BUG_ON(!task_ptrace(tsk) &&
1530 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1531 
1532 	info.si_signo = sig;
1533 	info.si_errno = 0;
1534 	/*
1535 	 * we are under tasklist_lock here so our parent is tied to
1536 	 * us and cannot exit and release its namespace.
1537 	 *
1538 	 * the only thing it can do is switch its nsproxy with sys_unshare(),
1539 	 * but unsharing pid namespaces is not allowed, so we will always
1540 	 * see the relevant namespace.
1541 	 *
1542 	 * write_lock() currently calls preempt_disable(), which is the
1543 	 * same as rcu_read_lock(), but according to Oleg it is not
1544 	 * correct to rely on this.
1545 	 */
1546 	rcu_read_lock();
1547 	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1548 	info.si_uid = __task_cred(tsk)->uid;
1549 	rcu_read_unlock();
1550 
1551 	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
1552 				tsk->signal->utime));
1553 	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
1554 				tsk->signal->stime));
1555 
1556 	info.si_status = tsk->exit_code & 0x7f;
1557 	if (tsk->exit_code & 0x80)
1558 		info.si_code = CLD_DUMPED;
1559 	else if (tsk->exit_code & 0x7f)
1560 		info.si_code = CLD_KILLED;
1561 	else {
1562 		info.si_code = CLD_EXITED;
1563 		info.si_status = tsk->exit_code >> 8;
1564 	}
1565 
1566 	psig = tsk->parent->sighand;
1567 	spin_lock_irqsave(&psig->siglock, flags);
1568 	if (!task_ptrace(tsk) && sig == SIGCHLD &&
1569 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1570 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1571 		/*
1572 		 * We are exiting and our parent doesn't care.  POSIX.1
1573 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1574 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1575 		 * automatically and not left for our parent's wait4 call.
1576 		 * Rather than having the parent do it as a magic kind of
1577 		 * signal handler, we just set this to tell do_exit that we
1578 		 * can be cleaned up without becoming a zombie.  Note that
1579 		 * we still call __wake_up_parent in this case, because a
1580 		 * blocked sys_wait4 might now return -ECHILD.
1581 		 *
1582 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1583 		 * is implementation-defined: we do (if you don't want
1584 		 * it, just use SIG_IGN instead).
1585 		 */
1586 		ret = tsk->exit_signal = -1;
1587 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1588 			sig = -1;
1589 	}
1590 	if (valid_signal(sig) && sig > 0)
1591 		__group_send_sig_info(sig, &info, tsk->parent);
1592 	__wake_up_parent(tsk, tsk->parent);
1593 	spin_unlock_irqrestore(&psig->siglock, flags);
1594 
1595 	return ret;
1596 }
1597 
1598 /**
1599  * do_notify_parent_cldstop - notify parent of stopped/continued state change
1600  * @tsk: task reporting the state change
1601  * @for_ptracer: the notification is for ptracer
1602  * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1603  *
1604  * Notify @tsk's parent that the stopped/continued state has changed.  If
1605  * @for_ptracer is %false, @tsk's group leader notifies its real parent.
1606  * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1607  *
1608  * CONTEXT:
1609  * Must be called with tasklist_lock at least read locked.
1610  */
1611 static void do_notify_parent_cldstop(struct task_struct *tsk,
1612 				     bool for_ptracer, int why)
1613 {
1614 	struct siginfo info;
1615 	unsigned long flags;
1616 	struct task_struct *parent;
1617 	struct sighand_struct *sighand;
1618 
1619 	if (for_ptracer) {
1620 		parent = tsk->parent;
1621 	} else {
1622 		tsk = tsk->group_leader;
1623 		parent = tsk->real_parent;
1624 	}
1625 
1626 	info.si_signo = SIGCHLD;
1627 	info.si_errno = 0;
1628 	/*
1629 	 * see comment in do_notify_parent() about the following 4 lines
1630 	 */
1631 	rcu_read_lock();
1632 	info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
1633 	info.si_uid = __task_cred(tsk)->uid;
1634 	rcu_read_unlock();
1635 
1636 	info.si_utime = cputime_to_clock_t(tsk->utime);
1637 	info.si_stime = cputime_to_clock_t(tsk->stime);
1638 
1639  	info.si_code = why;
1640  	switch (why) {
1641  	case CLD_CONTINUED:
1642  		info.si_status = SIGCONT;
1643  		break;
1644  	case CLD_STOPPED:
1645  		info.si_status = tsk->signal->group_exit_code & 0x7f;
1646  		break;
1647  	case CLD_TRAPPED:
1648  		info.si_status = tsk->exit_code & 0x7f;
1649  		break;
1650  	default:
1651  		BUG();
1652  	}
1653 
1654 	sighand = parent->sighand;
1655 	spin_lock_irqsave(&sighand->siglock, flags);
1656 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1657 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1658 		__group_send_sig_info(SIGCHLD, &info, parent);
1659 	/*
1660 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1661 	 */
1662 	__wake_up_parent(tsk, parent);
1663 	spin_unlock_irqrestore(&sighand->siglock, flags);
1664 }
1665 
1666 static inline int may_ptrace_stop(void)
1667 {
1668 	if (!likely(task_ptrace(current)))
1669 		return 0;
1670 	/*
1671 	 * Are we in the middle of do_coredump?
1672 	 * If so, and our tracer is also part of the coredump, stopping
1673 	 * is a deadlock situation, and pointless because our tracer
1674 	 * is dead, so don't allow us to stop.
1675 	 * If SIGKILL was already sent before the caller unlocked
1676 	 * ->siglock we must see ->core_state != NULL. Otherwise it
1677 	 * is safe to enter schedule().
1678 	 */
1679 	if (unlikely(current->mm->core_state) &&
1680 	    unlikely(current->mm == current->parent->mm))
1681 		return 0;
1682 
1683 	return 1;
1684 }
1685 
1686 /*
1687  * Return non-zero if there is a SIGKILL that should be waking us up.
1688  * Called with the siglock held.
1689  */
1690 static int sigkill_pending(struct task_struct *tsk)
1691 {
1692 	return	sigismember(&tsk->pending.signal, SIGKILL) ||
1693 		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1694 }
1695 
1696 /*
1697  * Test whether the target task of the usual cldstop notification - the
1698  * real_parent of @child - is in the same group as the ptracer.
1699  */
1700 static bool real_parent_is_ptracer(struct task_struct *child)
1701 {
1702 	return same_thread_group(child->parent, child->real_parent);
1703 }
1704 
1705 /*
1706  * This must be called with current->sighand->siglock held.
1707  *
1708  * This should be the path for all ptrace stops.
1709  * We always set current->last_siginfo while stopped here.
1710  * That makes it a way to test a stopped process for
1711  * being ptrace-stopped vs being job-control-stopped.
1712  *
1713  * If we actually decide not to stop at all because the tracer
1714  * is gone, we keep current->exit_code unless clear_code.
1715  */
1716 static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1717 	__releases(&current->sighand->siglock)
1718 	__acquires(&current->sighand->siglock)
1719 {
1720 	bool gstop_done = false;
1721 
1722 	if (arch_ptrace_stop_needed(exit_code, info)) {
1723 		/*
1724 		 * The arch code has something special to do before a
1725 		 * ptrace stop.  This is allowed to block, e.g. for faults
1726 		 * on user stack pages.  We can't keep the siglock while
1727 		 * calling arch_ptrace_stop, so we must release it now.
1728 		 * To preserve proper semantics, we must do this before
1729 		 * any signal bookkeeping like checking group_stop_count.
1730 		 * Meanwhile, a SIGKILL could come in before we retake the
1731 		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
1732 		 * So after regaining the lock, we must check for SIGKILL.
1733 		 */
1734 		spin_unlock_irq(&current->sighand->siglock);
1735 		arch_ptrace_stop(exit_code, info);
1736 		spin_lock_irq(&current->sighand->siglock);
1737 		if (sigkill_pending(current))
1738 			return;
1739 	}
1740 
1741 	/*
1742 	 * If @why is CLD_STOPPED, we're trapping to participate in a group
1743 	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
1744 	 * while siglock was released for the arch hook, PENDING could be
1745 	 * clear now.  We act as if SIGCONT is received after TASK_TRACED
1746 	 * is entered - ignore it.
1747 	 */
1748 	if (why == CLD_STOPPED && (current->group_stop & GROUP_STOP_PENDING))
1749 		gstop_done = task_participate_group_stop(current);
1750 
1751 	current->last_siginfo = info;
1752 	current->exit_code = exit_code;
1753 
1754 	/*
1755 	 * TRACED should be visible before TRAPPING is cleared; otherwise,
1756 	 * the tracer might fail do_wait().
1757 	 */
1758 	set_current_state(TASK_TRACED);
1759 
1760 	/*
1761 	 * We're committing to trapping.  Clearing GROUP_STOP_TRAPPING and
1762 	 * transition to TASK_TRACED should be atomic with respect to
1763 	 * siglock.  This should be done after the arch hook as siglock is
1764 	 * released and regrabbed across it.
1765 	 */
1766 	task_clear_group_stop_trapping(current);
1767 
1768 	spin_unlock_irq(&current->sighand->siglock);
1769 	read_lock(&tasklist_lock);
1770 	if (may_ptrace_stop()) {
1771 		/*
1772 		 * Notify parents of the stop.
1773 		 *
1774 		 * While ptraced, there are two parents - the ptracer and
1775 		 * the real_parent of the group_leader.  The ptracer should
1776 		 * know about every stop while the real parent is only
1777 		 * interested in the completion of group stop.  The states
1778 		 * for the two don't interact with each other.  Notify
1779 		 * separately unless they're gonna be duplicates.
1780 		 */
1781 		do_notify_parent_cldstop(current, true, why);
1782 		if (gstop_done && !real_parent_is_ptracer(current))
1783 			do_notify_parent_cldstop(current, false, why);
1784 
1785 		/*
1786 		 * Don't want to allow preemption here, because
1787 		 * sys_ptrace() needs this task to be inactive.
1788 		 *
1789 		 * XXX: implement read_unlock_no_resched().
1790 		 */
1791 		preempt_disable();
1792 		read_unlock(&tasklist_lock);
1793 		preempt_enable_no_resched();
1794 		schedule();
1795 	} else {
1796 		/*
1797 		 * By the time we got the lock, our tracer went away.
1798 		 * Don't drop the lock yet, another tracer may come.
1799 		 *
1800 		 * If @gstop_done, the ptracer went away between group stop
1801 		 * completion and here.  During detach, it would have set
1802 		 * GROUP_STOP_PENDING on us and we'll re-enter TASK_STOPPED
1803 		 * in do_signal_stop() on return, so notifying the real
1804 		 * parent of the group stop completion is enough.
1805 		 */
1806 		if (gstop_done)
1807 			do_notify_parent_cldstop(current, false, why);
1808 
1809 		__set_current_state(TASK_RUNNING);
1810 		if (clear_code)
1811 			current->exit_code = 0;
1812 		read_unlock(&tasklist_lock);
1813 	}
1814 
1815 	/*
1816 	 * While in TASK_TRACED, we were considered "frozen enough".
1817 	 * Now that we woke up, it's crucial if we're supposed to be
1818 	 * frozen that we freeze now before running anything substantial.
1819 	 */
1820 	try_to_freeze();
1821 
1822 	/*
1823 	 * We are back.  Now reacquire the siglock before touching
1824 	 * last_siginfo, so that we are sure to have synchronized with
1825 	 * any signal-sending on another CPU that wants to examine it.
1826 	 */
1827 	spin_lock_irq(&current->sighand->siglock);
1828 	current->last_siginfo = NULL;
1829 
1830 	/*
1831 	 * Queued signals ignored us while we were stopped for tracing.
1832 	 * So check for any that we should take before resuming user mode.
1833 	 * This sets TIF_SIGPENDING, but never clears it.
1834 	 */
1835 	recalc_sigpending_tsk(current);
1836 }
1837 
1838 void ptrace_notify(int exit_code)
1839 {
1840 	siginfo_t info;
1841 
1842 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1843 
1844 	memset(&info, 0, sizeof info);
1845 	info.si_signo = SIGTRAP;
1846 	info.si_code = exit_code;
1847 	info.si_pid = task_pid_vnr(current);
1848 	info.si_uid = current_uid();
1849 
1850 	/* Let the debugger run.  */
1851 	spin_lock_irq(&current->sighand->siglock);
1852 	ptrace_stop(exit_code, CLD_TRAPPED, 1, &info);
1853 	spin_unlock_irq(&current->sighand->siglock);
1854 }
1855 
1856 /*
1857  * This performs the stopping for SIGSTOP and other stop signals.
1858  * We have to stop all threads in the thread group.
1859  * Returns non-zero if we've actually stopped and released the siglock.
1860  * Returns zero if we didn't stop and still hold the siglock.
1861  */
1862 static int do_signal_stop(int signr)
1863 {
1864 	struct signal_struct *sig = current->signal;
1865 
1866 	if (!(current->group_stop & GROUP_STOP_PENDING)) {
1867 		unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME;
1868 		struct task_struct *t;
1869 
1870 		/* signr will be recorded in task->group_stop for retries */
1871 		WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK);
1872 
1873 		if (!likely(current->group_stop & GROUP_STOP_DEQUEUED) ||
1874 		    unlikely(signal_group_exit(sig)))
1875 			return 0;
1876 		/*
1877 		 * There is no group stop already in progress.  We must
1878 		 * initiate one now.
1879 		 *
1880 		 * While ptraced, a task may be resumed while group stop is
1881 		 * still in effect and then receive a stop signal and
1882 		 * initiate another group stop.  This deviates from the
1883 		 * usual behavior as two consecutive stop signals can't
1884 		 * cause two group stops when !ptraced.  That is why we
1885 		 * also check !task_is_stopped(t) below.
1886 		 *
1887 		 * The condition can be distinguished by testing whether
1888 		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
1889 		 * group_exit_code in that case.
1890 		 *
1891 		 * This is not necessary for SIGNAL_STOP_CONTINUED because
1892 		 * an intervening stop signal is required to cause two
1893 		 * continued events regardless of ptrace.
1894 		 */
1895 		if (!(sig->flags & SIGNAL_STOP_STOPPED))
1896 			sig->group_exit_code = signr;
1897 		else
1898 			WARN_ON_ONCE(!task_ptrace(current));
1899 
1900 		current->group_stop &= ~GROUP_STOP_SIGMASK;
1901 		current->group_stop |= signr | gstop;
1902 		sig->group_stop_count = 1;
1903 		for (t = next_thread(current); t != current;
1904 		     t = next_thread(t)) {
1905 			t->group_stop &= ~GROUP_STOP_SIGMASK;
1906 			/*
1907 			 * Setting state to TASK_STOPPED for a group
1908 			 * stop is always done with the siglock held,
1909 			 * so this check has no races.
1910 			 */
1911 			if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) {
1912 				t->group_stop |= signr | gstop;
1913 				sig->group_stop_count++;
1914 				signal_wake_up(t, 0);
1915 			}
1916 		}
1917 	}
1918 retry:
1919 	if (likely(!task_ptrace(current))) {
1920 		int notify = 0;
1921 
1922 		/*
1923 		 * If there are no other threads in the group, or if there
1924 		 * is a group stop in progress and we are the last to stop,
1925 		 * report to the parent.
1926 		 */
1927 		if (task_participate_group_stop(current))
1928 			notify = CLD_STOPPED;
1929 
1930 		__set_current_state(TASK_STOPPED);
1931 		spin_unlock_irq(&current->sighand->siglock);
1932 
1933 		/*
1934 		 * Notify the parent of the group stop completion.  Because
1935 		 * we're not holding either the siglock or tasklist_lock
1936 		 * here, a ptracer may attach in between; however, this is for
1937 		 * group stop and should always be delivered to the real
1938 		 * parent of the group leader.  The new ptracer will get
1939 		 * its notification when this task transitions into
1940 		 * TASK_TRACED.
1941 		 */
1942 		if (notify) {
1943 			read_lock(&tasklist_lock);
1944 			do_notify_parent_cldstop(current, false, notify);
1945 			read_unlock(&tasklist_lock);
1946 		}
1947 
1948 		/* Now we don't run again until woken by SIGCONT or SIGKILL */
1949 		schedule();
1950 
1951 		spin_lock_irq(&current->sighand->siglock);
1952 	} else {
1953 		ptrace_stop(current->group_stop & GROUP_STOP_SIGMASK,
1954 			    CLD_STOPPED, 0, NULL);
1955 		current->exit_code = 0;
1956 	}
1957 
1958 	/*
1959 	 * GROUP_STOP_PENDING could be set if another group stop has
1960 	 * started since we were woken up, or because ptrace wants us to
1961 	 * transition between TASK_STOPPED and TRACED.  Retry group stop.
1962 	 */
1963 	if (current->group_stop & GROUP_STOP_PENDING) {
1964 		WARN_ON_ONCE(!(current->group_stop & GROUP_STOP_SIGMASK));
1965 		goto retry;
1966 	}
1967 
1968 	/* PTRACE_ATTACH might have raced with task killing, clear trapping */
1969 	task_clear_group_stop_trapping(current);
1970 
1971 	spin_unlock_irq(&current->sighand->siglock);
1972 
1973 	tracehook_finish_jctl();
1974 
1975 	return 1;
1976 }
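
/*
 * Illustrative userspace sketch (assumes a multithreaded child whose
 * PID is in "child"): one SIGSTOP initiates the group stop implemented
 * above, which spreads to every thread, and the parent sees a single
 * CLD_STOPPED notification for the whole group:
 *
 *	kill(child, SIGSTOP);
 *	waitpid(child, &status, WUNTRACED);
 *	if (WIFSTOPPED(status))
 *		printf("stopped by signal %d\n", WSTOPSIG(status));
 */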
1977 
1978 static int ptrace_signal(int signr, siginfo_t *info,
1979 			 struct pt_regs *regs, void *cookie)
1980 {
1981 	if (!task_ptrace(current))
1982 		return signr;
1983 
1984 	ptrace_signal_deliver(regs, cookie);
1985 
1986 	/* Let the debugger run.  */
1987 	ptrace_stop(signr, CLD_TRAPPED, 0, info);
1988 
1989 	/* We're back.  Did the debugger cancel the sig?  */
1990 	signr = current->exit_code;
1991 	if (signr == 0)
1992 		return signr;
1993 
1994 	current->exit_code = 0;
1995 
1996 	/*
1997 	 * Update the siginfo structure if the signal has
1998 	 * changed.  If the debugger wanted something
1999 	 * specific in the siginfo structure then it should
2000 	 * have updated *info via PTRACE_SETSIGINFO.
2001 	 */
2002 	if (signr != info->si_signo) {
2003 		info->si_signo = signr;
2004 		info->si_errno = 0;
2005 		info->si_code = SI_USER;
2006 		info->si_pid = task_pid_vnr(current->parent);
2007 		info->si_uid = task_uid(current->parent);
2008 	}
2009 
2010 	/* If the (new) signal is now blocked, requeue it.  */
2011 	if (sigismember(&current->blocked, signr)) {
2012 		specific_send_sig_info(signr, info, current);
2013 		signr = 0;
2014 	}
2015 
2016 	return signr;
2017 }
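
/*
 * Illustrative tracer-side sketch (not from this file): the data
 * argument of PTRACE_CONT becomes the tracee's ->exit_code, which
 * ptrace_signal() reads back above, so a debugger can cancel the
 * dequeued signal (0) or substitute another one:
 *
 *	waitpid(child, &status, 0);
 *	sig = WSTOPSIG(status);
 *	ptrace(PTRACE_CONT, child, 0, sig == SIGUSR1 ? 0 : sig);
 */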
2018 
2019 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
2020 			  struct pt_regs *regs, void *cookie)
2021 {
2022 	struct sighand_struct *sighand = current->sighand;
2023 	struct signal_struct *signal = current->signal;
2024 	int signr;
2025 
2026 relock:
2027 	/*
2028 	 * We'll jump back here any time we were stopped in TASK_STOPPED.
2029 	 * While in TASK_STOPPED, we were considered "frozen enough".
2030 	 * Now that we woke up, it's crucial if we're supposed to be
2031 	 * frozen that we freeze now before running anything substantial.
2032 	 */
2033 	try_to_freeze();
2034 
2035 	spin_lock_irq(&sighand->siglock);
2036 	/*
2037 	 * Every stopped thread goes here after wakeup. Check to see if
2038 	 * we should notify the parent; prepare_signal(SIGCONT) encodes
2039 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2040 	 */
2041 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2042 		struct task_struct *leader;
2043 		int why;
2044 
2045 		if (signal->flags & SIGNAL_CLD_CONTINUED)
2046 			why = CLD_CONTINUED;
2047 		else
2048 			why = CLD_STOPPED;
2049 
2050 		signal->flags &= ~SIGNAL_CLD_MASK;
2051 
2052 		spin_unlock_irq(&sighand->siglock);
2053 
2054 		/*
2055 		 * Notify the parent that we're continuing.  This event is
2056 		 * always per-process and doesn't make a whole lot of sense
2057 		 * for ptracers, who shouldn't consume the state via
2058 		 * wait(2) either, but, for backward compatibility, notify
2059 		 * the ptracer of the group leader too unless it's going to
2060 		 * be a duplicate.
2061 		 */
2062 		read_lock(&tasklist_lock);
2063 
2064 		do_notify_parent_cldstop(current, false, why);
2065 
2066 		leader = current->group_leader;
2067 		if (task_ptrace(leader) && !real_parent_is_ptracer(leader))
2068 			do_notify_parent_cldstop(leader, true, why);
2069 
2070 		read_unlock(&tasklist_lock);
2071 
2072 		goto relock;
2073 	}
2074 
2075 	for (;;) {
2076 		struct k_sigaction *ka;
2077 		/*
2078 		 * Tracing can induce an artificial signal and choose sigaction.
2079 		 * The return value in @signr determines the default action,
2080 		 * but @info->si_signo is the signal number we will report.
2081 		 */
2082 		signr = tracehook_get_signal(current, regs, info, return_ka);
2083 		if (unlikely(signr < 0))
2084 			goto relock;
2085 		if (unlikely(signr != 0))
2086 			ka = return_ka;
2087 		else {
2088 			if (unlikely(current->group_stop &
2089 				     GROUP_STOP_PENDING) && do_signal_stop(0))
2090 				goto relock;
2091 
2092 			signr = dequeue_signal(current, &current->blocked,
2093 					       info);
2094 
2095 			if (!signr)
2096 				break; /* will return 0 */
2097 
2098 			if (signr != SIGKILL) {
2099 				signr = ptrace_signal(signr, info,
2100 						      regs, cookie);
2101 				if (!signr)
2102 					continue;
2103 			}
2104 
2105 			ka = &sighand->action[signr-1];
2106 		}
2107 
2108 		/* Trace actually delivered signals. */
2109 		trace_signal_deliver(signr, info, ka);
2110 
2111 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2112 			continue;
2113 		if (ka->sa.sa_handler != SIG_DFL) {
2114 			/* Run the handler.  */
2115 			*return_ka = *ka;
2116 
2117 			if (ka->sa.sa_flags & SA_ONESHOT)
2118 				ka->sa.sa_handler = SIG_DFL;
2119 
2120 			break; /* will return non-zero "signr" value */
2121 		}
2122 
2123 		/*
2124 		 * Now we are doing the default action for this signal.
2125 		 */
2126 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2127 			continue;
2128 
2129 		/*
2130 		 * Global init gets no signals it doesn't want.
2131 		 * Container-init gets no signals it doesn't want from same
2132 		 * container.
2133 		 *
2134 		 * Note that if global/container-init sees a sig_kernel_only()
2135 		 * signal here, the signal must have been generated internally
2136 		 * or must have come from an ancestor namespace. In either
2137 		 * case, the signal cannot be dropped.
2138 		 */
2139 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2140 				!sig_kernel_only(signr))
2141 			continue;
2142 
2143 		if (sig_kernel_stop(signr)) {
2144 			/*
2145 			 * The default action is to stop all threads in
2146 			 * the thread group.  The job control signals
2147 			 * do nothing in an orphaned pgrp, but SIGSTOP
2148 			 * always works.  Note that siglock needs to be
2149 			 * dropped during the call to is_orphaned_pgrp()
2150 			 * because of lock ordering with tasklist_lock.
2151 			 * This allows an intervening SIGCONT to be posted.
2152 			 * We need to check for that and bail out if necessary.
2153 			 */
2154 			if (signr != SIGSTOP) {
2155 				spin_unlock_irq(&sighand->siglock);
2156 
2157 				/* signals can be posted during this window */
2158 
2159 				if (is_current_pgrp_orphaned())
2160 					goto relock;
2161 
2162 				spin_lock_irq(&sighand->siglock);
2163 			}
2164 
2165 			if (likely(do_signal_stop(info->si_signo))) {
2166 				/* It released the siglock.  */
2167 				goto relock;
2168 			}
2169 
2170 			/*
2171 			 * We didn't actually stop, due to a race
2172 			 * with SIGCONT or something like that.
2173 			 */
2174 			continue;
2175 		}
2176 
2177 		spin_unlock_irq(&sighand->siglock);
2178 
2179 		/*
2180 		 * Anything else is fatal, maybe with a core dump.
2181 		 */
2182 		current->flags |= PF_SIGNALED;
2183 
2184 		if (sig_kernel_coredump(signr)) {
2185 			if (print_fatal_signals)
2186 				print_fatal_signal(regs, info->si_signo);
2187 			/*
2188 			 * If it was able to dump core, this kills all
2189 			 * other threads in the group and synchronizes with
2190 			 * their demise.  If we lost the race with another
2191 			 * thread getting here, it set group_exit_code
2192 			 * first and our do_group_exit call below will use
2193 			 * that value and ignore the one we pass it.
2194 			 */
2195 			do_coredump(info->si_signo, info->si_signo, regs);
2196 		}
2197 
2198 		/*
2199 		 * Death signals, no core dump.
2200 		 */
2201 		do_group_exit(info->si_signo);
2202 		/* NOTREACHED */
2203 	}
2204 	spin_unlock_irq(&sighand->siglock);
2205 	return signr;
2206 }
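
/*
 * Sketch of the typical arch-side caller (simplified; handle_signal()
 * and its exact arguments are arch-specific assumptions):
 *
 *	static void do_signal(struct pt_regs *regs)
 *	{
 *		siginfo_t info;
 *		struct k_sigaction ka;
 *		int signr;
 *
 *		signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 *		if (signr > 0)
 *			handle_signal(signr, &ka, &info, regs);
 *	}
 */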
2207 
2208 /*
2209  * It could be that complete_signal() picked us to notify about the
2210  * group-wide signal. Other threads should be notified now to take
2211  * the shared signals in @which since we will not.
2212  */
2213 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2214 {
2215 	sigset_t retarget;
2216 	struct task_struct *t;
2217 
2218 	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2219 	if (sigisemptyset(&retarget))
2220 		return;
2221 
2222 	t = tsk;
2223 	while_each_thread(tsk, t) {
2224 		if (t->flags & PF_EXITING)
2225 			continue;
2226 
2227 		if (!has_pending_signals(&retarget, &t->blocked))
2228 			continue;
2229 		/* Remove the signals this thread can handle. */
2230 		sigandsets(&retarget, &retarget, &t->blocked);
2231 
2232 		if (!signal_pending(t))
2233 			signal_wake_up(t, 0);
2234 
2235 		if (sigisemptyset(&retarget))
2236 			break;
2237 	}
2238 }
2239 
2240 void exit_signals(struct task_struct *tsk)
2241 {
2242 	int group_stop = 0;
2243 	sigset_t unblocked;
2244 
2245 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2246 		tsk->flags |= PF_EXITING;
2247 		return;
2248 	}
2249 
2250 	spin_lock_irq(&tsk->sighand->siglock);
2251 	/*
2252 	 * From now this task is not visible for group-wide signals,
2253 	 * see wants_signal(), do_signal_stop().
2254 	 */
2255 	tsk->flags |= PF_EXITING;
2256 	if (!signal_pending(tsk))
2257 		goto out;
2258 
2259 	unblocked = tsk->blocked;
2260 	signotset(&unblocked);
2261 	retarget_shared_pending(tsk, &unblocked);
2262 
2263 	if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) &&
2264 	    task_participate_group_stop(tsk))
2265 		group_stop = CLD_STOPPED;
2266 out:
2267 	spin_unlock_irq(&tsk->sighand->siglock);
2268 
2269 	/*
2270 	 * If group stop has completed, deliver the notification.  This
2271 	 * should always go to the real parent of the group leader.
2272 	 */
2273 	if (unlikely(group_stop)) {
2274 		read_lock(&tasklist_lock);
2275 		do_notify_parent_cldstop(tsk, false, group_stop);
2276 		read_unlock(&tasklist_lock);
2277 	}
2278 }
2279 
2280 EXPORT_SYMBOL(recalc_sigpending);
2281 EXPORT_SYMBOL_GPL(dequeue_signal);
2282 EXPORT_SYMBOL(flush_signals);
2283 EXPORT_SYMBOL(force_sig);
2284 EXPORT_SYMBOL(send_sig);
2285 EXPORT_SYMBOL(send_sig_info);
2286 EXPORT_SYMBOL(sigprocmask);
2287 EXPORT_SYMBOL(block_all_signals);
2288 EXPORT_SYMBOL(unblock_all_signals);
2289 
2290 
2291 /*
2292  * System call entry points.
2293  */
2294 
2295 /**
2296  *  sys_restart_syscall - restart a system call
2297  */
2298 SYSCALL_DEFINE0(restart_syscall)
2299 {
2300 	struct restart_block *restart = &current_thread_info()->restart_block;
2301 	return restart->fn(restart);
2302 }
2303 
2304 long do_no_restart_syscall(struct restart_block *param)
2305 {
2306 	return -EINTR;
2307 }
2308 
2309 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2310 {
2311 	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2312 		sigset_t newblocked;
2313 		/* A set of now blocked but previously unblocked signals. */
2314 		sigandnsets(&newblocked, newset, &current->blocked);
2315 		retarget_shared_pending(tsk, &newblocked);
2316 	}
2317 	tsk->blocked = *newset;
2318 	recalc_sigpending();
2319 }
2320 
2321 /**
2322  * set_current_blocked - change current->blocked mask
2323  * @newset: new mask
2324  *
2325  * It is wrong to change ->blocked directly; this helper should be used
2326  * to ensure the process can't miss a shared signal we are going to block.
2327  */
2328 void set_current_blocked(const sigset_t *newset)
2329 {
2330 	struct task_struct *tsk = current;
2331 
2332 	spin_lock_irq(&tsk->sighand->siglock);
2333 	__set_task_blocked(tsk, newset);
2334 	spin_unlock_irq(&tsk->sighand->siglock);
2335 }
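
/*
 * Sketch of how signal-delivery code might use this helper to apply
 * sa_mask after setting up a handler frame ("ka" and "sig" are the
 * hypothetical caller's action and signal number):
 *
 *	sigset_t blocked;
 *
 *	sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
 *	if (!(ka->sa.sa_flags & SA_NODEFER))
 *		sigaddset(&blocked, sig);
 *	set_current_blocked(&blocked);
 */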
2336 
2337 /*
2338  * This is also useful for kernel threads that want to temporarily
2339  * (or permanently) block certain signals.
2340  *
2341  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2342  * interface happily blocks "unblockable" signals like SIGKILL
2343  * and friends.
2344  */
2345 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2346 {
2347 	struct task_struct *tsk = current;
2348 	sigset_t newset;
2349 
2350 	/* Lockless, only current can change ->blocked, never from irq */
2351 	if (oldset)
2352 		*oldset = tsk->blocked;
2353 
2354 	switch (how) {
2355 	case SIG_BLOCK:
2356 		sigorsets(&newset, &tsk->blocked, set);
2357 		break;
2358 	case SIG_UNBLOCK:
2359 		sigandnsets(&newset, &tsk->blocked, set);
2360 		break;
2361 	case SIG_SETMASK:
2362 		newset = *set;
2363 		break;
2364 	default:
2365 		return -EINVAL;
2366 	}
2367 
2368 	set_current_blocked(&newset);
2369 	return 0;
2370 }
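
/*
 * Illustrative in-kernel sketch: a kernel thread shutting out all
 * signals, which - per the note above - blocks even SIGKILL:
 *
 *	sigset_t all;
 *
 *	sigfillset(&all);
 *	sigprocmask(SIG_BLOCK, &all, NULL);
 */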
2371 
2372 /**
2373  *  sys_rt_sigprocmask - change the list of currently blocked signals
2374  *  @how: whether to add, remove, or set signals
2375  *  @nset: new set of signals to add, remove, or set (if non-null)
2376  *  @oset: previous value of signal mask if non-null
2377  *  @sigsetsize: size of sigset_t type
2378  */
2379 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2380 		sigset_t __user *, oset, size_t, sigsetsize)
2381 {
2382 	sigset_t old_set, new_set;
2383 	int error;
2384 
2385 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2386 	if (sigsetsize != sizeof(sigset_t))
2387 		return -EINVAL;
2388 
2389 	old_set = current->blocked;
2390 
2391 	if (nset) {
2392 		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2393 			return -EFAULT;
2394 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2395 
2396 		error = sigprocmask(how, &new_set, NULL);
2397 		if (error)
2398 			return error;
2399 	}
2400 
2401 	if (oset) {
2402 		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2403 			return -EFAULT;
2404 	}
2405 
2406 	return 0;
2407 }
2408 
2409 long do_sigpending(void __user *set, unsigned long sigsetsize)
2410 {
2411 	long error = -EINVAL;
2412 	sigset_t pending;
2413 
2414 	if (sigsetsize > sizeof(sigset_t))
2415 		goto out;
2416 
2417 	spin_lock_irq(&current->sighand->siglock);
2418 	sigorsets(&pending, &current->pending.signal,
2419 		  &current->signal->shared_pending.signal);
2420 	spin_unlock_irq(&current->sighand->siglock);
2421 
2422 	/* Outside the lock because only this thread touches it.  */
2423 	sigandsets(&pending, &current->blocked, &pending);
2424 
2425 	error = -EFAULT;
2426 	if (!copy_to_user(set, &pending, sigsetsize))
2427 		error = 0;
2428 
2429 out:
2430 	return error;
2431 }
2432 
2433 /**
2434  *  sys_rt_sigpending - examine the set of pending signals raised
2435  *			while blocked
2436  *  @set: stores pending signals
2437  *  @sigsetsize: size of sigset_t type or smaller
2438  */
2439 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
2440 {
2441 	return do_sigpending(set, sigsetsize);
2442 }
2443 
2444 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2445 
2446 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2447 {
2448 	int err;
2449 
2450 	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
2451 		return -EFAULT;
2452 	if (from->si_code < 0)
2453 		return __copy_to_user(to, from, sizeof(siginfo_t))
2454 			? -EFAULT : 0;
2455 	/*
2456 	 * If you change siginfo_t structure, please be sure
2457 	 * this code is fixed accordingly.
2458 	 * Please remember to update the signalfd_copyinfo() function
2459 	 * inside fs/signalfd.c too, in case siginfo_t changes.
2460 	 * It should never copy any pad contained in the structure
2461 	 * to avoid security leaks, but must copy the generic
2462 	 * 3 ints plus the relevant union member.
2463 	 */
2464 	err = __put_user(from->si_signo, &to->si_signo);
2465 	err |= __put_user(from->si_errno, &to->si_errno);
2466 	err |= __put_user((short)from->si_code, &to->si_code);
2467 	switch (from->si_code & __SI_MASK) {
2468 	case __SI_KILL:
2469 		err |= __put_user(from->si_pid, &to->si_pid);
2470 		err |= __put_user(from->si_uid, &to->si_uid);
2471 		break;
2472 	case __SI_TIMER:
2473 		err |= __put_user(from->si_tid, &to->si_tid);
2474 		err |= __put_user(from->si_overrun, &to->si_overrun);
2475 		err |= __put_user(from->si_ptr, &to->si_ptr);
2476 		break;
2477 	case __SI_POLL:
2478 		err |= __put_user(from->si_band, &to->si_band);
2479 		err |= __put_user(from->si_fd, &to->si_fd);
2480 		break;
2481 	case __SI_FAULT:
2482 		err |= __put_user(from->si_addr, &to->si_addr);
2483 #ifdef __ARCH_SI_TRAPNO
2484 		err |= __put_user(from->si_trapno, &to->si_trapno);
2485 #endif
2486 #ifdef BUS_MCEERR_AO
2487 		/*
2488 		 * Other callers might not initialize the si_lsb field,
2489 		 * so check explicitly for the right codes here.
2490 		 */
2491 		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
2492 			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2493 #endif
2494 		break;
2495 	case __SI_CHLD:
2496 		err |= __put_user(from->si_pid, &to->si_pid);
2497 		err |= __put_user(from->si_uid, &to->si_uid);
2498 		err |= __put_user(from->si_status, &to->si_status);
2499 		err |= __put_user(from->si_utime, &to->si_utime);
2500 		err |= __put_user(from->si_stime, &to->si_stime);
2501 		break;
2502 	case __SI_RT: /* This is not generated by the kernel as of now. */
2503 	case __SI_MESGQ: /* But this is */
2504 		err |= __put_user(from->si_pid, &to->si_pid);
2505 		err |= __put_user(from->si_uid, &to->si_uid);
2506 		err |= __put_user(from->si_ptr, &to->si_ptr);
2507 		break;
2508 	default: /* this is just in case for now ... */
2509 		err |= __put_user(from->si_pid, &to->si_pid);
2510 		err |= __put_user(from->si_uid, &to->si_uid);
2511 		break;
2512 	}
2513 	return err;
2514 }
2515 
2516 #endif
2517 
2518 /**
2519  *  do_sigtimedwait - wait for queued signals specified in @which
2520  *  @which: queued signals to wait for
2521  *  @info: if non-null, the signal's siginfo is returned here
2522  *  @ts: upper bound on process time suspension
2523  */
2524 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2525 			const struct timespec *ts)
2526 {
2527 	struct task_struct *tsk = current;
2528 	long timeout = MAX_SCHEDULE_TIMEOUT;
2529 	sigset_t mask = *which;
2530 	int sig;
2531 
2532 	if (ts) {
2533 		if (!timespec_valid(ts))
2534 			return -EINVAL;
2535 		timeout = timespec_to_jiffies(ts);
2536 		/*
2537 		 * We can be close to the next tick, add another one
2538 		 * to ensure we will wait at least the time asked for.
2539 		 */
2540 		if (ts->tv_sec || ts->tv_nsec)
2541 			timeout++;
2542 	}
2543 
2544 	/*
2545 	 * Invert the set of allowed signals to get those we want to block.
2546 	 */
2547 	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2548 	signotset(&mask);
2549 
2550 	spin_lock_irq(&tsk->sighand->siglock);
2551 	sig = dequeue_signal(tsk, &mask, info);
2552 	if (!sig && timeout) {
2553 		/*
2554 		 * None ready; temporarily unblock the signals we're
2555 		 * interested in while we sleep, so that we'll be awakened
2556 		 * when they arrive.  Unblocking never hides a shared signal
2557 		 * from other threads, so we can avoid set_current_blocked().
2558 		 */
2559 		tsk->real_blocked = tsk->blocked;
2560 		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2561 		recalc_sigpending();
2562 		spin_unlock_irq(&tsk->sighand->siglock);
2563 
2564 		timeout = schedule_timeout_interruptible(timeout);
2565 
2566 		spin_lock_irq(&tsk->sighand->siglock);
2567 		__set_task_blocked(tsk, &tsk->real_blocked);
2568 		siginitset(&tsk->real_blocked, 0);
2569 		sig = dequeue_signal(tsk, &mask, info);
2570 	}
2571 	spin_unlock_irq(&tsk->sighand->siglock);
2572 
2573 	if (sig)
2574 		return sig;
2575 	return timeout ? -EINTR : -EAGAIN;
2576 }
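
/*
 * Illustrative userspace sketch reaching this code via sigtimedwait(2)
 * (assumes <signal.h> and <errno.h>).  The signal must be blocked
 * first, or it may be delivered to a handler instead of being dequeued
 * here:
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { .tv_sec = 5 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &info, &ts) < 0 && errno == EAGAIN)
 *		puts("timed out");
 */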
2577 
2578 /**
2579  *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
2580  *			in @uthese
2581  *  @uthese: queued signals to wait for
2582  *  @uinfo: if non-null, the signal's siginfo is returned here
2583  *  @uts: upper bound on process time suspension
2584  *  @sigsetsize: size of sigset_t type
2585  */
2586 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2587 		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2588 		size_t, sigsetsize)
2589 {
2590 	sigset_t these;
2591 	struct timespec ts;
2592 	siginfo_t info;
2593 	int ret;
2594 
2595 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2596 	if (sigsetsize != sizeof(sigset_t))
2597 		return -EINVAL;
2598 
2599 	if (copy_from_user(&these, uthese, sizeof(these)))
2600 		return -EFAULT;
2601 
2602 	if (uts) {
2603 		if (copy_from_user(&ts, uts, sizeof(ts)))
2604 			return -EFAULT;
2605 	}
2606 
2607 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2608 
2609 	if (ret > 0 && uinfo) {
2610 		if (copy_siginfo_to_user(uinfo, &info))
2611 			ret = -EFAULT;
2612 	}
2613 
2614 	return ret;
2615 }
2616 
2617 /**
2618  *  sys_kill - send a signal to a process
2619  *  @pid: the PID of the process
2620  *  @sig: signal to be sent
2621  */
2622 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2623 {
2624 	struct siginfo info;
2625 
2626 	info.si_signo = sig;
2627 	info.si_errno = 0;
2628 	info.si_code = SI_USER;
2629 	info.si_pid = task_tgid_vnr(current);
2630 	info.si_uid = current_uid();
2631 
2632 	return kill_something_info(sig, &info, pid);
2633 }
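
/*
 * Illustrative userspace sketch: the sign of pid selects the scope
 * that kill_something_info() acts on - pid > 0 targets one process,
 * pid == 0 the caller's process group, pid < -1 the process group
 * -pid, and pid == -1 every process the caller may signal:
 *
 *	kill(1234, SIGTERM);
 *	kill(-5678, SIGHUP);
 */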
2634 
2635 static int
2636 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2637 {
2638 	struct task_struct *p;
2639 	int error = -ESRCH;
2640 
2641 	rcu_read_lock();
2642 	p = find_task_by_vpid(pid);
2643 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2644 		error = check_kill_permission(sig, info, p);
2645 		/*
2646 		 * The null signal is a permissions and process existence
2647 		 * probe.  No signal is actually delivered.
2648 		 */
2649 		if (!error && sig) {
2650 			error = do_send_sig_info(sig, info, p, false);
2651 			/*
2652 			 * If lock_task_sighand() failed we pretend the task
2653 			 * dies after receiving the signal. The window is tiny,
2654 			 * and the signal is private anyway.
2655 			 */
2656 			if (unlikely(error == -ESRCH))
2657 				error = 0;
2658 		}
2659 	}
2660 	rcu_read_unlock();
2661 
2662 	return error;
2663 }
2664 
2665 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2666 {
2667 	struct siginfo info;
2668 
2669 	info.si_signo = sig;
2670 	info.si_errno = 0;
2671 	info.si_code = SI_TKILL;
2672 	info.si_pid = task_tgid_vnr(current);
2673 	info.si_uid = current_uid();
2674 
2675 	return do_send_specific(tgid, pid, sig, &info);
2676 }
2677 
2678 /**
2679  *  sys_tgkill - send signal to one specific thread
2680  *  @tgid: the thread group ID of the thread
2681  *  @pid: the PID of the thread
2682  *  @sig: signal to be sent
2683  *
2684  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2685  *  exists but no longer belongs to the target thread group. This
2686  *  closes the race between a thread exiting and its PID being reused.
2687  */
2688 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2689 {
2690 	/* This is only valid for single tasks */
2691 	if (pid <= 0 || tgid <= 0)
2692 		return -EINVAL;
2693 
2694 	return do_tkill(tgid, pid, sig);
2695 }
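
/*
 * Illustrative userspace sketch: glibc traditionally offers no
 * tgkill() wrapper, so callers go through syscall(2) (from
 * <sys/syscall.h>), passing the tgid to guard against PID reuse as
 * described above ("tid" would come from gettid() or clone()):
 *
 *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 */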
2696 
2697 /**
2698  *  sys_tkill - send signal to one specific task
2699  *  @pid: the PID of the task
2700  *  @sig: signal to be sent
2701  *
2702  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2703  */
2704 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2705 {
2706 	/* This is only valid for single tasks */
2707 	if (pid <= 0)
2708 		return -EINVAL;
2709 
2710 	return do_tkill(0, pid, sig);
2711 }
2712 
2713 /**
2714  *  sys_rt_sigqueueinfo - queue a signal and its payload to a process
2715  *  @pid: the PID of the process
2716  *  @sig: signal to be sent
2717  *  @uinfo: signal info to be sent
2718  */
2719 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2720 		siginfo_t __user *, uinfo)
2721 {
2722 	siginfo_t info;
2723 
2724 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2725 		return -EFAULT;
2726 
2727 	/* Not even root can pretend to send signals from the kernel.
2728 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2729 	 */
2730 	if (info.si_code >= 0 || info.si_code == SI_TKILL) {
2731 		/* We used to allow any < 0 si_code */
2732 		WARN_ON_ONCE(info.si_code < 0);
2733 		return -EPERM;
2734 	}
2735 	info.si_signo = sig;
2736 
2737 	/* POSIX.1b doesn't mention process groups.  */
2738 	return kill_proc_info(sig, &info, pid);
2739 }
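
/*
 * Illustrative userspace sketch: sigqueue(3) is the usual front end
 * for this syscall; glibc fills in si_code = SI_QUEUE, which is
 * negative and therefore passes the si_code >= 0 check above, while
 * forged kernel-generated codes are rejected with -EPERM:
 *
 *	union sigval v = { .sival_int = 42 };
 *
 *	sigqueue(pid, SIGUSR1, v);
 */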
2740 
2741 long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2742 {
2743 	/* This is only valid for single tasks */
2744 	if (pid <= 0 || tgid <= 0)
2745 		return -EINVAL;
2746 
2747 	/* Not even root can pretend to send signals from the kernel.
2748 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2749 	 */
2750 	if (info->si_code >= 0 || info->si_code == SI_TKILL) {
2751 		/* We used to allow any < 0 si_code */
2752 		WARN_ON_ONCE(info->si_code < 0);
2753 		return -EPERM;
2754 	}
2755 	info->si_signo = sig;
2756 
2757 	return do_send_specific(tgid, pid, sig, info);
2758 }
2759 
2760 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
2761 		siginfo_t __user *, uinfo)
2762 {
2763 	siginfo_t info;
2764 
2765 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2766 		return -EFAULT;
2767 
2768 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
2769 }
2770 
2771 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2772 {
2773 	struct task_struct *t = current;
2774 	struct k_sigaction *k;
2775 	sigset_t mask;
2776 
2777 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2778 		return -EINVAL;
2779 
2780 	k = &t->sighand->action[sig-1];
2781 
2782 	spin_lock_irq(&current->sighand->siglock);
2783 	if (oact)
2784 		*oact = *k;
2785 
2786 	if (act) {
2787 		sigdelsetmask(&act->sa.sa_mask,
2788 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
2789 		*k = *act;
2790 		/*
2791 		 * POSIX 3.3.1.3:
2792 		 *  "Setting a signal action to SIG_IGN for a signal that is
2793 		 *   pending shall cause the pending signal to be discarded,
2794 		 *   whether or not it is blocked."
2795 		 *
2796 		 *  "Setting a signal action to SIG_DFL for a signal that is
2797 		 *   pending and whose default action is to ignore the signal
2798 		 *   (for example, SIGCHLD), shall cause the pending signal to
2799 		 *   be discarded, whether or not it is blocked"
2800 		 */
2801 		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2802 			sigemptyset(&mask);
2803 			sigaddset(&mask, sig);
2804 			rm_from_queue_full(&mask, &t->signal->shared_pending);
2805 			do {
2806 				rm_from_queue_full(&mask, &t->pending);
2807 				t = next_thread(t);
2808 			} while (t != current);
2809 		}
2810 	}
2811 
2812 	spin_unlock_irq(&current->sighand->siglock);
2813 	return 0;
2814 }
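
/*
 * Illustrative userspace sketch of the POSIX discard rule implemented
 * above: installing SIG_IGN flushes a pending signal even while it is
 * blocked:
 *
 *	struct sigaction sa = { .sa_handler = SIG_IGN };
 *
 *	sigaction(SIGUSR1, &sa, NULL);
 */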
2815 
2816 int
2817 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2818 {
2819 	stack_t oss;
2820 	int error;
2821 
2822 	oss.ss_sp = (void __user *) current->sas_ss_sp;
2823 	oss.ss_size = current->sas_ss_size;
2824 	oss.ss_flags = sas_ss_flags(sp);
2825 
2826 	if (uss) {
2827 		void __user *ss_sp;
2828 		size_t ss_size;
2829 		int ss_flags;
2830 
2831 		error = -EFAULT;
2832 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
2833 			goto out;
2834 		error = __get_user(ss_sp, &uss->ss_sp) |
2835 			__get_user(ss_flags, &uss->ss_flags) |
2836 			__get_user(ss_size, &uss->ss_size);
2837 		if (error)
2838 			goto out;
2839 
2840 		error = -EPERM;
2841 		if (on_sig_stack(sp))
2842 			goto out;
2843 
2844 		error = -EINVAL;
2845 		/*
2846 		 * Note - this code used to test ss_flags incorrectly:
2847 		 *  	  old code may have been written using ss_flags==0
2848 		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
2849 		 *	  way that worked) - this fix preserves that older
2850 		 *	  mechanism.
2851 		 */
2852 		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2853 			goto out;
2854 
2855 		if (ss_flags == SS_DISABLE) {
2856 			ss_size = 0;
2857 			ss_sp = NULL;
2858 		} else {
2859 			error = -ENOMEM;
2860 			if (ss_size < MINSIGSTKSZ)
2861 				goto out;
2862 		}
2863 
2864 		current->sas_ss_sp = (unsigned long) ss_sp;
2865 		current->sas_ss_size = ss_size;
2866 	}
2867 
2868 	error = 0;
2869 	if (uoss) {
2870 		error = -EFAULT;
2871 		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
2872 			goto out;
2873 		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
2874 			__put_user(oss.ss_size, &uoss->ss_size) |
2875 			__put_user(oss.ss_flags, &uoss->ss_flags);
2876 	}
2877 
2878 out:
2879 	return error;
2880 }
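
/*
 * Illustrative userspace sketch ("segv_handler" and the malloc()ed
 * stack are assumptions): an alternate stack registered through this
 * code is what lets SA_ONSTACK handlers run, e.g. for SIGSEGV after a
 * stack overflow:
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *	};
 *	struct sigaction sa = {
 *		.sa_handler = segv_handler,
 *		.sa_flags = SA_ONSTACK,
 *	};
 *
 *	sigaltstack(&ss, NULL);
 *	sigaction(SIGSEGV, &sa, NULL);
 */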
2881 
2882 #ifdef __ARCH_WANT_SYS_SIGPENDING
2883 
2884 /**
2885  *  sys_sigpending - examine pending signals
2886  *  @set: where the mask of pending signals is returned
2887  */
2888 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
2889 {
2890 	return do_sigpending(set, sizeof(*set));
2891 }
2892 
2893 #endif
2894 
2895 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2896 /**
2897  *  sys_sigprocmask - examine and change blocked signals
2898  *  @how: whether to add, remove, or set signals
2899  *  @nset: signals to add or remove (if non-null)
2900  *  @oset: previous value of signal mask if non-null
2901  *
2902  * Some platforms have their own version with special arguments;
2903  * others support only sys_rt_sigprocmask.
2904  */
2905 
2906 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
2907 		old_sigset_t __user *, oset)
2908 {
2909 	old_sigset_t old_set, new_set;
2910 	sigset_t new_blocked;
2911 
2912 	old_set = current->blocked.sig[0];
2913 
2914 	if (nset) {
2915 		if (copy_from_user(&new_set, nset, sizeof(*nset)))
2916 			return -EFAULT;
2917 		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2918 
2919 		new_blocked = current->blocked;
2920 
2921 		switch (how) {
2922 		case SIG_BLOCK:
2923 			sigaddsetmask(&new_blocked, new_set);
2924 			break;
2925 		case SIG_UNBLOCK:
2926 			sigdelsetmask(&new_blocked, new_set);
2927 			break;
2928 		case SIG_SETMASK:
2929 			new_blocked.sig[0] = new_set;
2930 			break;
2931 		default:
2932 			return -EINVAL;
2933 		}
2934 
2935 		set_current_blocked(&new_blocked);
2936 	}
2937 
2938 	if (oset) {
2939 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2940 			return -EFAULT;
2941 	}
2942 
2943 	return 0;
2944 }
2945 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2946 
2947 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2948 /**
2949  *  sys_rt_sigaction - alter an action taken by a process
2950  *  @sig: signal to be sent
2951  *  @act: new sigaction
2952  *  @oact: used to save the previous sigaction
2953  *  @sigsetsize: size of sigset_t type
2954  */
2955 SYSCALL_DEFINE4(rt_sigaction, int, sig,
2956 		const struct sigaction __user *, act,
2957 		struct sigaction __user *, oact,
2958 		size_t, sigsetsize)
2959 {
2960 	struct k_sigaction new_sa, old_sa;
2961 	int ret = -EINVAL;
2962 
2963 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2964 	if (sigsetsize != sizeof(sigset_t))
2965 		goto out;
2966 
2967 	if (act) {
2968 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2969 			return -EFAULT;
2970 	}
2971 
2972 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2973 
2974 	if (!ret && oact) {
2975 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2976 			return -EFAULT;
2977 	}
2978 out:
2979 	return ret;
2980 }
2981 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2982 
2983 #ifdef __ARCH_WANT_SYS_SGETMASK
2984 
2985 /*
2986  * For backwards compatibility.  Functionality superseded by sigprocmask.
2987  */
2988 SYSCALL_DEFINE0(sgetmask)
2989 {
2990 	/* SMP safe */
2991 	return current->blocked.sig[0];
2992 }
2993 
2994 SYSCALL_DEFINE1(ssetmask, int, newmask)
2995 {
2996 	int old;
2997 
2998 	spin_lock_irq(&current->sighand->siglock);
2999 	old = current->blocked.sig[0];
3000 
3001 	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
3002 						  sigmask(SIGSTOP)));
3003 	recalc_sigpending();
3004 	spin_unlock_irq(&current->sighand->siglock);
3005 
3006 	return old;
3007 }
3008 #endif /* __ARCH_WANT_SYS_SGETMASK */
3009 
3010 #ifdef __ARCH_WANT_SYS_SIGNAL
3011 /*
3012  * For backwards compatibility.  Functionality superseded by sigaction.
3013  */
3014 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3015 {
3016 	struct k_sigaction new_sa, old_sa;
3017 	int ret;
3018 
3019 	new_sa.sa.sa_handler = handler;
3020 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3021 	sigemptyset(&new_sa.sa.sa_mask);
3022 
3023 	ret = do_sigaction(sig, &new_sa, &old_sa);
3024 
3025 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3026 }
3027 #endif /* __ARCH_WANT_SYS_SIGNAL */
3028 
3029 #ifdef __ARCH_WANT_SYS_PAUSE
3030 
3031 SYSCALL_DEFINE0(pause)
3032 {
3033 	while (!signal_pending(current)) {
3034 		current->state = TASK_INTERRUPTIBLE;
3035 		schedule();
3036 	}
3037 	return -ERESTARTNOHAND;
3038 }
3039 
3040 #endif
3041 
3042 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
3043 /**
3044  *  sys_rt_sigsuspend - replace the signal mask with the @unewset
3045  *	value until a signal is received
3046  *  @unewset: new signal mask value
3047  *  @sigsetsize: size of sigset_t type
3048  */
3049 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3050 {
3051 	sigset_t newset;
3052 
3053 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3054 	if (sigsetsize != sizeof(sigset_t))
3055 		return -EINVAL;
3056 
3057 	if (copy_from_user(&newset, unewset, sizeof(newset)))
3058 		return -EFAULT;
3059 	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
3060 
3061 	spin_lock_irq(&current->sighand->siglock);
3062 	current->saved_sigmask = current->blocked;
3063 	current->blocked = newset;
3064 	recalc_sigpending();
3065 	spin_unlock_irq(&current->sighand->siglock);
3066 
3067 	current->state = TASK_INTERRUPTIBLE;
3068 	schedule();
3069 	set_restore_sigmask();
3070 	return -ERESTARTNOHAND;
3071 }
3072 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
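
/*
 * Illustrative userspace sketch: the classic race-free wait that this
 * syscall exists for ("done" would be a volatile sig_atomic_t set by
 * the SIGUSR1 handler).  The predicate is tested with the signal
 * blocked, then sigsuspend(2) atomically restores the old mask and
 * sleeps, so the wakeup cannot slip in between test and sleep:
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!done)
 *		sigsuspend(&old);
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */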
3073 
3074 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
3075 {
3076 	return NULL;
3077 }
3078 
3079 void __init signals_init(void)
3080 {
3081 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3082 }
3083 
3084 #ifdef CONFIG_KGDB_KDB
3085 #include <linux/kdb.h>
3086 /*
3087  * kdb_send_sig_info - Allows kdb to send signals without exposing
3088  * signal internals.  This function checks if the required locks are
3089  * available before calling the main signal code, to avoid kdb
3090  * deadlocks.
3091  */
3092 void
3093 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3094 {
3095 	static struct task_struct *kdb_prev_t;
3096 	int sig, new_t;
3097 	if (!spin_trylock(&t->sighand->siglock)) {
3098 		kdb_printf("Can't do kill command now.\n"
3099 			   "The sigmask lock is held somewhere else in "
3100 			   "the kernel; try again later\n");
3101 		return;
3102 	}
3103 	spin_unlock(&t->sighand->siglock);
3104 	new_t = kdb_prev_t != t;
3105 	kdb_prev_t = t;
3106 	if (t->state != TASK_RUNNING && new_t) {
3107 		kdb_printf("Process is not RUNNING, sending a signal from "
3108 			   "kdb risks deadlock\n"
3109 			   "on the run queue locks. "
3110 			   "The signal has _not_ been sent.\n"
3111 			   "Reissue the kill command if you want to risk "
3112 			   "the deadlock.\n");
3113 		return;
3114 	}
3115 	sig = info->si_signo;
3116 	if (send_sig_info(sig, info, t))
3117 		kdb_printf("Fail to deliver Signal %d to process %d.\n",
3118 		kdb_printf("Failed to deliver signal %d to process %d.\n",
3119 			   sig, t->pid);
3120 	else
3121 		kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
3122 #endif	/* CONFIG_KGDB_KDB */
3123