xref: /linux/kernel/signal.c (revision f7511d5f66f01fc451747b24e79f3ada7a3af9af)
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *		Changes to use preallocated sigqueue structures
10  *		to allow signals to be sent reliably.
11  */
12 
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/security.h>
21 #include <linux/syscalls.h>
22 #include <linux/ptrace.h>
23 #include <linux/signal.h>
24 #include <linux/signalfd.h>
25 #include <linux/capability.h>
26 #include <linux/freezer.h>
27 #include <linux/pid_namespace.h>
28 #include <linux/nsproxy.h>
29 
30 #include <asm/param.h>
31 #include <asm/uaccess.h>
32 #include <asm/unistd.h>
33 #include <asm/siginfo.h>
34 #include "audit.h"	/* audit_signal_info() */
35 
36 /*
37  * SLAB caches for signal bits.
38  */
39 
40 static struct kmem_cache *sigqueue_cachep;
41 
42 static int __sig_ignored(struct task_struct *t, int sig)
43 {
44 	void __user *handler;
45 
46 	/* Is it explicitly or implicitly ignored? */
47 
48 	handler = t->sighand->action[sig - 1].sa.sa_handler;
49 	return handler == SIG_IGN ||
50 		(handler == SIG_DFL && sig_kernel_ignore(sig));
51 }
52 
53 static int sig_ignored(struct task_struct *t, int sig)
54 {
55 	/*
56 	 * Tracers always want to know about signals..
57 	 */
58 	if (t->ptrace & PT_PTRACED)
59 		return 0;
60 
61 	/*
62 	 * Blocked signals are never ignored, since the
63 	 * signal handler may change by the time it is
64 	 * unblocked.
65 	 */
66 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
67 		return 0;
68 
69 	return __sig_ignored(t, sig);
70 }
71 
72 /*
73  * Re-calculate pending state from the set of locally pending
74  * signals, globally pending signals, and blocked signals.
75  */
76 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
77 {
78 	unsigned long ready;
79 	long i;
80 
81 	switch (_NSIG_WORDS) {
82 	default:
83 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
84 			ready |= signal->sig[i] &~ blocked->sig[i];
85 		break;
86 
87 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
88 		ready |= signal->sig[2] &~ blocked->sig[2];
89 		ready |= signal->sig[1] &~ blocked->sig[1];
90 		ready |= signal->sig[0] &~ blocked->sig[0];
91 		break;
92 
93 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
94 		ready |= signal->sig[0] &~ blocked->sig[0];
95 		break;
96 
97 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
98 	}
99 	return ready != 0;
100 }
101 
102 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
103 
104 static int recalc_sigpending_tsk(struct task_struct *t)
105 {
106 	if (t->signal->group_stop_count > 0 ||
107 	    PENDING(&t->pending, &t->blocked) ||
108 	    PENDING(&t->signal->shared_pending, &t->blocked)) {
109 		set_tsk_thread_flag(t, TIF_SIGPENDING);
110 		return 1;
111 	}
112 	/*
113 	 * We must never clear the flag in another thread, or in current
114 	 * when it's possible the current syscall is returning -ERESTART*.
115 	 * So we don't clear it here; only callers that know it is safe clear it.
116 	 */
117 	return 0;
118 }
119 
120 /*
121  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
122  * This is superfluous when called on current; the wakeup is a harmless no-op.
123  */
124 void recalc_sigpending_and_wake(struct task_struct *t)
125 {
126 	if (recalc_sigpending_tsk(t))
127 		signal_wake_up(t, 0);
128 }
129 
130 void recalc_sigpending(void)
131 {
132 	if (!recalc_sigpending_tsk(current) && !freezing(current))
133 		clear_thread_flag(TIF_SIGPENDING);
134 
135 }
136 
137 /* Given the mask, find the first available signal that should be serviced. */
138 
139 int next_signal(struct sigpending *pending, sigset_t *mask)
140 {
141 	unsigned long i, *s, *m, x;
142 	int sig = 0;
143 
144 	s = pending->signal.sig;
145 	m = mask->sig;
146 	switch (_NSIG_WORDS) {
147 	default:
148 		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
149 			if ((x = *s &~ *m) != 0) {
150 				sig = ffz(~x) + i*_NSIG_BPW + 1;
151 				break;
152 			}
153 		break;
154 
155 	case 2: if ((x = s[0] &~ m[0]) != 0)
156 			sig = 1;
157 		else if ((x = s[1] &~ m[1]) != 0)
158 			sig = _NSIG_BPW + 1;
159 		else
160 			break;
161 		sig += ffz(~x);
162 		break;
163 
164 	case 1: if ((x = *s &~ *m) != 0)
165 			sig = ffz(~x) + 1;
166 		break;
167 	}
168 
169 	return sig;
170 }
171 
172 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
173 					 int override_rlimit)
174 {
175 	struct sigqueue *q = NULL;
176 	struct user_struct *user;
177 
178 	/*
179 	 * In order to avoid problems with "switch_user()", we want to make
180 	 * sure that the compiler doesn't re-load "t->user"
181 	 */
182 	user = t->user;
183 	barrier();
184 	atomic_inc(&user->sigpending);
185 	if (override_rlimit ||
186 	    atomic_read(&user->sigpending) <=
187 			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
188 		q = kmem_cache_alloc(sigqueue_cachep, flags);
189 	if (unlikely(q == NULL)) {
190 		atomic_dec(&user->sigpending);
191 	} else {
192 		INIT_LIST_HEAD(&q->list);
193 		q->flags = 0;
194 		q->user = get_uid(user);
195 	}
196 	return(q);
197 }
198 
199 static void __sigqueue_free(struct sigqueue *q)
200 {
201 	if (q->flags & SIGQUEUE_PREALLOC)
202 		return;
203 	atomic_dec(&q->user->sigpending);
204 	free_uid(q->user);
205 	kmem_cache_free(sigqueue_cachep, q);
206 }
207 
208 void flush_sigqueue(struct sigpending *queue)
209 {
210 	struct sigqueue *q;
211 
212 	sigemptyset(&queue->signal);
213 	while (!list_empty(&queue->list)) {
214 		q = list_entry(queue->list.next, struct sigqueue , list);
215 		list_del_init(&q->list);
216 		__sigqueue_free(q);
217 	}
218 }
219 
220 /*
221  * Flush all pending signals for a task.
222  */
223 void flush_signals(struct task_struct *t)
224 {
225 	unsigned long flags;
226 
227 	spin_lock_irqsave(&t->sighand->siglock, flags);
228 	clear_tsk_thread_flag(t, TIF_SIGPENDING);
229 	flush_sigqueue(&t->pending);
230 	flush_sigqueue(&t->signal->shared_pending);
231 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
232 }
233 
234 void ignore_signals(struct task_struct *t)
235 {
236 	int i;
237 
238 	for (i = 0; i < _NSIG; ++i)
239 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
240 
241 	flush_signals(t);
242 }
243 
244 /*
245  * Flush all handlers for a task.
246  */
247 
248 void
249 flush_signal_handlers(struct task_struct *t, int force_default)
250 {
251 	int i;
252 	struct k_sigaction *ka = &t->sighand->action[0];
253 	for (i = _NSIG ; i != 0 ; i--) {
254 		if (force_default || ka->sa.sa_handler != SIG_IGN)
255 			ka->sa.sa_handler = SIG_DFL;
256 		ka->sa.sa_flags = 0;
257 		sigemptyset(&ka->sa.sa_mask);
258 		ka++;
259 	}
260 }
261 
262 int unhandled_signal(struct task_struct *tsk, int sig)
263 {
264 	if (is_global_init(tsk))
265 		return 1;
266 	if (tsk->ptrace & PT_PTRACED)
267 		return 0;
268 	return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
269 		(tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
270 }
271 
272 
273 /* Notify the system that a driver wants to block all signals for this
274  * process, and wants to be notified if any signals at all were to be
275  * sent/acted upon.  If the notifier routine returns non-zero, then the
276  * signal will be acted upon after all.  If the notifier routine returns 0,
277  * then the signal will be blocked.  Only one block per process is
278  * allowed.  priv is a pointer to private data that the notifier routine
279  * can use to determine if the signal should be blocked or not.  */
280 
281 void
282 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
283 {
284 	unsigned long flags;
285 
286 	spin_lock_irqsave(&current->sighand->siglock, flags);
287 	current->notifier_mask = mask;
288 	current->notifier_data = priv;
289 	current->notifier = notifier;
290 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
291 }
292 
293 /* Notify the system that blocking has ended. */
294 
295 void
296 unblock_all_signals(void)
297 {
298 	unsigned long flags;
299 
300 	spin_lock_irqsave(&current->sighand->siglock, flags);
301 	current->notifier = NULL;
302 	current->notifier_data = NULL;
303 	recalc_sigpending();
304 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
305 }
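/*
 * Editor's note: a minimal sketch, guarded by "#if 0" so it is not part of
 * the build, of how a driver might use block_all_signals() and
 * unblock_all_signals().  struct my_device, its "busy" field and
 * do_hardware_io() are hypothetical names used only for illustration.
 */
#if 0
static int my_signal_notifier(void *priv)
{
	struct my_device *dev = priv;

	/* 0 = keep holding signals back, non-zero = let them be acted upon */
	return !dev->busy;
}

static void my_critical_io(struct my_device *dev)
{
	sigset_t mask;

	sigfillset(&mask);	/* route every signal through the notifier */
	block_all_signals(my_signal_notifier, dev, &mask);

	do_hardware_io(dev);

	unblock_all_signals();
}
#endif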
306 
307 static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
308 {
309 	struct sigqueue *q, *first = NULL;
310 	int still_pending = 0;
311 
312 	if (unlikely(!sigismember(&list->signal, sig)))
313 		return 0;
314 
315 	/*
316 	 * Collect the siginfo appropriate to this signal.  Check if
317 	 * there is another siginfo for the same signal.
318 	*/
319 	list_for_each_entry(q, &list->list, list) {
320 		if (q->info.si_signo == sig) {
321 			if (first) {
322 				still_pending = 1;
323 				break;
324 			}
325 			first = q;
326 		}
327 	}
328 	if (first) {
329 		list_del_init(&first->list);
330 		copy_siginfo(info, &first->info);
331 		__sigqueue_free(first);
332 		if (!still_pending)
333 			sigdelset(&list->signal, sig);
334 	} else {
335 
336 		/* Ok, it wasn't in the queue.  This must be
337 		   a fast-pathed signal or we must have been
338 		   out of queue space.  So zero out the info.
339 		 */
340 		sigdelset(&list->signal, sig);
341 		info->si_signo = sig;
342 		info->si_errno = 0;
343 		info->si_code = 0;
344 		info->si_pid = 0;
345 		info->si_uid = 0;
346 	}
347 	return 1;
348 }
349 
350 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
351 			siginfo_t *info)
352 {
353 	int sig = next_signal(pending, mask);
354 
355 	if (sig) {
356 		if (current->notifier) {
357 			if (sigismember(current->notifier_mask, sig)) {
358 				if (!(current->notifier)(current->notifier_data)) {
359 					clear_thread_flag(TIF_SIGPENDING);
360 					return 0;
361 				}
362 			}
363 		}
364 
365 		if (!collect_signal(sig, pending, info))
366 			sig = 0;
367 	}
368 
369 	return sig;
370 }
371 
372 /*
373  * Dequeue a signal and return the element to the caller, which is
374  * expected to free it.
375  *
376  * All callers have to hold the siglock.
377  */
378 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
379 {
380 	int signr;
381 
382 	/* We only dequeue private signals from ourselves, we don't let
383 	 * signalfd steal them
384 	 */
385 	signr = __dequeue_signal(&tsk->pending, mask, info);
386 	if (!signr) {
387 		signr = __dequeue_signal(&tsk->signal->shared_pending,
388 					 mask, info);
389 		/*
390 		 * itimer signal ?
391 		 *
392 		 * itimers are process shared and we restart periodic
393 		 * itimers in the signal delivery path to prevent DoS
394 		 * attacks in the high resolution timer case. This is
395 		 * compliant with the old way of self restarting
396 		 * itimers, as the SIGALRM is a legacy signal and only
397 		 * queued once. Changing the restart behaviour to
398 		 * restart the timer in the signal dequeue path is
399 		 * reducing the timer noise on heavy loaded !highres
400 		 * systems too.
401 		 */
402 		if (unlikely(signr == SIGALRM)) {
403 			struct hrtimer *tmr = &tsk->signal->real_timer;
404 
405 			if (!hrtimer_is_queued(tmr) &&
406 			    tsk->signal->it_real_incr.tv64 != 0) {
407 				hrtimer_forward(tmr, tmr->base->get_time(),
408 						tsk->signal->it_real_incr);
409 				hrtimer_restart(tmr);
410 			}
411 		}
412 	}
413 
414 	recalc_sigpending();
415 	if (!signr)
416 		return 0;
417 
418 	if (unlikely(sig_kernel_stop(signr))) {
419 		/*
420 		 * Set a marker that we have dequeued a stop signal.  Our
421 		 * caller might release the siglock and then the pending
422 		 * stop signal it is about to process is no longer in the
423 		 * pending bitmasks, but must still be cleared by a SIGCONT
424 		 * (and overruled by a SIGKILL).  So those cases clear this
425 		 * shared flag after we've set it.  Note that this flag may
426 		 * remain set after the signal we return is ignored or
427 		 * handled.  That doesn't matter because its only purpose
428 		 * is to alert stop-signal processing code when another
429 		 * processor has come along and cleared the flag.
430 		 */
431 		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
432 			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
433 	}
434 	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
435 		/*
436 		 * Release the siglock to ensure proper locking order
437 		 * of timer locks outside of siglocks.  Note, we leave
438 		 * irqs disabled here, since the posix-timers code is
439 		 * about to disable them again anyway.
440 		 */
441 		spin_unlock(&tsk->sighand->siglock);
442 		do_schedule_next_timer(info);
443 		spin_lock(&tsk->sighand->siglock);
444 	}
445 	return signr;
446 }
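/*
 * Editor's note: a minimal sketch, under "#if 0", of the caller pattern
 * described above -- dequeue_signal() must be called with the siglock held
 * and the caller owns the returned siginfo.  example_dequeue() is a
 * hypothetical helper, not part of the original file.
 */
#if 0
static int example_dequeue(siginfo_t *info)
{
	int signr;

	spin_lock_irq(&current->sighand->siglock);
	signr = dequeue_signal(current, &current->blocked, info);
	spin_unlock_irq(&current->sighand->siglock);

	return signr;	/* 0 if nothing deliverable was pending */
}
#endif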
447 
448 /*
449  * Tell a process that it has a new active signal..
450  *
451  * NOTE! we rely on the previous spin_lock to
452  * lock interrupts for us! We can only be called with
453  * "siglock" held, and local interrupts must
454  * have been disabled when it was acquired!
455  *
456  * No need to set need_resched since signal event passing
457  * goes through ->blocked
458  */
459 void signal_wake_up(struct task_struct *t, int resume)
460 {
461 	unsigned int mask;
462 
463 	set_tsk_thread_flag(t, TIF_SIGPENDING);
464 
465 	/*
466 	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
467 	 * case. We don't check t->state here because there is a race with it
468  * executing on another processor and just now entering stopped state.
469 	 * By using wake_up_state, we ensure the process will wake up and
470 	 * handle its death signal.
471 	 */
472 	mask = TASK_INTERRUPTIBLE;
473 	if (resume)
474 		mask |= TASK_WAKEKILL;
475 	if (!wake_up_state(t, mask))
476 		kick_process(t);
477 }
478 
479 /*
480  * Remove signals in mask from the pending set and queue.
481  * Returns 1 if any signals were found.
482  *
483  * All callers must be holding the siglock.
484  *
485  * This version takes a sigset mask and looks at all signals,
486  * not just those in the first mask word.
487  */
488 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
489 {
490 	struct sigqueue *q, *n;
491 	sigset_t m;
492 
493 	sigandsets(&m, mask, &s->signal);
494 	if (sigisemptyset(&m))
495 		return 0;
496 
497 	signandsets(&s->signal, &s->signal, mask);
498 	list_for_each_entry_safe(q, n, &s->list, list) {
499 		if (sigismember(mask, q->info.si_signo)) {
500 			list_del_init(&q->list);
501 			__sigqueue_free(q);
502 		}
503 	}
504 	return 1;
505 }
506 /*
507  * Remove signals in mask from the pending set and queue.
508  * Returns 1 if any signals were found.
509  *
510  * All callers must be holding the siglock.
511  */
512 static int rm_from_queue(unsigned long mask, struct sigpending *s)
513 {
514 	struct sigqueue *q, *n;
515 
516 	if (!sigtestsetmask(&s->signal, mask))
517 		return 0;
518 
519 	sigdelsetmask(&s->signal, mask);
520 	list_for_each_entry_safe(q, n, &s->list, list) {
521 		if (q->info.si_signo < SIGRTMIN &&
522 		    (mask & sigmask(q->info.si_signo))) {
523 			list_del_init(&q->list);
524 			__sigqueue_free(q);
525 		}
526 	}
527 	return 1;
528 }
529 
530 /*
531  * Bad permissions for sending the signal
532  */
533 static int check_kill_permission(int sig, struct siginfo *info,
534 				 struct task_struct *t)
535 {
536 	struct pid *sid;
537 	int error;
538 
539 	if (!valid_signal(sig))
540 		return -EINVAL;
541 
542 	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
543 		return 0;
544 
545 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
546 	if (error)
547 		return error;
548 
549 	if ((current->euid ^ t->suid) && (current->euid ^ t->uid) &&
550 	    (current->uid  ^ t->suid) && (current->uid  ^ t->uid) &&
551 	    !capable(CAP_KILL)) {
552 		switch (sig) {
553 		case SIGCONT:
554 			sid = task_session(t);
555 			/*
556 			 * We don't return the error if sid == NULL. The
557 			 * task was unhashed, the caller must notice this.
558 			 */
559 			if (!sid || sid == task_session(current))
560 				break;
561 		default:
562 			return -EPERM;
563 		}
564 	}
565 
566 	return security_task_kill(t, info, sig, 0);
567 }
568 
569 /* forward decl */
570 static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
571 
572 /*
573  * Handle magic process-wide effects of stop/continue signals. Unlike
574  * the signal actions, these happen immediately at signal-generation
575  * time regardless of blocking, ignoring, or handling.  This does the
576  * actual continuing for SIGCONT, but not the actual stopping for stop
577  * signals. The process stop is done as a signal action for SIG_DFL.
578  *
579  * Returns true if the signal should be actually delivered, otherwise
580  * it should be dropped.
581  */
582 static int prepare_signal(int sig, struct task_struct *p)
583 {
584 	struct signal_struct *signal = p->signal;
585 	struct task_struct *t;
586 
587 	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
588 		/*
589 		 * The process is in the middle of dying, nothing to do.
590 		 */
591 	} else if (sig_kernel_stop(sig)) {
592 		/*
593 		 * This is a stop signal.  Remove SIGCONT from all queues.
594 		 */
595 		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
596 		t = p;
597 		do {
598 			rm_from_queue(sigmask(SIGCONT), &t->pending);
599 		} while_each_thread(p, t);
600 	} else if (sig == SIGCONT) {
601 		unsigned int why;
602 		/*
603 		 * Remove all stop signals from all queues,
604 		 * and wake all threads.
605 		 */
606 		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
607 		t = p;
608 		do {
609 			unsigned int state;
610 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
611 			/*
612 			 * If there is a handler for SIGCONT, we must make
613 			 * sure that no thread returns to user mode before
614 			 * we post the signal, in case it was the only
615 			 * thread eligible to run the signal handler--then
616 			 * it must not do anything between resuming and
617 			 * running the handler.  With the TIF_SIGPENDING
618 			 * flag set, the thread will pause and acquire the
619 			 * siglock that we hold now and until we've queued
620 			 * the pending signal.
621 			 *
622 			 * Wake up the stopped thread _after_ setting
623 			 * TIF_SIGPENDING
624 			 */
625 			state = __TASK_STOPPED;
626 			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
627 				set_tsk_thread_flag(t, TIF_SIGPENDING);
628 				state |= TASK_INTERRUPTIBLE;
629 			}
630 			wake_up_state(t, state);
631 		} while_each_thread(p, t);
632 
633 		/*
634 		 * Notify the parent with CLD_CONTINUED if we were stopped.
635 		 *
636 		 * If we were in the middle of a group stop, we pretend it
637 		 * was already finished, and then continued. Since SIGCHLD
638 		 * doesn't queue we report only CLD_STOPPED, as if the next
639 		 * CLD_CONTINUED was dropped.
640 		 */
641 		why = 0;
642 		if (signal->flags & SIGNAL_STOP_STOPPED)
643 			why |= SIGNAL_CLD_CONTINUED;
644 		else if (signal->group_stop_count)
645 			why |= SIGNAL_CLD_STOPPED;
646 
647 		if (why) {
648 			/*
649 			 * The first thread which returns from finish_stop()
650 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
651 			 * notify its parent. See get_signal_to_deliver().
652 			 */
653 			signal->flags = why | SIGNAL_STOP_CONTINUED;
654 			signal->group_stop_count = 0;
655 			signal->group_exit_code = 0;
656 		} else {
657 			/*
658 			 * We are not stopped, but there could be a stop
659 			 * signal in the middle of being processed after
660 			 * being removed from the queue.  Clear that too.
661 			 */
662 			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
663 		}
664 	}
665 
666 	return !sig_ignored(p, sig);
667 }
668 
669 /*
670  * Test if P wants to take SIG.  After we've checked all threads with this,
671  * it's equivalent to finding no threads not blocking SIG.  Any threads not
672  * blocking SIG were ruled out because they are not running and already
673  * have pending signals.  Such threads will dequeue from the shared queue
674  * as soon as they're available, so putting the signal on the shared queue
675  * will be equivalent to sending it to one such thread.
676  */
677 static inline int wants_signal(int sig, struct task_struct *p)
678 {
679 	if (sigismember(&p->blocked, sig))
680 		return 0;
681 	if (p->flags & PF_EXITING)
682 		return 0;
683 	if (sig == SIGKILL)
684 		return 1;
685 	if (task_is_stopped_or_traced(p))
686 		return 0;
687 	return task_curr(p) || !signal_pending(p);
688 }
689 
690 static void complete_signal(int sig, struct task_struct *p, int group)
691 {
692 	struct signal_struct *signal = p->signal;
693 	struct task_struct *t;
694 
695 	/*
696 	 * Now find a thread we can wake up to take the signal off the queue.
697 	 *
698 	 * If the main thread wants the signal, it gets first crack.
699 	 * Probably the least surprising to the average bear.
700 	 */
701 	if (wants_signal(sig, p))
702 		t = p;
703 	else if (!group || thread_group_empty(p))
704 		/*
705 		 * There is just one thread and it does not need to be woken.
706 		 * It will dequeue unblocked signals before it runs again.
707 		 */
708 		return;
709 	else {
710 		/*
711 		 * Otherwise try to find a suitable thread.
712 		 */
713 		t = signal->curr_target;
714 		while (!wants_signal(sig, t)) {
715 			t = next_thread(t);
716 			if (t == signal->curr_target)
717 				/*
718 				 * No thread needs to be woken.
719 				 * Any eligible threads will see
720 				 * the signal in the queue soon.
721 				 */
722 				return;
723 		}
724 		signal->curr_target = t;
725 	}
726 
727 	/*
728 	 * Found a killable thread.  If the signal will be fatal,
729 	 * then start taking the whole group down immediately.
730 	 */
731 	if (sig_fatal(p, sig) &&
732 	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
733 	    !sigismember(&t->real_blocked, sig) &&
734 	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
735 		/*
736 		 * This signal will be fatal to the whole group.
737 		 */
738 		if (!sig_kernel_coredump(sig)) {
739 			/*
740 			 * Start a group exit and wake everybody up.
741 			 * This way we don't have other threads
742 			 * running and doing things after a slower
743 			 * thread has the fatal signal pending.
744 			 */
745 			signal->flags = SIGNAL_GROUP_EXIT;
746 			signal->group_exit_code = sig;
747 			signal->group_stop_count = 0;
748 			t = p;
749 			do {
750 				sigaddset(&t->pending.signal, SIGKILL);
751 				signal_wake_up(t, 1);
752 			} while_each_thread(p, t);
753 			return;
754 		}
755 	}
756 
757 	/*
758 	 * The signal is already in the shared-pending queue.
759 	 * Tell the chosen thread to wake up and dequeue it.
760 	 */
761 	signal_wake_up(t, sig == SIGKILL);
762 	return;
763 }
764 
765 static inline int legacy_queue(struct sigpending *signals, int sig)
766 {
767 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
768 }
769 
770 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
771 			int group)
772 {
773 	struct sigpending *pending;
774 	struct sigqueue *q;
775 
776 	assert_spin_locked(&t->sighand->siglock);
777 	if (!prepare_signal(sig, t))
778 		return 0;
779 
780 	pending = group ? &t->signal->shared_pending : &t->pending;
781 	/*
782 	 * Short-circuit ignored signals and support queuing
783 	 * exactly one non-rt signal, so that we can get more
784 	 * detailed information about the cause of the signal.
785 	 */
786 	if (legacy_queue(pending, sig))
787 		return 0;
788 	/*
789 	 * fast-pathed signals for kernel-internal things like SIGSTOP
790 	 * or SIGKILL.
791 	 */
792 	if (info == SEND_SIG_FORCED)
793 		goto out_set;
794 
795 	/* Real-time signals must be queued if sent by sigqueue, or
796 	   some other real-time mechanism.  It is implementation
797 	   defined whether kill() does so.  We attempt to do so, on
798 	   the principle of least surprise, but since kill is not
799 	   allowed to fail with EAGAIN when low on memory we just
800 	   make sure at least one signal gets delivered and don't
801 	   pass on the info struct.  */
802 
803 	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
804 					     (is_si_special(info) ||
805 					      info->si_code >= 0)));
806 	if (q) {
807 		list_add_tail(&q->list, &pending->list);
808 		switch ((unsigned long) info) {
809 		case (unsigned long) SEND_SIG_NOINFO:
810 			q->info.si_signo = sig;
811 			q->info.si_errno = 0;
812 			q->info.si_code = SI_USER;
813 			q->info.si_pid = task_pid_vnr(current);
814 			q->info.si_uid = current->uid;
815 			break;
816 		case (unsigned long) SEND_SIG_PRIV:
817 			q->info.si_signo = sig;
818 			q->info.si_errno = 0;
819 			q->info.si_code = SI_KERNEL;
820 			q->info.si_pid = 0;
821 			q->info.si_uid = 0;
822 			break;
823 		default:
824 			copy_siginfo(&q->info, info);
825 			break;
826 		}
827 	} else if (!is_si_special(info)) {
828 		if (sig >= SIGRTMIN && info->si_code != SI_USER)
829 		/*
830 		 * Queue overflow, abort.  We may abort if the signal was rt
831 		 * and sent by user using something other than kill().
832 		 */
833 			return -EAGAIN;
834 	}
835 
836 out_set:
837 	signalfd_notify(t, sig);
838 	sigaddset(&pending->signal, sig);
839 	complete_signal(sig, t, group);
840 	return 0;
841 }
842 
843 int print_fatal_signals;
844 
845 static void print_fatal_signal(struct pt_regs *regs, int signr)
846 {
847 	printk("%s/%d: potentially unexpected fatal signal %d.\n",
848 		current->comm, task_pid_nr(current), signr);
849 
850 #if defined(__i386__) && !defined(__arch_um__)
851 	printk("code at %08lx: ", regs->ip);
852 	{
853 		int i;
854 		for (i = 0; i < 16; i++) {
855 			unsigned char insn;
856 
857 			__get_user(insn, (unsigned char *)(regs->ip + i));
858 			printk("%02x ", insn);
859 		}
860 	}
861 #endif
862 	printk("\n");
863 	show_regs(regs);
864 }
865 
866 static int __init setup_print_fatal_signals(char *str)
867 {
868 	get_option (&str, &print_fatal_signals);
869 
870 	return 1;
871 }
872 
873 __setup("print-fatal-signals=", setup_print_fatal_signals);
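/*
 * Editor's note: the __setup() hook above means this diagnostic can be
 * enabled by booting with "print-fatal-signals=1" on the kernel command
 * line, which get_option() parses into print_fatal_signals.
 */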
874 
875 int
876 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
877 {
878 	return send_signal(sig, info, p, 1);
879 }
880 
881 static int
882 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
883 {
884 	return send_signal(sig, info, t, 0);
885 }
886 
887 /*
888  * Force a signal that the process can't ignore: if necessary
889  * we unblock the signal and change any SIG_IGN to SIG_DFL.
890  *
891  * Note: If we unblock the signal, we always reset it to SIG_DFL,
892  * since we do not want to have a signal handler that was blocked
893  * be invoked when user space had explicitly blocked it.
894  *
895  * We don't want to have recursive SIGSEGV's etc, for example,
896  * that is why we also clear SIGNAL_UNKILLABLE.
897  */
898 int
899 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
900 {
901 	unsigned long int flags;
902 	int ret, blocked, ignored;
903 	struct k_sigaction *action;
904 
905 	spin_lock_irqsave(&t->sighand->siglock, flags);
906 	action = &t->sighand->action[sig-1];
907 	ignored = action->sa.sa_handler == SIG_IGN;
908 	blocked = sigismember(&t->blocked, sig);
909 	if (blocked || ignored) {
910 		action->sa.sa_handler = SIG_DFL;
911 		if (blocked) {
912 			sigdelset(&t->blocked, sig);
913 			recalc_sigpending_and_wake(t);
914 		}
915 	}
916 	if (action->sa.sa_handler == SIG_DFL)
917 		t->signal->flags &= ~SIGNAL_UNKILLABLE;
918 	ret = specific_send_sig_info(sig, info, t);
919 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
920 
921 	return ret;
922 }
923 
924 void
925 force_sig_specific(int sig, struct task_struct *t)
926 {
927 	force_sig_info(sig, SEND_SIG_FORCED, t);
928 }
929 
930 /*
931  * Nuke all other threads in the group.
932  */
933 void zap_other_threads(struct task_struct *p)
934 {
935 	struct task_struct *t;
936 
937 	p->signal->group_stop_count = 0;
938 
939 	for (t = next_thread(p); t != p; t = next_thread(t)) {
940 		/*
941 		 * Don't bother with already dead threads
942 		 */
943 		if (t->exit_state)
944 			continue;
945 
946 		/* SIGKILL will be handled before any pending SIGSTOP */
947 		sigaddset(&t->pending.signal, SIGKILL);
948 		signal_wake_up(t, 1);
949 	}
950 }
951 
952 int __fatal_signal_pending(struct task_struct *tsk)
953 {
954 	return sigismember(&tsk->pending.signal, SIGKILL);
955 }
956 EXPORT_SYMBOL(__fatal_signal_pending);
957 
958 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
959 {
960 	struct sighand_struct *sighand;
961 
962 	rcu_read_lock();
963 	for (;;) {
964 		sighand = rcu_dereference(tsk->sighand);
965 		if (unlikely(sighand == NULL))
966 			break;
967 
968 		spin_lock_irqsave(&sighand->siglock, *flags);
969 		if (likely(sighand == tsk->sighand))
970 			break;
971 		spin_unlock_irqrestore(&sighand->siglock, *flags);
972 	}
973 	rcu_read_unlock();
974 
975 	return sighand;
976 }
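/*
 * Editor's note: a minimal sketch, under "#if 0", of the usual caller
 * pattern for lock_task_sighand()/unlock_task_sighand().
 * example_task_has_private_pending() is a hypothetical helper.
 */
#if 0
static int example_task_has_private_pending(struct task_struct *tsk)
{
	unsigned long flags;
	int pending = 0;

	if (lock_task_sighand(tsk, &flags)) {
		/* ->sighand cannot go away and ->siglock is held here */
		pending = !sigisemptyset(&tsk->pending.signal);
		unlock_task_sighand(tsk, &flags);
	}
	return pending;
}
#endif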
977 
978 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
979 {
980 	unsigned long flags;
981 	int ret;
982 
983 	ret = check_kill_permission(sig, info, p);
984 
985 	if (!ret && sig) {
986 		ret = -ESRCH;
987 		if (lock_task_sighand(p, &flags)) {
988 			ret = __group_send_sig_info(sig, info, p);
989 			unlock_task_sighand(p, &flags);
990 		}
991 	}
992 
993 	return ret;
994 }
995 
996 /*
997  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
998  * control characters do (^C, ^Z etc)
999  */
1000 
1001 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1002 {
1003 	struct task_struct *p = NULL;
1004 	int retval, success;
1005 
1006 	success = 0;
1007 	retval = -ESRCH;
1008 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1009 		int err = group_send_sig_info(sig, info, p);
1010 		success |= !err;
1011 		retval = err;
1012 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1013 	return success ? 0 : retval;
1014 }
1015 
1016 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1017 {
1018 	int error = -ESRCH;
1019 	struct task_struct *p;
1020 
1021 	rcu_read_lock();
1022 retry:
1023 	p = pid_task(pid, PIDTYPE_PID);
1024 	if (p) {
1025 		error = group_send_sig_info(sig, info, p);
1026 		if (unlikely(error == -ESRCH))
1027 			/*
1028 			 * The task was unhashed in between; try again.
1029 			 * If it is dead, pid_task() will return NULL;
1030 			 * if we race with de_thread() it will find the
1031 			 * new leader.
1032 			 */
1033 			goto retry;
1034 	}
1035 	rcu_read_unlock();
1036 
1037 	return error;
1038 }
1039 
1040 int
1041 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1042 {
1043 	int error;
1044 	rcu_read_lock();
1045 	error = kill_pid_info(sig, info, find_vpid(pid));
1046 	rcu_read_unlock();
1047 	return error;
1048 }
1049 
1050 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1051 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1052 		      uid_t uid, uid_t euid, u32 secid)
1053 {
1054 	int ret = -EINVAL;
1055 	struct task_struct *p;
1056 
1057 	if (!valid_signal(sig))
1058 		return ret;
1059 
1060 	read_lock(&tasklist_lock);
1061 	p = pid_task(pid, PIDTYPE_PID);
1062 	if (!p) {
1063 		ret = -ESRCH;
1064 		goto out_unlock;
1065 	}
1066 	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1067 	    && (euid != p->suid) && (euid != p->uid)
1068 	    && (uid != p->suid) && (uid != p->uid)) {
1069 		ret = -EPERM;
1070 		goto out_unlock;
1071 	}
1072 	ret = security_task_kill(p, info, sig, secid);
1073 	if (ret)
1074 		goto out_unlock;
1075 	if (sig && p->sighand) {
1076 		unsigned long flags;
1077 		spin_lock_irqsave(&p->sighand->siglock, flags);
1078 		ret = __group_send_sig_info(sig, info, p);
1079 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1080 	}
1081 out_unlock:
1082 	read_unlock(&tasklist_lock);
1083 	return ret;
1084 }
1085 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1086 
1087 /*
1088  * kill_something_info() interprets pid in interesting ways just like kill(2).
1089  *
1090  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1091  * is probably wrong.  Should make it like BSD or SYSV.
1092  */
1093 
1094 static int kill_something_info(int sig, struct siginfo *info, int pid)
1095 {
1096 	int ret;
1097 
1098 	if (pid > 0) {
1099 		rcu_read_lock();
1100 		ret = kill_pid_info(sig, info, find_vpid(pid));
1101 		rcu_read_unlock();
1102 		return ret;
1103 	}
1104 
1105 	read_lock(&tasklist_lock);
1106 	if (pid != -1) {
1107 		ret = __kill_pgrp_info(sig, info,
1108 				pid ? find_vpid(-pid) : task_pgrp(current));
1109 	} else {
1110 		int retval = 0, count = 0;
1111 		struct task_struct * p;
1112 
1113 		for_each_process(p) {
1114 			if (p->pid > 1 && !same_thread_group(p, current)) {
1115 				int err = group_send_sig_info(sig, info, p);
1116 				++count;
1117 				if (err != -EPERM)
1118 					retval = err;
1119 			}
1120 		}
1121 		ret = count ? retval : -ESRCH;
1122 	}
1123 	read_unlock(&tasklist_lock);
1124 
1125 	return ret;
1126 }
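/*
 * Editor's note: as the code above implements it, the pid argument follows
 * kill(2):
 *   pid >  0   signal the single process with that pid
 *   pid == 0   signal every process in the caller's process group
 *   pid == -1  signal every process the caller may signal, except init
 *              and the caller's own thread group
 *   pid < -1   signal every process in the process group -pid
 */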
1127 
1128 /*
1129  * These are for backward compatibility with the rest of the kernel source.
1130  */
1131 
1132 /*
1133  * The caller must ensure the task can't exit.
1134  */
1135 int
1136 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1137 {
1138 	int ret;
1139 	unsigned long flags;
1140 
1141 	/*
1142 	 * Make sure legacy kernel users don't send in bad values
1143 	 * (normal paths check this in check_kill_permission).
1144 	 */
1145 	if (!valid_signal(sig))
1146 		return -EINVAL;
1147 
1148 	spin_lock_irqsave(&p->sighand->siglock, flags);
1149 	ret = specific_send_sig_info(sig, info, p);
1150 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
1151 	return ret;
1152 }
1153 
1154 #define __si_special(priv) \
1155 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1156 
1157 int
1158 send_sig(int sig, struct task_struct *p, int priv)
1159 {
1160 	return send_sig_info(sig, __si_special(priv), p);
1161 }
1162 
1163 void
1164 force_sig(int sig, struct task_struct *p)
1165 {
1166 	force_sig_info(sig, SEND_SIG_PRIV, p);
1167 }
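/*
 * Editor's note: a minimal sketch, under "#if 0", of the compatibility
 * helpers above.  The caller is assumed to hold a reference (or
 * tasklist_lock) so "tsk" cannot exit underneath us.
 */
#if 0
static void example_send(struct task_struct *tsk)
{
	send_sig(SIGHUP, tsk, 0);	/* as if from user space (SI_USER)  */
	send_sig(SIGHUP, tsk, 1);	/* privileged sender (SI_KERNEL)    */
	force_sig(SIGSEGV, current);	/* cannot stay ignored or blocked   */
}
#endif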
1168 
1169 /*
1170  * When things go south during signal handling, we
1171  * will force a SIGSEGV. And if the signal that caused
1172  * the problem was already a SIGSEGV, we'll want to
1173  * make sure we don't even try to deliver the signal..
1174  */
1175 int
1176 force_sigsegv(int sig, struct task_struct *p)
1177 {
1178 	if (sig == SIGSEGV) {
1179 		unsigned long flags;
1180 		spin_lock_irqsave(&p->sighand->siglock, flags);
1181 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1182 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1183 	}
1184 	force_sig(SIGSEGV, p);
1185 	return 0;
1186 }
1187 
1188 int kill_pgrp(struct pid *pid, int sig, int priv)
1189 {
1190 	int ret;
1191 
1192 	read_lock(&tasklist_lock);
1193 	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1194 	read_unlock(&tasklist_lock);
1195 
1196 	return ret;
1197 }
1198 EXPORT_SYMBOL(kill_pgrp);
1199 
1200 int kill_pid(struct pid *pid, int sig, int priv)
1201 {
1202 	return kill_pid_info(sig, __si_special(priv), pid);
1203 }
1204 EXPORT_SYMBOL(kill_pid);
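/*
 * Editor's note: a minimal sketch, under "#if 0", of how the exported
 * helpers above are typically used, e.g. by the tty layer on hangup.
 * "pgrp" and "leader" are assumed to be valid struct pid pointers.
 */
#if 0
static void example_hangup(struct pid *pgrp, struct pid *leader)
{
	kill_pgrp(pgrp, SIGHUP, 1);	/* whole foreground process group */
	kill_pgrp(pgrp, SIGCONT, 1);
	kill_pid(leader, SIGHUP, 1);	/* one process, by struct pid     */
}
#endif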
1205 
1206 int
1207 kill_proc(pid_t pid, int sig, int priv)
1208 {
1209 	int ret;
1210 
1211 	rcu_read_lock();
1212 	ret = kill_pid_info(sig, __si_special(priv), find_pid(pid));
1213 	rcu_read_unlock();
1214 	return ret;
1215 }
1216 
1217 /*
1218  * These functions support sending signals using preallocated sigqueue
1219  * structures.  This is needed "because realtime applications cannot
1220  * afford to lose notifications of asynchronous events, like timer
1221  * expirations or I/O completions".  In the case of POSIX timers
1222  * we allocate the sigqueue structure at timer_create() time.  If this
1223  * allocation fails we are able to report the failure to the application
1224  * with an EAGAIN error.
1225  */
1226 
1227 struct sigqueue *sigqueue_alloc(void)
1228 {
1229 	struct sigqueue *q;
1230 
1231 	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1232 		q->flags |= SIGQUEUE_PREALLOC;
1233 	return(q);
1234 }
1235 
1236 void sigqueue_free(struct sigqueue *q)
1237 {
1238 	unsigned long flags;
1239 	spinlock_t *lock = &current->sighand->siglock;
1240 
1241 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1242 	/*
1243 	 * If the signal is still pending remove it from the
1244 	 * pending queue. We must hold ->siglock while testing
1245 	 * q->list to serialize with collect_signal().
1246 	 */
1247 	spin_lock_irqsave(lock, flags);
1248 	if (!list_empty(&q->list))
1249 		list_del_init(&q->list);
1250 	spin_unlock_irqrestore(lock, flags);
1251 
1252 	q->flags &= ~SIGQUEUE_PREALLOC;
1253 	__sigqueue_free(q);
1254 }
1255 
1256 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1257 {
1258 	int sig = q->info.si_signo;
1259 	struct sigpending *pending;
1260 	unsigned long flags;
1261 	int ret;
1262 
1263 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1264 
1265 	ret = -1;
1266 	if (!likely(lock_task_sighand(t, &flags)))
1267 		goto ret;
1268 
1269 	ret = 1; /* the signal is ignored */
1270 	if (!prepare_signal(sig, t))
1271 		goto out;
1272 
1273 	ret = 0;
1274 	if (unlikely(!list_empty(&q->list))) {
1275 		/*
1276 		 * If an SI_TIMER entry is already queued, just increment
1277 		 * the overrun count.
1278 		 */
1279 		BUG_ON(q->info.si_code != SI_TIMER);
1280 		q->info.si_overrun++;
1281 		goto out;
1282 	}
1283 
1284 	signalfd_notify(t, sig);
1285 	pending = group ? &t->signal->shared_pending : &t->pending;
1286 	list_add_tail(&q->list, &pending->list);
1287 	sigaddset(&pending->signal, sig);
1288 	complete_signal(sig, t, group);
1289 out:
1290 	unlock_task_sighand(t, &flags);
1291 ret:
1292 	return ret;
1293 }
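/*
 * Editor's note: a minimal sketch, under "#if 0", of the preallocated
 * sigqueue life cycle described above, roughly what the POSIX timer code
 * does.  struct my_timer and its fields are hypothetical.
 */
#if 0
static int example_timer_create(struct my_timer *tmr)
{
	tmr->sigq = sigqueue_alloc();		/* at timer_create() time  */
	if (!tmr->sigq)
		return -EAGAIN;			/* report failure up front */
	tmr->sigq->info.si_signo = SIGALRM;
	tmr->sigq->info.si_code = SI_TIMER;
	return 0;
}

static void example_timer_expire(struct my_timer *tmr, struct task_struct *t)
{
	/* Re-uses the same entry; send_sigqueue() bumps si_overrun if it
	 * is still queued from the previous expiry. */
	send_sigqueue(tmr->sigq, t, 0);		/* 0 = per-thread delivery */
}

static void example_timer_delete(struct my_timer *tmr)
{
	sigqueue_free(tmr->sigq);
}
#endif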
1294 
1295 /*
1296  * Wake up any threads in the parent blocked in wait* syscalls.
1297  */
1298 static inline void __wake_up_parent(struct task_struct *p,
1299 				    struct task_struct *parent)
1300 {
1301 	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1302 }
1303 
1304 /*
1305  * Let a parent know about the death of a child.
1306  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1307  */
1308 
1309 void do_notify_parent(struct task_struct *tsk, int sig)
1310 {
1311 	struct siginfo info;
1312 	unsigned long flags;
1313 	struct sighand_struct *psig;
1314 
1315 	BUG_ON(sig == -1);
1316 
1317  	/* do_notify_parent_cldstop should have been called instead.  */
1318  	BUG_ON(task_is_stopped_or_traced(tsk));
1319 
1320 	BUG_ON(!tsk->ptrace &&
1321 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1322 
1323 	info.si_signo = sig;
1324 	info.si_errno = 0;
1325 	/*
1326 	 * We are under tasklist_lock here so our parent is tied to
1327 	 * us and cannot exit and release its namespace.
1328 	 *
1329 	 * The only thing it can do is switch its nsproxy with sys_unshare(),
1330 	 * but unsharing pid namespaces is not allowed, so we will always
1331 	 * see the relevant namespace.
1332 	 *
1333 	 * write_lock() currently calls preempt_disable() which is the
1334 	 * same as rcu_read_lock(), but according to Oleg it is not
1335 	 * correct to rely on this.
1336 	 */
1337 	rcu_read_lock();
1338 	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1339 	rcu_read_unlock();
1340 
1341 	info.si_uid = tsk->uid;
1342 
1343 	/* FIXME: find out whether or not this is supposed to be c*time. */
1344 	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1345 						       tsk->signal->utime));
1346 	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1347 						       tsk->signal->stime));
1348 
1349 	info.si_status = tsk->exit_code & 0x7f;
1350 	if (tsk->exit_code & 0x80)
1351 		info.si_code = CLD_DUMPED;
1352 	else if (tsk->exit_code & 0x7f)
1353 		info.si_code = CLD_KILLED;
1354 	else {
1355 		info.si_code = CLD_EXITED;
1356 		info.si_status = tsk->exit_code >> 8;
1357 	}
1358 
1359 	psig = tsk->parent->sighand;
1360 	spin_lock_irqsave(&psig->siglock, flags);
1361 	if (!tsk->ptrace && sig == SIGCHLD &&
1362 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1363 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1364 		/*
1365 		 * We are exiting and our parent doesn't care.  POSIX.1
1366 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1367 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1368 		 * automatically and not left for our parent's wait4 call.
1369 		 * Rather than having the parent do it as a magic kind of
1370 		 * signal handler, we just set this to tell do_exit that we
1371 		 * can be cleaned up without becoming a zombie.  Note that
1372 		 * we still call __wake_up_parent in this case, because a
1373 		 * blocked sys_wait4 might now return -ECHILD.
1374 		 *
1375 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1376 		 * is implementation-defined: we do (if you don't want
1377 		 * it, just use SIG_IGN instead).
1378 		 */
1379 		tsk->exit_signal = -1;
1380 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1381 			sig = 0;
1382 	}
1383 	if (valid_signal(sig) && sig > 0)
1384 		__group_send_sig_info(sig, &info, tsk->parent);
1385 	__wake_up_parent(tsk, tsk->parent);
1386 	spin_unlock_irqrestore(&psig->siglock, flags);
1387 }
1388 
1389 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1390 {
1391 	struct siginfo info;
1392 	unsigned long flags;
1393 	struct task_struct *parent;
1394 	struct sighand_struct *sighand;
1395 
1396 	if (tsk->ptrace & PT_PTRACED)
1397 		parent = tsk->parent;
1398 	else {
1399 		tsk = tsk->group_leader;
1400 		parent = tsk->real_parent;
1401 	}
1402 
1403 	info.si_signo = SIGCHLD;
1404 	info.si_errno = 0;
1405 	/*
1406 	 * see comment in do_notify_parent() about the following 3 lines
1407 	 */
1408 	rcu_read_lock();
1409 	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1410 	rcu_read_unlock();
1411 
1412 	info.si_uid = tsk->uid;
1413 
1414 	/* FIXME: find out whether or not this is supposed to be c*time. */
1415 	info.si_utime = cputime_to_jiffies(tsk->utime);
1416 	info.si_stime = cputime_to_jiffies(tsk->stime);
1417 
1418  	info.si_code = why;
1419  	switch (why) {
1420  	case CLD_CONTINUED:
1421  		info.si_status = SIGCONT;
1422  		break;
1423  	case CLD_STOPPED:
1424  		info.si_status = tsk->signal->group_exit_code & 0x7f;
1425  		break;
1426  	case CLD_TRAPPED:
1427  		info.si_status = tsk->exit_code & 0x7f;
1428  		break;
1429  	default:
1430  		BUG();
1431  	}
1432 
1433 	sighand = parent->sighand;
1434 	spin_lock_irqsave(&sighand->siglock, flags);
1435 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1436 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1437 		__group_send_sig_info(SIGCHLD, &info, parent);
1438 	/*
1439 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1440 	 */
1441 	__wake_up_parent(tsk, parent);
1442 	spin_unlock_irqrestore(&sighand->siglock, flags);
1443 }
1444 
1445 static inline int may_ptrace_stop(void)
1446 {
1447 	if (!likely(current->ptrace & PT_PTRACED))
1448 		return 0;
1449 	/*
1450 	 * Are we in the middle of do_coredump?
1451 	 * If so and our tracer is also part of the coredump stopping
1452 	 * is a deadlock situation, and pointless because our tracer
1453 	 * is dead so don't allow us to stop.
1454 	 * If SIGKILL was already sent before the caller unlocked
1455 	 * ->siglock we must see ->core_waiters != 0. Otherwise it
1456 	 * is safe to enter schedule().
1457 	 */
1458 	if (unlikely(current->mm->core_waiters) &&
1459 	    unlikely(current->mm == current->parent->mm))
1460 		return 0;
1461 
1462 	return 1;
1463 }
1464 
1465 /*
1466  * Return nonzero if there is a SIGKILL that should be waking us up.
1467  * Called with the siglock held.
1468  */
1469 static int sigkill_pending(struct task_struct *tsk)
1470 {
1471 	return ((sigismember(&tsk->pending.signal, SIGKILL) ||
1472 		 sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) &&
1473 		!unlikely(sigismember(&tsk->blocked, SIGKILL)));
1474 }
1475 
1476 /*
1477  * This must be called with current->sighand->siglock held.
1478  *
1479  * This should be the path for all ptrace stops.
1480  * We always set current->last_siginfo while stopped here.
1481  * That makes it a way to test a stopped process for
1482  * being ptrace-stopped vs being job-control-stopped.
1483  *
1484  * If we actually decide not to stop at all because the tracer
1485  * is gone, we keep current->exit_code unless clear_code.
1486  */
1487 static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1488 {
1489 	int killed = 0;
1490 
1491 	if (arch_ptrace_stop_needed(exit_code, info)) {
1492 		/*
1493 		 * The arch code has something special to do before a
1494 		 * ptrace stop.  This is allowed to block, e.g. for faults
1495 		 * on user stack pages.  We can't keep the siglock while
1496 		 * calling arch_ptrace_stop, so we must release it now.
1497 		 * To preserve proper semantics, we must do this before
1498 		 * any signal bookkeeping like checking group_stop_count.
1499 		 * Meanwhile, a SIGKILL could come in before we retake the
1500 		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
1501 		 * So after regaining the lock, we must check for SIGKILL.
1502 		 */
1503 		spin_unlock_irq(&current->sighand->siglock);
1504 		arch_ptrace_stop(exit_code, info);
1505 		spin_lock_irq(&current->sighand->siglock);
1506 		killed = sigkill_pending(current);
1507 	}
1508 
1509 	/*
1510 	 * If there is a group stop in progress,
1511 	 * we must participate in the bookkeeping.
1512 	 */
1513 	if (current->signal->group_stop_count > 0)
1514 		--current->signal->group_stop_count;
1515 
1516 	current->last_siginfo = info;
1517 	current->exit_code = exit_code;
1518 
1519 	/* Let the debugger run.  */
1520 	__set_current_state(TASK_TRACED);
1521 	spin_unlock_irq(&current->sighand->siglock);
1522 	read_lock(&tasklist_lock);
1523 	if (!unlikely(killed) && may_ptrace_stop()) {
1524 		do_notify_parent_cldstop(current, CLD_TRAPPED);
1525 		read_unlock(&tasklist_lock);
1526 		schedule();
1527 	} else {
1528 		/*
1529 		 * By the time we got the lock, our tracer went away.
1530 		 * Don't drop the lock yet, another tracer may come.
1531 		 */
1532 		__set_current_state(TASK_RUNNING);
1533 		if (clear_code)
1534 			current->exit_code = 0;
1535 		read_unlock(&tasklist_lock);
1536 	}
1537 
1538 	/*
1539 	 * While in TASK_TRACED, we were considered "frozen enough".
1540 	 * Now that we woke up, it's crucial if we're supposed to be
1541 	 * frozen that we freeze now before running anything substantial.
1542 	 */
1543 	try_to_freeze();
1544 
1545 	/*
1546 	 * We are back.  Now reacquire the siglock before touching
1547 	 * last_siginfo, so that we are sure to have synchronized with
1548 	 * any signal-sending on another CPU that wants to examine it.
1549 	 */
1550 	spin_lock_irq(&current->sighand->siglock);
1551 	current->last_siginfo = NULL;
1552 
1553 	/*
1554 	 * Queued signals ignored us while we were stopped for tracing.
1555 	 * So check for any that we should take before resuming user mode.
1556 	 * This sets TIF_SIGPENDING, but never clears it.
1557 	 */
1558 	recalc_sigpending_tsk(current);
1559 }
1560 
1561 void ptrace_notify(int exit_code)
1562 {
1563 	siginfo_t info;
1564 
1565 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1566 
1567 	memset(&info, 0, sizeof info);
1568 	info.si_signo = SIGTRAP;
1569 	info.si_code = exit_code;
1570 	info.si_pid = task_pid_vnr(current);
1571 	info.si_uid = current->uid;
1572 
1573 	/* Let the debugger run.  */
1574 	spin_lock_irq(&current->sighand->siglock);
1575 	ptrace_stop(exit_code, 1, &info);
1576 	spin_unlock_irq(&current->sighand->siglock);
1577 }
1578 
1579 static void
1580 finish_stop(int stop_count)
1581 {
1582 	/*
1583 	 * If there are no other threads in the group, or if there is
1584 	 * a group stop in progress and we are the last to stop,
1585 	 * report to the parent.  When ptraced, every thread reports itself.
1586 	 */
1587 	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
1588 		read_lock(&tasklist_lock);
1589 		do_notify_parent_cldstop(current, CLD_STOPPED);
1590 		read_unlock(&tasklist_lock);
1591 	}
1592 
1593 	do {
1594 		schedule();
1595 	} while (try_to_freeze());
1596 	/*
1597 	 * Now we don't run again until continued.
1598 	 */
1599 	current->exit_code = 0;
1600 }
1601 
1602 /*
1603  * This performs the stopping for SIGSTOP and other stop signals.
1604  * We have to stop all threads in the thread group.
1605  * Returns nonzero if we've actually stopped and released the siglock.
1606  * Returns zero if we didn't stop and still hold the siglock.
1607  */
1608 static int do_signal_stop(int signr)
1609 {
1610 	struct signal_struct *sig = current->signal;
1611 	int stop_count;
1612 
1613 	if (sig->group_stop_count > 0) {
1614 		/*
1615 		 * There is a group stop in progress.  We don't need to
1616 		 * start another one.
1617 		 */
1618 		stop_count = --sig->group_stop_count;
1619 	} else {
1620 		struct task_struct *t;
1621 
1622 		if (unlikely((sig->flags & (SIGNAL_STOP_DEQUEUED | SIGNAL_UNKILLABLE))
1623 					 != SIGNAL_STOP_DEQUEUED) ||
1624 		    unlikely(signal_group_exit(sig)))
1625 			return 0;
1626 		/*
1627 		 * There is no group stop already in progress.
1628 		 * We must initiate one now.
1629 		 */
1630 		sig->group_exit_code = signr;
1631 
1632 		stop_count = 0;
1633 		for (t = next_thread(current); t != current; t = next_thread(t))
1634 			/*
1635 			 * Setting state to TASK_STOPPED for a group
1636 			 * stop is always done with the siglock held,
1637 			 * so this check has no races.
1638 			 */
1639 			if (!(t->flags & PF_EXITING) &&
1640 			    !task_is_stopped_or_traced(t)) {
1641 				stop_count++;
1642 				signal_wake_up(t, 0);
1643 			}
1644 		sig->group_stop_count = stop_count;
1645 	}
1646 
1647 	if (stop_count == 0)
1648 		sig->flags = SIGNAL_STOP_STOPPED;
1649 	current->exit_code = sig->group_exit_code;
1650 	__set_current_state(TASK_STOPPED);
1651 
1652 	spin_unlock_irq(&current->sighand->siglock);
1653 	finish_stop(stop_count);
1654 	return 1;
1655 }
1656 
1657 static int ptrace_signal(int signr, siginfo_t *info,
1658 			 struct pt_regs *regs, void *cookie)
1659 {
1660 	if (!(current->ptrace & PT_PTRACED))
1661 		return signr;
1662 
1663 	ptrace_signal_deliver(regs, cookie);
1664 
1665 	/* Let the debugger run.  */
1666 	ptrace_stop(signr, 0, info);
1667 
1668 	/* We're back.  Did the debugger cancel the sig?  */
1669 	signr = current->exit_code;
1670 	if (signr == 0)
1671 		return signr;
1672 
1673 	current->exit_code = 0;
1674 
1675 	/* Update the siginfo structure if the signal has
1676 	   changed.  If the debugger wanted something
1677 	   specific in the siginfo structure then it should
1678 	   have updated *info via PTRACE_SETSIGINFO.  */
1679 	if (signr != info->si_signo) {
1680 		info->si_signo = signr;
1681 		info->si_errno = 0;
1682 		info->si_code = SI_USER;
1683 		info->si_pid = task_pid_vnr(current->parent);
1684 		info->si_uid = current->parent->uid;
1685 	}
1686 
1687 	/* If the (new) signal is now blocked, requeue it.  */
1688 	if (sigismember(&current->blocked, signr)) {
1689 		specific_send_sig_info(signr, info, current);
1690 		signr = 0;
1691 	}
1692 
1693 	return signr;
1694 }
1695 
1696 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1697 			  struct pt_regs *regs, void *cookie)
1698 {
1699 	struct sighand_struct *sighand = current->sighand;
1700 	struct signal_struct *signal = current->signal;
1701 	int signr;
1702 
1703 relock:
1704 	/*
1705 	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
1706 	 * While in TASK_STOPPED, we were considered "frozen enough".
1707 	 * Now that we woke up, it's crucial if we're supposed to be
1708 	 * frozen that we freeze now before running anything substantial.
1709 	 */
1710 	try_to_freeze();
1711 
1712 	spin_lock_irq(&sighand->siglock);
1713 	/*
1714 	 * Every stopped thread goes here after wakeup. Check to see if
1715 	 * we should notify the parent, prepare_signal(SIGCONT) encodes
1716 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
1717 	 */
1718 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
1719 		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
1720 				? CLD_CONTINUED : CLD_STOPPED;
1721 		signal->flags &= ~SIGNAL_CLD_MASK;
1722 		spin_unlock_irq(&sighand->siglock);
1723 
1724 		read_lock(&tasklist_lock);
1725 		do_notify_parent_cldstop(current->group_leader, why);
1726 		read_unlock(&tasklist_lock);
1727 		goto relock;
1728 	}
1729 
1730 	for (;;) {
1731 		struct k_sigaction *ka;
1732 
1733 		if (unlikely(signal->group_stop_count > 0) &&
1734 		    do_signal_stop(0))
1735 			goto relock;
1736 
1737 		signr = dequeue_signal(current, &current->blocked, info);
1738 		if (!signr)
1739 			break; /* will return 0 */
1740 
1741 		if (signr != SIGKILL) {
1742 			signr = ptrace_signal(signr, info, regs, cookie);
1743 			if (!signr)
1744 				continue;
1745 		}
1746 
1747 		ka = &sighand->action[signr-1];
1748 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1749 			continue;
1750 		if (ka->sa.sa_handler != SIG_DFL) {
1751 			/* Run the handler.  */
1752 			*return_ka = *ka;
1753 
1754 			if (ka->sa.sa_flags & SA_ONESHOT)
1755 				ka->sa.sa_handler = SIG_DFL;
1756 
1757 			break; /* will return non-zero "signr" value */
1758 		}
1759 
1760 		/*
1761 		 * Now we are doing the default action for this signal.
1762 		 */
1763 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
1764 			continue;
1765 
1766 		/*
1767 		 * Global init gets no signals it doesn't want.
1768 		 */
1769 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
1770 		    !signal_group_exit(signal))
1771 			continue;
1772 
1773 		if (sig_kernel_stop(signr)) {
1774 			/*
1775 			 * The default action is to stop all threads in
1776 			 * the thread group.  The job control signals
1777 			 * do nothing in an orphaned pgrp, but SIGSTOP
1778 			 * always works.  Note that siglock needs to be
1779 			 * dropped during the call to is_orphaned_pgrp()
1780 			 * because of lock ordering with tasklist_lock.
1781 			 * This allows an intervening SIGCONT to be posted.
1782 			 * We need to check for that and bail out if necessary.
1783 			 */
1784 			if (signr != SIGSTOP) {
1785 				spin_unlock_irq(&sighand->siglock);
1786 
1787 				/* signals can be posted during this window */
1788 
1789 				if (is_current_pgrp_orphaned())
1790 					goto relock;
1791 
1792 				spin_lock_irq(&sighand->siglock);
1793 			}
1794 
1795 			if (likely(do_signal_stop(signr))) {
1796 				/* It released the siglock.  */
1797 				goto relock;
1798 			}
1799 
1800 			/*
1801 			 * We didn't actually stop, due to a race
1802 			 * with SIGCONT or something like that.
1803 			 */
1804 			continue;
1805 		}
1806 
1807 		spin_unlock_irq(&sighand->siglock);
1808 
1809 		/*
1810 		 * Anything else is fatal, maybe with a core dump.
1811 		 */
1812 		current->flags |= PF_SIGNALED;
1813 
1814 		if (sig_kernel_coredump(signr)) {
1815 			if (print_fatal_signals)
1816 				print_fatal_signal(regs, signr);
1817 			/*
1818 			 * If it was able to dump core, this kills all
1819 			 * other threads in the group and synchronizes with
1820 			 * their demise.  If we lost the race with another
1821 			 * thread getting here, it set group_exit_code
1822 			 * first and our do_group_exit call below will use
1823 			 * that value and ignore the one we pass it.
1824 			 */
1825 			do_coredump((long)signr, signr, regs);
1826 		}
1827 
1828 		/*
1829 		 * Death signals, no core dump.
1830 		 */
1831 		do_group_exit(signr);
1832 		/* NOTREACHED */
1833 	}
1834 	spin_unlock_irq(&sighand->siglock);
1835 	return signr;
1836 }
1837 
1838 void exit_signals(struct task_struct *tsk)
1839 {
1840 	int group_stop = 0;
1841 	struct task_struct *t;
1842 
1843 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
1844 		tsk->flags |= PF_EXITING;
1845 		return;
1846 	}
1847 
1848 	spin_lock_irq(&tsk->sighand->siglock);
1849 	/*
1850 	 * From now this task is not visible for group-wide signals,
1851 	 * see wants_signal(), do_signal_stop().
1852 	 */
1853 	tsk->flags |= PF_EXITING;
1854 	if (!signal_pending(tsk))
1855 		goto out;
1856 
1857 	/* It could be that __group_complete_signal() chose us to
1858 	 * notify about a group-wide signal. Another thread should be
1859 	 * woken now to take the signal since we will not.
1860 	 */
1861 	for (t = tsk; (t = next_thread(t)) != tsk; )
1862 		if (!signal_pending(t) && !(t->flags & PF_EXITING))
1863 			recalc_sigpending_and_wake(t);
1864 
1865 	if (unlikely(tsk->signal->group_stop_count) &&
1866 			!--tsk->signal->group_stop_count) {
1867 		tsk->signal->flags = SIGNAL_STOP_STOPPED;
1868 		group_stop = 1;
1869 	}
1870 out:
1871 	spin_unlock_irq(&tsk->sighand->siglock);
1872 
1873 	if (unlikely(group_stop)) {
1874 		read_lock(&tasklist_lock);
1875 		do_notify_parent_cldstop(tsk, CLD_STOPPED);
1876 		read_unlock(&tasklist_lock);
1877 	}
1878 }
1879 
1880 EXPORT_SYMBOL(recalc_sigpending);
1881 EXPORT_SYMBOL_GPL(dequeue_signal);
1882 EXPORT_SYMBOL(flush_signals);
1883 EXPORT_SYMBOL(force_sig);
1884 EXPORT_SYMBOL(kill_proc);
1885 EXPORT_SYMBOL(ptrace_notify);
1886 EXPORT_SYMBOL(send_sig);
1887 EXPORT_SYMBOL(send_sig_info);
1888 EXPORT_SYMBOL(sigprocmask);
1889 EXPORT_SYMBOL(block_all_signals);
1890 EXPORT_SYMBOL(unblock_all_signals);
1891 
1892 
1893 /*
1894  * System call entry points.
1895  */
1896 
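/*
 * Re-entry point for syscalls that returned -ERESTART_RESTARTBLOCK.  Such
 * a syscall saves a continuation function (plus its private state) in
 * current_thread_info()->restart_block before returning; the architecture's
 * signal code then rewrites the user registers so the task re-enters the
 * kernel here, and the saved function resumes the interrupted work.
 * do_no_restart_syscall() below is the fallback continuation for the cases
 * where the restart should simply fail with -EINTR.
 */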
1897 asmlinkage long sys_restart_syscall(void)
1898 {
1899 	struct restart_block *restart = &current_thread_info()->restart_block;
1900 	return restart->fn(restart);
1901 }
1902 
1903 long do_no_restart_syscall(struct restart_block *param)
1904 {
1905 	return -EINTR;
1906 }
1907 
1908 /*
1909  * We don't need to get the kernel lock - this is all local to this
1910  * particular thread.. (and that's good, because this is _heavily_
1911  * particular thread (and that's good, because this is _heavily_
1912  * used by various programs).
1913 
1914 /*
1915  * This is also useful for kernel threads that want to temporarily
1916  * (or permanently) block certain signals.
1917  *
1918  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
1919  * interface happily blocks "unblockable" signals like SIGKILL
1920  * and friends.
1921  */
1922 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
1923 {
1924 	int error;
1925 
1926 	spin_lock_irq(&current->sighand->siglock);
1927 	if (oldset)
1928 		*oldset = current->blocked;
1929 
1930 	error = 0;
1931 	switch (how) {
1932 	case SIG_BLOCK:
1933 		sigorsets(&current->blocked, &current->blocked, set);
1934 		break;
1935 	case SIG_UNBLOCK:
1936 		signandsets(&current->blocked, &current->blocked, set);
1937 		break;
1938 	case SIG_SETMASK:
1939 		current->blocked = *set;
1940 		break;
1941 	default:
1942 		error = -EINVAL;
1943 	}
1944 	recalc_sigpending();
1945 	spin_unlock_irq(&current->sighand->siglock);
1946 
1947 	return error;
1948 }
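
/*
 * Usage sketch (illustrative only): a kernel thread can use the in-kernel
 * sigprocmask() above to shut out every signal, including SIGKILL, around
 * a critical region.  The thread function and the helper it calls are
 * hypothetical:
 *
 *	static int my_kthread(void *unused)
 *	{
 *		sigset_t all, old;
 *
 *		sigfillset(&all);
 *		sigprocmask(SIG_BLOCK, &all, &old);
 *		do_uninterruptible_work();
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *		return 0;
 *	}
 */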
1949 
1950 asmlinkage long
1951 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
1952 {
1953 	int error = -EINVAL;
1954 	sigset_t old_set, new_set;
1955 
1956 	/* XXX: Don't preclude handling different sized sigset_t's.  */
1957 	if (sigsetsize != sizeof(sigset_t))
1958 		goto out;
1959 
1960 	if (set) {
1961 		error = -EFAULT;
1962 		if (copy_from_user(&new_set, set, sizeof(*set)))
1963 			goto out;
1964 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
1965 
1966 		error = sigprocmask(how, &new_set, &old_set);
1967 		if (error)
1968 			goto out;
1969 		if (oset)
1970 			goto set_old;
1971 	} else if (oset) {
1972 		spin_lock_irq(&current->sighand->siglock);
1973 		old_set = current->blocked;
1974 		spin_unlock_irq(&current->sighand->siglock);
1975 
1976 	set_old:
1977 		error = -EFAULT;
1978 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
1979 			goto out;
1980 	}
1981 	error = 0;
1982 out:
1983 	return error;
1984 }
1985 
1986 long do_sigpending(void __user *set, unsigned long sigsetsize)
1987 {
1988 	long error = -EINVAL;
1989 	sigset_t pending;
1990 
1991 	if (sigsetsize > sizeof(sigset_t))
1992 		goto out;
1993 
1994 	spin_lock_irq(&current->sighand->siglock);
1995 	sigorsets(&pending, &current->pending.signal,
1996 		  &current->signal->shared_pending.signal);
1997 	spin_unlock_irq(&current->sighand->siglock);
1998 
1999 	/* Outside the lock because only this thread touches it.  */
2000 	sigandsets(&pending, &current->blocked, &pending);
2001 
2002 	error = -EFAULT;
2003 	if (!copy_to_user(set, &pending, sigsetsize))
2004 		error = 0;
2005 
2006 out:
2007 	return error;
2008 }
2009 
2010 asmlinkage long
2011 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2012 {
2013 	return do_sigpending(set, sigsetsize);
2014 }
2015 
2016 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2017 
2018 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2019 {
2020 	int err;
2021 
2022 	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2023 		return -EFAULT;
2024 	if (from->si_code < 0)
2025 		return __copy_to_user(to, from, sizeof(siginfo_t))
2026 			? -EFAULT : 0;
2027 	/*
2028 	 * If you change the siginfo_t structure, please be sure
2029 	 * this code is fixed accordingly.
2030 	 * Please remember to update the signalfd_copyinfo() function
2031 	 * inside fs/signalfd.c too whenever siginfo_t changes.
2032 	 * It should never copy any padding contained in the structure
2033 	 * (to avoid security leaks), but must copy the generic
2034 	 * three ints plus the relevant union member.
2035 	 */
2036 	err = __put_user(from->si_signo, &to->si_signo);
2037 	err |= __put_user(from->si_errno, &to->si_errno);
2038 	err |= __put_user((short)from->si_code, &to->si_code);
2039 	switch (from->si_code & __SI_MASK) {
2040 	case __SI_KILL:
2041 		err |= __put_user(from->si_pid, &to->si_pid);
2042 		err |= __put_user(from->si_uid, &to->si_uid);
2043 		break;
2044 	case __SI_TIMER:
2045 		err |= __put_user(from->si_tid, &to->si_tid);
2046 		err |= __put_user(from->si_overrun, &to->si_overrun);
2047 		err |= __put_user(from->si_ptr, &to->si_ptr);
2048 		break;
2049 	case __SI_POLL:
2050 		err |= __put_user(from->si_band, &to->si_band);
2051 		err |= __put_user(from->si_fd, &to->si_fd);
2052 		break;
2053 	case __SI_FAULT:
2054 		err |= __put_user(from->si_addr, &to->si_addr);
2055 #ifdef __ARCH_SI_TRAPNO
2056 		err |= __put_user(from->si_trapno, &to->si_trapno);
2057 #endif
2058 		break;
2059 	case __SI_CHLD:
2060 		err |= __put_user(from->si_pid, &to->si_pid);
2061 		err |= __put_user(from->si_uid, &to->si_uid);
2062 		err |= __put_user(from->si_status, &to->si_status);
2063 		err |= __put_user(from->si_utime, &to->si_utime);
2064 		err |= __put_user(from->si_stime, &to->si_stime);
2065 		break;
2066 	case __SI_RT: /* This is not generated by the kernel as of now. */
2067 	case __SI_MESGQ: /* But this one is. */
2068 		err |= __put_user(from->si_pid, &to->si_pid);
2069 		err |= __put_user(from->si_uid, &to->si_uid);
2070 		err |= __put_user(from->si_ptr, &to->si_ptr);
2071 		break;
2072 	default: /* this is just in case for now ... */
2073 		err |= __put_user(from->si_pid, &to->si_pid);
2074 		err |= __put_user(from->si_uid, &to->si_uid);
2075 		break;
2076 	}
2077 	return err;
2078 }
2079 
2080 #endif
2081 
2082 asmlinkage long
2083 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2084 		    siginfo_t __user *uinfo,
2085 		    const struct timespec __user *uts,
2086 		    size_t sigsetsize)
2087 {
2088 	int ret, sig;
2089 	sigset_t these;
2090 	struct timespec ts;
2091 	siginfo_t info;
2092 	long timeout = 0;
2093 
2094 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2095 	if (sigsetsize != sizeof(sigset_t))
2096 		return -EINVAL;
2097 
2098 	if (copy_from_user(&these, uthese, sizeof(these)))
2099 		return -EFAULT;
2100 
2101 	/*
2102 	 * Invert the set of allowed signals to get those we
2103 	 * want to block.
2104 	 */
2105 	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2106 	signotset(&these);
2107 
2108 	if (uts) {
2109 		if (copy_from_user(&ts, uts, sizeof(ts)))
2110 			return -EFAULT;
2111 		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2112 		    || ts.tv_sec < 0)
2113 			return -EINVAL;
2114 	}
2115 
2116 	spin_lock_irq(&current->sighand->siglock);
2117 	sig = dequeue_signal(current, &these, &info);
2118 	if (!sig) {
2119 		timeout = MAX_SCHEDULE_TIMEOUT;
2120 		if (uts)
2121 			timeout = (timespec_to_jiffies(&ts)
2122 				   + (ts.tv_sec || ts.tv_nsec));
2123 
2124 		if (timeout) {
2125 			/* None ready -- temporarily unblock the signals we're
2126 			 * interested in while we sleep, so that we'll be
2127 			 * awakened when one of them arrives.  */
2128 			current->real_blocked = current->blocked;
2129 			sigandsets(&current->blocked, &current->blocked, &these);
2130 			recalc_sigpending();
2131 			spin_unlock_irq(&current->sighand->siglock);
2132 
2133 			timeout = schedule_timeout_interruptible(timeout);
2134 
2135 			spin_lock_irq(&current->sighand->siglock);
2136 			sig = dequeue_signal(current, &these, &info);
2137 			current->blocked = current->real_blocked;
2138 			siginitset(&current->real_blocked, 0);
2139 			recalc_sigpending();
2140 		}
2141 	}
2142 	spin_unlock_irq(&current->sighand->siglock);
2143 
2144 	if (sig) {
2145 		ret = sig;
2146 		if (uinfo) {
2147 			if (copy_siginfo_to_user(uinfo, &info))
2148 				ret = -EFAULT;
2149 		}
2150 	} else {
2151 		ret = -EAGAIN;
2152 		if (timeout)
2153 			ret = -EINTR;
2154 	}
2155 
2156 	return ret;
2157 }
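
/*
 * Usage sketch (illustrative only): user space normally reaches this
 * syscall through sigtimedwait(2).  The caller blocks the signals it wants
 * to receive synchronously, then waits for one of them with a timeout;
 * assumes the usual <signal.h>, <errno.h> and <stdio.h> includes:
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *	int sig;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	sig = sigtimedwait(&set, &info, &ts);
 *	if (sig < 0 && errno == EAGAIN)
 *		printf("timed out\n");
 *	else if (sig == SIGUSR1)
 *		printf("SIGUSR1 from pid %d\n", (int)info.si_pid);
 *
 * Blocking the signal first matters: an unblocked signal that arrives just
 * before the call would be delivered to a handler (or take its default
 * action) instead of being picked up here.
 */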
2158 
2159 asmlinkage long
2160 sys_kill(int pid, int sig)
2161 {
2162 	struct siginfo info;
2163 
2164 	info.si_signo = sig;
2165 	info.si_errno = 0;
2166 	info.si_code = SI_USER;
2167 	info.si_pid = task_tgid_vnr(current);
2168 	info.si_uid = current->uid;
2169 
2170 	return kill_something_info(sig, &info, pid);
2171 }
2172 
2173 static int do_tkill(int tgid, int pid, int sig)
2174 {
2175 	int error;
2176 	struct siginfo info;
2177 	struct task_struct *p;
2178 	unsigned long flags;
2179 
2180 	error = -ESRCH;
2181 	info.si_signo = sig;
2182 	info.si_errno = 0;
2183 	info.si_code = SI_TKILL;
2184 	info.si_pid = task_tgid_vnr(current);
2185 	info.si_uid = current->uid;
2186 
2187 	rcu_read_lock();
2188 	p = find_task_by_vpid(pid);
2189 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2190 		error = check_kill_permission(sig, &info, p);
2191 		/*
2192 		 * The null signal is a permissions and process existence
2193 		 * probe.  No signal is actually delivered.
2194 		 *
2195 		 * If lock_task_sighand() fails we pretend the task dies
2196 		 * after receiving the signal. The window is tiny, and the
2197 		 * signal is private anyway.
2198 		 */
2199 		if (!error && sig && lock_task_sighand(p, &flags)) {
2200 			error = specific_send_sig_info(sig, &info, p);
2201 			unlock_task_sighand(p, &flags);
2202 		}
2203 	}
2204 	rcu_read_unlock();
2205 
2206 	return error;
2207 }
2208 
2209 /**
2210  *  sys_tgkill - send signal to one specific thread
2211  *  @tgid: the thread group ID of the thread
2212  *  @pid: the PID of the thread
2213  *  @sig: signal to be sent
2214  *
2215  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2216  *  exists but no longer belongs to the target process. This check
2217  *  solves the problem of threads exiting and their PIDs being reused.
2218  */
2219 asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2220 {
2221 	/* This is only valid for single tasks */
2222 	if (pid <= 0 || tgid <= 0)
2223 		return -EINVAL;
2224 
2225 	return do_tkill(tgid, pid, sig);
2226 }
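
/*
 * Usage sketch (illustrative only): thread libraries use tgkill() to signal
 * one specific thread, and passing the thread group id alongside the thread
 * id is what protects against a recycled tid landing in an unrelated
 * process.  Raw-syscall form, where target_tid is a hypothetical value
 * obtained from the target thread (e.g. via syscall(SYS_gettid)):
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <signal.h>
 *
 *	syscall(SYS_tgkill, (int)getpid(), (int)target_tid, SIGUSR1);
 */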
2227 
2228 /*
2229  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2230  */
2231 asmlinkage long
2232 sys_tkill(int pid, int sig)
2233 {
2234 	/* This is only valid for single tasks */
2235 	if (pid <= 0)
2236 		return -EINVAL;
2237 
2238 	return do_tkill(0, pid, sig);
2239 }
2240 
2241 asmlinkage long
2242 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2243 {
2244 	siginfo_t info;
2245 
2246 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2247 		return -EFAULT;
2248 
2249 	/* Not even root can pretend to send signals from the kernel.
2250 	   Nor can they impersonate a kill(), which adds source info.  */
2251 	if (info.si_code >= 0)
2252 		return -EPERM;
2253 	info.si_signo = sig;
2254 
2255 	/* POSIX.1b doesn't mention process groups.  */
2256 	return kill_proc_info(sig, &info, pid);
2257 }
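
/*
 * Usage sketch (illustrative only): user space normally reaches this
 * through sigqueue(3), which fills in si_code = SI_QUEUE (a negative
 * value), so the -EPERM check above only rejects attempts to forge
 * kernel-generated or kill()-style siginfo.  target_pid is hypothetical:
 *
 *	union sigval value;
 *
 *	value.sival_int = 42;
 *	if (sigqueue(target_pid, SIGUSR1, value) < 0)
 *		perror("sigqueue");
 */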
2258 
2259 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2260 {
2261 	struct task_struct *t = current;
2262 	struct k_sigaction *k;
2263 	sigset_t mask;
2264 
2265 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2266 		return -EINVAL;
2267 
2268 	k = &t->sighand->action[sig-1];
2269 
2270 	spin_lock_irq(&current->sighand->siglock);
2271 	if (oact)
2272 		*oact = *k;
2273 
2274 	if (act) {
2275 		sigdelsetmask(&act->sa.sa_mask,
2276 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
2277 		*k = *act;
2278 		/*
2279 		 * POSIX 3.3.1.3:
2280 		 *  "Setting a signal action to SIG_IGN for a signal that is
2281 		 *   pending shall cause the pending signal to be discarded,
2282 		 *   whether or not it is blocked."
2283 		 *
2284 		 *  "Setting a signal action to SIG_DFL for a signal that is
2285 		 *   pending and whose default action is to ignore the signal
2286 		 *   (for example, SIGCHLD), shall cause the pending signal to
2287 		 *   be discarded, whether or not it is blocked"
2288 		 */
2289 		if (__sig_ignored(t, sig)) {
2290 			sigemptyset(&mask);
2291 			sigaddset(&mask, sig);
2292 			rm_from_queue_full(&mask, &t->signal->shared_pending);
2293 			do {
2294 				rm_from_queue_full(&mask, &t->pending);
2295 				t = next_thread(t);
2296 			} while (t != current);
2297 		}
2298 	}
2299 
2300 	spin_unlock_irq(&current->sighand->siglock);
2301 	return 0;
2302 }
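
/*
 * Usage sketch (illustrative only): the POSIX rule quoted above is visible
 * from user space: installing SIG_IGN (or SIG_DFL for a default-ignore
 * signal such as SIGCHLD) throws away a matching pending signal even while
 * it is blocked:
 *
 *	sigset_t block, pend;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &block, NULL);
 *	raise(SIGCHLD);			   now pending (and blocked)
 *	signal(SIGCHLD, SIG_IGN);	   discards the pending SIGCHLD
 *	sigpending(&pend);		   sigismember(&pend, SIGCHLD) == 0
 */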
2303 
2304 int
2305 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2306 {
2307 	stack_t oss;
2308 	int error;
2309 
2310 	if (uoss) {
2311 		oss.ss_sp = (void __user *) current->sas_ss_sp;
2312 		oss.ss_size = current->sas_ss_size;
2313 		oss.ss_flags = sas_ss_flags(sp);
2314 	}
2315 
2316 	if (uss) {
2317 		void __user *ss_sp;
2318 		size_t ss_size;
2319 		int ss_flags;
2320 
2321 		error = -EFAULT;
2322 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2323 		    || __get_user(ss_sp, &uss->ss_sp)
2324 		    || __get_user(ss_flags, &uss->ss_flags)
2325 		    || __get_user(ss_size, &uss->ss_size))
2326 			goto out;
2327 
2328 		error = -EPERM;
2329 		if (on_sig_stack(sp))
2330 			goto out;
2331 
2332 		error = -EINVAL;
2333 		/*
2334 		 * Note - this code used to test ss_flags incorrectly:
2335 		 * old code may have been written using
2336 		 * ss_flags==0 to mean ss_flags==SS_ONSTACK
2337 		 * (as this was the only way that worked),
2338 		 * so this fix preserves that older
2339 		 * mechanism.
2340 		 */
2341 		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2342 			goto out;
2343 
2344 		if (ss_flags == SS_DISABLE) {
2345 			ss_size = 0;
2346 			ss_sp = NULL;
2347 		} else {
2348 			error = -ENOMEM;
2349 			if (ss_size < MINSIGSTKSZ)
2350 				goto out;
2351 		}
2352 
2353 		current->sas_ss_sp = (unsigned long) ss_sp;
2354 		current->sas_ss_size = ss_size;
2355 	}
2356 
2357 	if (uoss) {
2358 		error = -EFAULT;
2359 		if (copy_to_user(uoss, &oss, sizeof(oss)))
2360 			goto out;
2361 	}
2362 
2363 	error = 0;
2364 out:
2365 	return error;
2366 }
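
/*
 * Usage sketch (illustrative only): the usual user-space pairing is
 * sigaltstack(2) plus a handler installed with SA_ONSTACK, e.g. so a
 * stack-overflow SIGSEGV can still run its handler.  segv_handler is
 * hypothetical:
 *
 *	static char altstack[SIGSTKSZ];
 *
 *	stack_t ss = {
 *		.ss_sp    = altstack,
 *		.ss_size  = sizeof(altstack),
 *		.ss_flags = 0,
 *	};
 *	struct sigaction sa = {
 *		.sa_handler = segv_handler,
 *		.sa_flags   = SA_ONSTACK,
 *	};
 *
 *	sigaltstack(&ss, NULL);
 *	sigaction(SIGSEGV, &sa, NULL);
 */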
2367 
2368 #ifdef __ARCH_WANT_SYS_SIGPENDING
2369 
2370 asmlinkage long
2371 sys_sigpending(old_sigset_t __user *set)
2372 {
2373 	return do_sigpending(set, sizeof(*set));
2374 }
2375 
2376 #endif
2377 
2378 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2379 /* Some platforms have their own version with special arguments; others
2380    support only sys_rt_sigprocmask.  */
2381 
2382 asmlinkage long
2383 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2384 {
2385 	int error;
2386 	old_sigset_t old_set, new_set;
2387 
2388 	if (set) {
2389 		error = -EFAULT;
2390 		if (copy_from_user(&new_set, set, sizeof(*set)))
2391 			goto out;
2392 		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2393 
2394 		spin_lock_irq(&current->sighand->siglock);
2395 		old_set = current->blocked.sig[0];
2396 
2397 		error = 0;
2398 		switch (how) {
2399 		default:
2400 			error = -EINVAL;
2401 			break;
2402 		case SIG_BLOCK:
2403 			sigaddsetmask(&current->blocked, new_set);
2404 			break;
2405 		case SIG_UNBLOCK:
2406 			sigdelsetmask(&current->blocked, new_set);
2407 			break;
2408 		case SIG_SETMASK:
2409 			current->blocked.sig[0] = new_set;
2410 			break;
2411 		}
2412 
2413 		recalc_sigpending();
2414 		spin_unlock_irq(&current->sighand->siglock);
2415 		if (error)
2416 			goto out;
2417 		if (oset)
2418 			goto set_old;
2419 	} else if (oset) {
2420 		old_set = current->blocked.sig[0];
2421 	set_old:
2422 		error = -EFAULT;
2423 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2424 			goto out;
2425 	}
2426 	error = 0;
2427 out:
2428 	return error;
2429 }
2430 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2431 
2432 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2433 asmlinkage long
2434 sys_rt_sigaction(int sig,
2435 		 const struct sigaction __user *act,
2436 		 struct sigaction __user *oact,
2437 		 size_t sigsetsize)
2438 {
2439 	struct k_sigaction new_sa, old_sa;
2440 	int ret = -EINVAL;
2441 
2442 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2443 	if (sigsetsize != sizeof(sigset_t))
2444 		goto out;
2445 
2446 	if (act) {
2447 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2448 			return -EFAULT;
2449 	}
2450 
2451 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2452 
2453 	if (!ret && oact) {
2454 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2455 			return -EFAULT;
2456 	}
2457 out:
2458 	return ret;
2459 }
2460 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2461 
2462 #ifdef __ARCH_WANT_SYS_SGETMASK
2463 
2464 /*
2465  * For backwards compatibility.  Functionality superseded by sigprocmask.
2466  */
2467 asmlinkage long
2468 sys_sgetmask(void)
2469 {
2470 	/* SMP safe */
2471 	return current->blocked.sig[0];
2472 }
2473 
2474 asmlinkage long
2475 sys_ssetmask(int newmask)
2476 {
2477 	int old;
2478 
2479 	spin_lock_irq(&current->sighand->siglock);
2480 	old = current->blocked.sig[0];
2481 
2482 	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2483 						  sigmask(SIGSTOP)));
2484 	recalc_sigpending();
2485 	spin_unlock_irq(&current->sighand->siglock);
2486 
2487 	return old;
2488 }
2489 #endif /* __ARCH_WANT_SYS_SGETMASK */
2490 
2491 #ifdef __ARCH_WANT_SYS_SIGNAL
2492 /*
2493  * For backwards compatibility.  Functionality superseded by sigaction.
2494  */
2495 asmlinkage unsigned long
2496 sys_signal(int sig, __sighandler_t handler)
2497 {
2498 	struct k_sigaction new_sa, old_sa;
2499 	int ret;
2500 
2501 	new_sa.sa.sa_handler = handler;
2502 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2503 	sigemptyset(&new_sa.sa.sa_mask);
2504 
2505 	ret = do_sigaction(sig, &new_sa, &old_sa);
2506 
2507 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2508 }
2509 #endif /* __ARCH_WANT_SYS_SIGNAL */
2510 
2511 #ifdef __ARCH_WANT_SYS_PAUSE
2512 
2513 asmlinkage long
2514 sys_pause(void)
2515 {
2516 	current->state = TASK_INTERRUPTIBLE;
2517 	schedule();
2518 	return -ERESTARTNOHAND;
2519 }
2520 
2521 #endif
2522 
2523 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2524 asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2525 {
2526 	sigset_t newset;
2527 
2528 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2529 	if (sigsetsize != sizeof(sigset_t))
2530 		return -EINVAL;
2531 
2532 	if (copy_from_user(&newset, unewset, sizeof(newset)))
2533 		return -EFAULT;
2534 	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2535 
2536 	spin_lock_irq(&current->sighand->siglock);
2537 	current->saved_sigmask = current->blocked;
2538 	current->blocked = newset;
2539 	recalc_sigpending();
2540 	spin_unlock_irq(&current->sighand->siglock);
2541 
2542 	current->state = TASK_INTERRUPTIBLE;
2543 	schedule();
2544 	set_restore_sigmask();
2545 	return -ERESTARTNOHAND;
2546 }
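
/*
 * Usage sketch (illustrative only): the saved_sigmask / set_restore_sigmask()
 * sequence above is what makes the classic race-free sigsuspend(2) wait
 * pattern work, because the original mask is restored only after any handler
 * has run on the way back to user space.  flag_set_by_handler is a
 * hypothetical volatile sig_atomic_t set by the SIGUSR1 handler, and SIGUSR1
 * is assumed not to have been blocked beforehand:
 *
 *	sigset_t block, waitmask;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &waitmask);
 *	while (!flag_set_by_handler)
 *		sigsuspend(&waitmask);
 *	sigprocmask(SIG_SETMASK, &waitmask, NULL);
 */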
2547 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
2548 
2549 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2550 {
2551 	return NULL;
2552 }
2553 
2554 void __init signals_init(void)
2555 {
2556 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2557 }
2558