xref: /linux/kernel/signal.c (revision aeb3f46252e26acdc60a1a8e31fb1ca6319d9a07)
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *		Changes to use preallocated sigqueue structures
10  *		to allow signals to be sent reliably.
11  */
12 
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/security.h>
21 #include <linux/syscalls.h>
22 #include <linux/ptrace.h>
23 #include <linux/signal.h>
24 #include <linux/signalfd.h>
25 #include <linux/capability.h>
26 #include <linux/freezer.h>
27 #include <linux/pid_namespace.h>
28 #include <linux/nsproxy.h>
29 
30 #include <asm/param.h>
31 #include <asm/uaccess.h>
32 #include <asm/unistd.h>
33 #include <asm/siginfo.h>
34 #include "audit.h"	/* audit_signal_info() */
35 
36 /*
37  * SLAB caches for signal bits.
38  */
39 
40 static struct kmem_cache *sigqueue_cachep;
41 
42 
43 static int sig_ignored(struct task_struct *t, int sig)
44 {
45 	void __user * handler;
46 
47 	/*
48 	 * Tracers always want to know about signals..
49 	 */
50 	if (t->ptrace & PT_PTRACED)
51 		return 0;
52 
53 	/*
54 	 * Blocked signals are never ignored, since the
55 	 * signal handler may change by the time it is
56 	 * unblocked.
57 	 */
58 	if (sigismember(&t->blocked, sig))
59 		return 0;
60 
61 	/* Is it explicitly or implicitly ignored? */
62 	handler = t->sighand->action[sig-1].sa.sa_handler;
63 	return   handler == SIG_IGN ||
64 		(handler == SIG_DFL && sig_kernel_ignore(sig));
65 }
66 
67 /*
68  * Re-calculate pending state from the set of locally pending
69  * signals, globally pending signals, and blocked signals.
70  */
71 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
72 {
73 	unsigned long ready;
74 	long i;
75 
76 	switch (_NSIG_WORDS) {
77 	default:
78 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
79 			ready |= signal->sig[i] &~ blocked->sig[i];
80 		break;
81 
82 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
83 		ready |= signal->sig[2] &~ blocked->sig[2];
84 		ready |= signal->sig[1] &~ blocked->sig[1];
85 		ready |= signal->sig[0] &~ blocked->sig[0];
86 		break;
87 
88 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
89 		ready |= signal->sig[0] &~ blocked->sig[0];
90 		break;
91 
92 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
93 	}
94 	return ready !=	0;
95 }
96 
97 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
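
/*
 * A minimal sketch (not part of this file) of what PENDING() and
 * has_pending_signals() compute: a signal counts as pending only if its
 * bit is set in the pending mask and clear in the blocked mask.  The
 * helper below is an equivalent but slower formulation using the sigset
 * operations from <linux/signal.h>; the open-coded word loop above avoids
 * the temporary set.
 */
#if 0
static int example_pending_not_blocked(sigset_t *pending, sigset_t *blocked)
{
	sigset_t ready;

	signandsets(&ready, pending, blocked);	/* ready = pending & ~blocked */
	return !sigisemptyset(&ready);
}
#endif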
98 
99 static int recalc_sigpending_tsk(struct task_struct *t)
100 {
101 	if (t->signal->group_stop_count > 0 ||
102 	    (freezing(t)) ||
103 	    PENDING(&t->pending, &t->blocked) ||
104 	    PENDING(&t->signal->shared_pending, &t->blocked)) {
105 		set_tsk_thread_flag(t, TIF_SIGPENDING);
106 		return 1;
107 	}
108 	/*
109 	 * We must never clear the flag in another thread, or in current
110 	 * when it's possible the current syscall is returning -ERESTART*.
111 	 * So we don't clear it here; we rely on callers who know when it is safe to clear it.
112 	 */
113 	return 0;
114 }
115 
116 /*
117  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
118  * This is superfluous when called on current; the wakeup is a harmless no-op.
119  */
120 void recalc_sigpending_and_wake(struct task_struct *t)
121 {
122 	if (recalc_sigpending_tsk(t))
123 		signal_wake_up(t, 0);
124 }
125 
126 void recalc_sigpending(void)
127 {
128 	if (!recalc_sigpending_tsk(current))
129 		clear_thread_flag(TIF_SIGPENDING);
130 
131 }
132 
133 /* Given the mask, find the first available signal that should be serviced. */
134 
135 int next_signal(struct sigpending *pending, sigset_t *mask)
136 {
137 	unsigned long i, *s, *m, x;
138 	int sig = 0;
139 
140 	s = pending->signal.sig;
141 	m = mask->sig;
142 	switch (_NSIG_WORDS) {
143 	default:
144 		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
145 			if ((x = *s &~ *m) != 0) {
146 				sig = ffz(~x) + i*_NSIG_BPW + 1;
147 				break;
148 			}
149 		break;
150 
151 	case 2: if ((x = s[0] &~ m[0]) != 0)
152 			sig = 1;
153 		else if ((x = s[1] &~ m[1]) != 0)
154 			sig = _NSIG_BPW + 1;
155 		else
156 			break;
157 		sig += ffz(~x);
158 		break;
159 
160 	case 1: if ((x = *s &~ *m) != 0)
161 			sig = ffz(~x) + 1;
162 		break;
163 	}
164 
165 	return sig;
166 }
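
/*
 * A worked sketch (hypothetical helpers, not part of this file) of the
 * mapping next_signal() inverts: signal N lives in word (N-1) / _NSIG_BPW,
 * bit (N-1) % _NSIG_BPW, which is why the bit found by ffz(~x) has
 * i*_NSIG_BPW + 1 added back to recover the signal number.
 */
#if 0
static int example_sig_to_word(int sig)
{
	return (sig - 1) / _NSIG_BPW;
}

static unsigned long example_sig_to_bit(int sig)
{
	return 1UL << ((sig - 1) % _NSIG_BPW);
}
#endif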
167 
168 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
169 					 int override_rlimit)
170 {
171 	struct sigqueue *q = NULL;
172 	struct user_struct *user;
173 
174 	/*
175 	 * In order to avoid problems with "switch_user()", we want to make
176 	 * sure that the compiler doesn't re-load "t->user"
177 	 */
178 	user = t->user;
179 	barrier();
180 	atomic_inc(&user->sigpending);
181 	if (override_rlimit ||
182 	    atomic_read(&user->sigpending) <=
183 			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
184 		q = kmem_cache_alloc(sigqueue_cachep, flags);
185 	if (unlikely(q == NULL)) {
186 		atomic_dec(&user->sigpending);
187 	} else {
188 		INIT_LIST_HEAD(&q->list);
189 		q->flags = 0;
190 		q->user = get_uid(user);
191 	}
192 	return(q);
193 }
194 
195 static void __sigqueue_free(struct sigqueue *q)
196 {
197 	if (q->flags & SIGQUEUE_PREALLOC)
198 		return;
199 	atomic_dec(&q->user->sigpending);
200 	free_uid(q->user);
201 	kmem_cache_free(sigqueue_cachep, q);
202 }
203 
204 void flush_sigqueue(struct sigpending *queue)
205 {
206 	struct sigqueue *q;
207 
208 	sigemptyset(&queue->signal);
209 	while (!list_empty(&queue->list)) {
210 		q = list_entry(queue->list.next, struct sigqueue , list);
211 		list_del_init(&q->list);
212 		__sigqueue_free(q);
213 	}
214 }
215 
216 /*
217  * Flush all pending signals for a task.
218  */
219 void flush_signals(struct task_struct *t)
220 {
221 	unsigned long flags;
222 
223 	spin_lock_irqsave(&t->sighand->siglock, flags);
224 	clear_tsk_thread_flag(t,TIF_SIGPENDING);
225 	flush_sigqueue(&t->pending);
226 	flush_sigqueue(&t->signal->shared_pending);
227 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
228 }
229 
230 void ignore_signals(struct task_struct *t)
231 {
232 	int i;
233 
234 	for (i = 0; i < _NSIG; ++i)
235 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
236 
237 	flush_signals(t);
238 }
239 
240 /*
241  * Flush all handlers for a task.
242  */
243 
244 void
245 flush_signal_handlers(struct task_struct *t, int force_default)
246 {
247 	int i;
248 	struct k_sigaction *ka = &t->sighand->action[0];
249 	for (i = _NSIG ; i != 0 ; i--) {
250 		if (force_default || ka->sa.sa_handler != SIG_IGN)
251 			ka->sa.sa_handler = SIG_DFL;
252 		ka->sa.sa_flags = 0;
253 		sigemptyset(&ka->sa.sa_mask);
254 		ka++;
255 	}
256 }
257 
258 int unhandled_signal(struct task_struct *tsk, int sig)
259 {
260 	if (is_init(tsk))
261 		return 1;
262 	if (tsk->ptrace & PT_PTRACED)
263 		return 0;
264 	return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
265 		(tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
266 }
267 
268 
269 /* Notify the system that a driver wants to block all signals for this
270  * process, and wants to be notified if any signals at all were to be
271  * sent/acted upon.  If the notifier routine returns non-zero, then the
272  * signal will be acted upon after all.  If the notifier routine returns 0,
273  * then the signal will be blocked.  Only one block per process is
274  * allowed.  priv is a pointer to private data that the notifier routine
275  * can use to determine if the signal should be blocked or not.  */
276 
277 void
278 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
279 {
280 	unsigned long flags;
281 
282 	spin_lock_irqsave(&current->sighand->siglock, flags);
283 	current->notifier_mask = mask;
284 	current->notifier_data = priv;
285 	current->notifier = notifier;
286 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
287 }
288 
289 /* Notify the system that blocking has ended. */
290 
291 void
292 unblock_all_signals(void)
293 {
294 	unsigned long flags;
295 
296 	spin_lock_irqsave(&current->sighand->siglock, flags);
297 	current->notifier = NULL;
298 	current->notifier_data = NULL;
299 	recalc_sigpending();
300 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
301 }
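
/*
 * Usage sketch for the notifier interface described above, assuming a
 * hypothetical driver context (example_notifier and the "allow" flag are
 * illustrative, not part of this file).  The notifier returning 0 holds
 * the signal back; returning nonzero lets it be delivered after all.
 */
#if 0
static int example_notifier(void *priv)
{
	int *allow = priv;

	return *allow;
}

static void example_critical_section(void)
{
	sigset_t all;
	int allow = 0;

	sigfillset(&all);
	block_all_signals(example_notifier, &allow, &all);
	/* ... signals named in "all" are now held back ... */
	unblock_all_signals();
}
#endif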
302 
303 static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
304 {
305 	struct sigqueue *q, *first = NULL;
306 	int still_pending = 0;
307 
308 	if (unlikely(!sigismember(&list->signal, sig)))
309 		return 0;
310 
311 	/*
312 	 * Collect the siginfo appropriate to this signal.  Check if
313 	 * there is another siginfo for the same signal.
314 	*/
315 	list_for_each_entry(q, &list->list, list) {
316 		if (q->info.si_signo == sig) {
317 			if (first) {
318 				still_pending = 1;
319 				break;
320 			}
321 			first = q;
322 		}
323 	}
324 	if (first) {
325 		list_del_init(&first->list);
326 		copy_siginfo(info, &first->info);
327 		__sigqueue_free(first);
328 		if (!still_pending)
329 			sigdelset(&list->signal, sig);
330 	} else {
331 
332 		/* Ok, it wasn't in the queue.  This must be
333 		   a fast-pathed signal or we must have been
334 		   out of queue space.  So zero out the info.
335 		 */
336 		sigdelset(&list->signal, sig);
337 		info->si_signo = sig;
338 		info->si_errno = 0;
339 		info->si_code = 0;
340 		info->si_pid = 0;
341 		info->si_uid = 0;
342 	}
343 	return 1;
344 }
345 
346 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
347 			siginfo_t *info)
348 {
349 	int sig = next_signal(pending, mask);
350 
351 	if (sig) {
352 		if (current->notifier) {
353 			if (sigismember(current->notifier_mask, sig)) {
354 				if (!(current->notifier)(current->notifier_data)) {
355 					clear_thread_flag(TIF_SIGPENDING);
356 					return 0;
357 				}
358 			}
359 		}
360 
361 		if (!collect_signal(sig, pending, info))
362 			sig = 0;
363 	}
364 
365 	return sig;
366 }
367 
368 /*
369  * Dequeue a signal and return the element to the caller, which is
370  * expected to free it.
371  *
372  * All callers have to hold the siglock.
373  */
374 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
375 {
376 	int signr = 0;
377 
378 	/* We only dequeue private signals from ourselves, we don't let
379 	 * signalfd steal them
380 	 */
381 	if (tsk == current)
382 		signr = __dequeue_signal(&tsk->pending, mask, info);
383 	if (!signr) {
384 		signr = __dequeue_signal(&tsk->signal->shared_pending,
385 					 mask, info);
386 		/*
387 		 * itimer signal ?
388 		 *
389 		 * itimers are process shared and we restart periodic
390 		 * itimers in the signal delivery path to prevent DoS
391 		 * attacks in the high resolution timer case. This is
392 		 * compliant with the old way of self restarting
393 		 * itimers, as the SIGALRM is a legacy signal and only
394 		 * queued once. Changing the restart behaviour to
395 		 * restart the timer in the signal dequeue path also
396 		 * reduces the timer noise on heavily loaded !highres
397 		 * systems.
398 		 */
399 		if (unlikely(signr == SIGALRM)) {
400 			struct hrtimer *tmr = &tsk->signal->real_timer;
401 
402 			if (!hrtimer_is_queued(tmr) &&
403 			    tsk->signal->it_real_incr.tv64 != 0) {
404 				hrtimer_forward(tmr, tmr->base->get_time(),
405 						tsk->signal->it_real_incr);
406 				hrtimer_restart(tmr);
407 			}
408 		}
409 	}
410 	if (likely(tsk == current))
411 		recalc_sigpending();
412 	if (signr && unlikely(sig_kernel_stop(signr))) {
413 		/*
414 		 * Set a marker that we have dequeued a stop signal.  Our
415 		 * caller might release the siglock and then the pending
416 		 * stop signal it is about to process is no longer in the
417 		 * pending bitmasks, but must still be cleared by a SIGCONT
418 		 * (and overruled by a SIGKILL).  So those cases clear this
419 		 * shared flag after we've set it.  Note that this flag may
420 		 * remain set after the signal we return is ignored or
421 		 * handled.  That doesn't matter because its only purpose
422 		 * is to alert stop-signal processing code when another
423 		 * processor has come along and cleared the flag.
424 		 */
425 		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
426 			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
427 	}
428 	if ( signr &&
429 	     ((info->si_code & __SI_MASK) == __SI_TIMER) &&
430 	     info->si_sys_private){
431 		/*
432 		 * Release the siglock to ensure proper locking order
433 		 * of timer locks outside of siglocks.  Note, we leave
434 		 * irqs disabled here, since the posix-timers code is
435 		 * about to disable them again anyway.
436 		 */
437 		spin_unlock(&tsk->sighand->siglock);
438 		do_schedule_next_timer(info);
439 		spin_lock(&tsk->sighand->siglock);
440 	}
441 	return signr;
442 }
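
/*
 * Calling-convention sketch for dequeue_signal(): the siglock must be
 * held, as noted above.  Hypothetical helper, not part of this file.
 */
#if 0
static int example_dequeue_one(siginfo_t *info)
{
	int signr;

	spin_lock_irq(&current->sighand->siglock);
	signr = dequeue_signal(current, &current->blocked, info);
	spin_unlock_irq(&current->sighand->siglock);

	return signr;	/* 0 if nothing deliverable was pending */
}
#endif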
443 
444 /*
445  * Tell a process that it has a new active signal..
446  *
447  * NOTE! we rely on the previous spin_lock to
448  * lock interrupts for us! We can only be called with
449  * "siglock" held, and local interrupts must
450  * have been disabled when that got acquired!
451  *
452  * No need to set need_resched since signal event passing
453  * goes through ->blocked
454  */
455 void signal_wake_up(struct task_struct *t, int resume)
456 {
457 	unsigned int mask;
458 
459 	set_tsk_thread_flag(t, TIF_SIGPENDING);
460 
461 	/*
462 	 * For SIGKILL, we want to wake it up in the stopped/traced case.
463 	 * We don't check t->state here because there is a race with it
464 	 * executing on another processor and just now entering stopped state.
465 	 * By using wake_up_state, we ensure the process will wake up and
466 	 * handle its death signal.
467 	 */
468 	mask = TASK_INTERRUPTIBLE;
469 	if (resume)
470 		mask |= TASK_STOPPED | TASK_TRACED;
471 	if (!wake_up_state(t, mask))
472 		kick_process(t);
473 }
474 
475 /*
476  * Remove signals in mask from the pending set and queue.
477  * Returns 1 if any signals were found.
478  *
479  * All callers must be holding the siglock.
480  *
481  * This version takes a sigset mask and looks at all signals,
482  * not just those in the first mask word.
483  */
484 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
485 {
486 	struct sigqueue *q, *n;
487 	sigset_t m;
488 
489 	sigandsets(&m, mask, &s->signal);
490 	if (sigisemptyset(&m))
491 		return 0;
492 
493 	signandsets(&s->signal, &s->signal, mask);
494 	list_for_each_entry_safe(q, n, &s->list, list) {
495 		if (sigismember(mask, q->info.si_signo)) {
496 			list_del_init(&q->list);
497 			__sigqueue_free(q);
498 		}
499 	}
500 	return 1;
501 }
502 /*
503  * Remove signals in mask from the pending set and queue.
504  * Returns 1 if any signals were found.
505  *
506  * All callers must be holding the siglock.
507  */
508 static int rm_from_queue(unsigned long mask, struct sigpending *s)
509 {
510 	struct sigqueue *q, *n;
511 
512 	if (!sigtestsetmask(&s->signal, mask))
513 		return 0;
514 
515 	sigdelsetmask(&s->signal, mask);
516 	list_for_each_entry_safe(q, n, &s->list, list) {
517 		if (q->info.si_signo < SIGRTMIN &&
518 		    (mask & sigmask(q->info.si_signo))) {
519 			list_del_init(&q->list);
520 			__sigqueue_free(q);
521 		}
522 	}
523 	return 1;
524 }
525 
526 /*
527  * Bad permissions for sending the signal
528  */
529 static int check_kill_permission(int sig, struct siginfo *info,
530 				 struct task_struct *t)
531 {
532 	int error = -EINVAL;
533 	if (!valid_signal(sig))
534 		return error;
535 
536 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
537 	if (error)
538 		return error;
539 
540 	error = -EPERM;
541 	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
542 	    && ((sig != SIGCONT) ||
543 		(process_session(current) != process_session(t)))
544 	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
545 	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
546 	    && !capable(CAP_KILL))
547 		return error;
548 
549 	return security_task_kill(t, info, sig, 0);
550 }
551 
552 /* forward decl */
553 static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
554 
555 /*
556  * Handle magic process-wide effects of stop/continue signals.
557  * Unlike the signal actions, these happen immediately at signal-generation
558  * time regardless of blocking, ignoring, or handling.  This does the
559  * actual continuing for SIGCONT, but not the actual stopping for stop
560  * signals.  The process stop is done as a signal action for SIG_DFL.
561  */
562 static void handle_stop_signal(int sig, struct task_struct *p)
563 {
564 	struct task_struct *t;
565 
566 	if (p->signal->flags & SIGNAL_GROUP_EXIT)
567 		/*
568 		 * The process is in the middle of dying already.
569 		 */
570 		return;
571 
572 	if (sig_kernel_stop(sig)) {
573 		/*
574 		 * This is a stop signal.  Remove SIGCONT from all queues.
575 		 */
576 		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
577 		t = p;
578 		do {
579 			rm_from_queue(sigmask(SIGCONT), &t->pending);
580 			t = next_thread(t);
581 		} while (t != p);
582 	} else if (sig == SIGCONT) {
583 		/*
584 		 * Remove all stop signals from all queues,
585 		 * and wake all threads.
586 		 */
587 		if (unlikely(p->signal->group_stop_count > 0)) {
588 			/*
589 			 * There was a group stop in progress.  We'll
590 			 * pretend it finished before we got here.  We are
591 			 * obliged to report it to the parent: if the
592 			 * SIGSTOP happened "after" this SIGCONT, then it
593 			 * would have cleared this pending SIGCONT.  If it
594 			 * happened "before" this SIGCONT, then the parent
595 			 * got the SIGCHLD about the stop finishing before
596 			 * the continue happened.  We do the notification
597 			 * now, and it's as if the stop had finished and
598 			 * the SIGCHLD was pending on entry to this kill.
599 			 */
600 			p->signal->group_stop_count = 0;
601 			p->signal->flags = SIGNAL_STOP_CONTINUED;
602 			spin_unlock(&p->sighand->siglock);
603 			do_notify_parent_cldstop(p, CLD_STOPPED);
604 			spin_lock(&p->sighand->siglock);
605 		}
606 		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
607 		t = p;
608 		do {
609 			unsigned int state;
610 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
611 
612 			/*
613 			 * If there is a handler for SIGCONT, we must make
614 			 * sure that no thread returns to user mode before
615 			 * we post the signal, in case it was the only
616 			 * thread eligible to run the signal handler--then
617 			 * it must not do anything between resuming and
618 			 * running the handler.  With the TIF_SIGPENDING
619 			 * flag set, the thread will pause and acquire the
620 			 * siglock that we hold now and until we've queued
621 			 * the pending signal.
622 			 *
623 			 * Wake up the stopped thread _after_ setting
624 			 * TIF_SIGPENDING
625 			 */
626 			state = TASK_STOPPED;
627 			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
628 				set_tsk_thread_flag(t, TIF_SIGPENDING);
629 				state |= TASK_INTERRUPTIBLE;
630 			}
631 			wake_up_state(t, state);
632 
633 			t = next_thread(t);
634 		} while (t != p);
635 
636 		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
637 			/*
638 			 * We were in fact stopped, and are now continued.
639 			 * Notify the parent with CLD_CONTINUED.
640 			 */
641 			p->signal->flags = SIGNAL_STOP_CONTINUED;
642 			p->signal->group_exit_code = 0;
643 			spin_unlock(&p->sighand->siglock);
644 			do_notify_parent_cldstop(p, CLD_CONTINUED);
645 			spin_lock(&p->sighand->siglock);
646 		} else {
647 			/*
648 			 * We are not stopped, but there could be a stop
649 			 * signal in the middle of being processed after
650 			 * being removed from the queue.  Clear that too.
651 			 */
652 			p->signal->flags = 0;
653 		}
654 	} else if (sig == SIGKILL) {
655 		/*
656 		 * Make sure that any pending stop signal already dequeued
657 		 * is undone by the wakeup for SIGKILL.
658 		 */
659 		p->signal->flags = 0;
660 	}
661 }
662 
663 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
664 			struct sigpending *signals)
665 {
666 	struct sigqueue * q = NULL;
667 	int ret = 0;
668 
669 	/*
670 	 * Deliver the signal to listening signalfds. This must be called
671 	 * with the sighand lock held.
672 	 */
673 	signalfd_notify(t, sig);
674 
675 	/*
676 	 * fast-pathed signals for kernel-internal things like SIGSTOP
677 	 * or SIGKILL.
678 	 */
679 	if (info == SEND_SIG_FORCED)
680 		goto out_set;
681 
682 	/* Real-time signals must be queued if sent by sigqueue, or
683 	   some other real-time mechanism.  It is implementation
684 	   defined whether kill() does so.  We attempt to do so, on
685 	   the principle of least surprise, but since kill is not
686 	   allowed to fail with EAGAIN when low on memory we just
687 	   make sure at least one signal gets delivered and don't
688 	   pass on the info struct.  */
689 
690 	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
691 					     (is_si_special(info) ||
692 					      info->si_code >= 0)));
693 	if (q) {
694 		list_add_tail(&q->list, &signals->list);
695 		switch ((unsigned long) info) {
696 		case (unsigned long) SEND_SIG_NOINFO:
697 			q->info.si_signo = sig;
698 			q->info.si_errno = 0;
699 			q->info.si_code = SI_USER;
700 			q->info.si_pid = current->pid;
701 			q->info.si_uid = current->uid;
702 			break;
703 		case (unsigned long) SEND_SIG_PRIV:
704 			q->info.si_signo = sig;
705 			q->info.si_errno = 0;
706 			q->info.si_code = SI_KERNEL;
707 			q->info.si_pid = 0;
708 			q->info.si_uid = 0;
709 			break;
710 		default:
711 			copy_siginfo(&q->info, info);
712 			break;
713 		}
714 	} else if (!is_si_special(info)) {
715 		if (sig >= SIGRTMIN && info->si_code != SI_USER)
716 		/*
717 		 * Queue overflow, abort.  We may abort if the signal was rt
718 		 * and sent by user using something other than kill().
719 		 */
720 			return -EAGAIN;
721 	}
722 
723 out_set:
724 	sigaddset(&signals->signal, sig);
725 	return ret;
726 }
727 
728 #define LEGACY_QUEUE(sigptr, sig) \
729 	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
730 
731 int print_fatal_signals;
732 
733 static void print_fatal_signal(struct pt_regs *regs, int signr)
734 {
735 	printk("%s/%d: potentially unexpected fatal signal %d.\n",
736 		current->comm, current->pid, signr);
737 
738 #ifdef __i386__
739 	printk("code at %08lx: ", regs->eip);
740 	{
741 		int i;
742 		for (i = 0; i < 16; i++) {
743 			unsigned char insn;
744 
745 			__get_user(insn, (unsigned char *)(regs->eip + i));
746 			printk("%02x ", insn);
747 		}
748 	}
749 #endif
750 	printk("\n");
751 	show_regs(regs);
752 }
753 
754 static int __init setup_print_fatal_signals(char *str)
755 {
756 	get_option (&str, &print_fatal_signals);
757 
758 	return 1;
759 }
760 
761 __setup("print-fatal-signals=", setup_print_fatal_signals);
762 
763 static int
764 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
765 {
766 	int ret = 0;
767 
768 	BUG_ON(!irqs_disabled());
769 	assert_spin_locked(&t->sighand->siglock);
770 
771 	/* Short-circuit ignored signals.  */
772 	if (sig_ignored(t, sig))
773 		goto out;
774 
775 	/* Support queueing exactly one non-rt signal, so that we
776 	   can get more detailed information about the cause of
777 	   the signal. */
778 	if (LEGACY_QUEUE(&t->pending, sig))
779 		goto out;
780 
781 	ret = send_signal(sig, info, t, &t->pending);
782 	if (!ret && !sigismember(&t->blocked, sig))
783 		signal_wake_up(t, sig == SIGKILL);
784 out:
785 	return ret;
786 }
787 
788 /*
789  * Force a signal that the process can't ignore: if necessary
790  * we unblock the signal and change any SIG_IGN to SIG_DFL.
791  *
792  * Note: If we unblock the signal, we always reset it to SIG_DFL,
793  * since we do not want to have a signal handler that was blocked
794  * be invoked when user space had explicitly blocked it.
795  *
796  * We don't want to have recursive SIGSEGV's etc, for example.
797  */
798 int
799 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
800 {
801 	unsigned long int flags;
802 	int ret, blocked, ignored;
803 	struct k_sigaction *action;
804 
805 	spin_lock_irqsave(&t->sighand->siglock, flags);
806 	action = &t->sighand->action[sig-1];
807 	ignored = action->sa.sa_handler == SIG_IGN;
808 	blocked = sigismember(&t->blocked, sig);
809 	if (blocked || ignored) {
810 		action->sa.sa_handler = SIG_DFL;
811 		if (blocked) {
812 			sigdelset(&t->blocked, sig);
813 			recalc_sigpending_and_wake(t);
814 		}
815 	}
816 	ret = specific_send_sig_info(sig, info, t);
817 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
818 
819 	return ret;
820 }
821 
822 void
823 force_sig_specific(int sig, struct task_struct *t)
824 {
825 	force_sig_info(sig, SEND_SIG_FORCED, t);
826 }
827 
828 /*
829  * Test if P wants to take SIG.  After we've checked all threads with this,
830  * it's equivalent to finding no threads not blocking SIG.  Any threads not
831  * blocking SIG were ruled out because they are not running and already
832  * have pending signals.  Such threads will dequeue from the shared queue
833  * as soon as they're available, so putting the signal on the shared queue
834  * will be equivalent to sending it to one such thread.
835  */
836 static inline int wants_signal(int sig, struct task_struct *p)
837 {
838 	if (sigismember(&p->blocked, sig))
839 		return 0;
840 	if (p->flags & PF_EXITING)
841 		return 0;
842 	if (sig == SIGKILL)
843 		return 1;
844 	if (p->state & (TASK_STOPPED | TASK_TRACED))
845 		return 0;
846 	return task_curr(p) || !signal_pending(p);
847 }
848 
849 static void
850 __group_complete_signal(int sig, struct task_struct *p)
851 {
852 	struct task_struct *t;
853 
854 	/*
855 	 * Now find a thread we can wake up to take the signal off the queue.
856 	 *
857 	 * If the main thread wants the signal, it gets first crack.
858 	 * Probably the least surprising to the average bear.
859 	 */
860 	if (wants_signal(sig, p))
861 		t = p;
862 	else if (thread_group_empty(p))
863 		/*
864 		 * There is just one thread and it does not need to be woken.
865 		 * It will dequeue unblocked signals before it runs again.
866 		 */
867 		return;
868 	else {
869 		/*
870 		 * Otherwise try to find a suitable thread.
871 		 */
872 		t = p->signal->curr_target;
873 		if (t == NULL)
874 			/* restart balancing at this thread */
875 			t = p->signal->curr_target = p;
876 
877 		while (!wants_signal(sig, t)) {
878 			t = next_thread(t);
879 			if (t == p->signal->curr_target)
880 				/*
881 				 * No thread needs to be woken.
882 				 * Any eligible threads will see
883 				 * the signal in the queue soon.
884 				 */
885 				return;
886 		}
887 		p->signal->curr_target = t;
888 	}
889 
890 	/*
891 	 * Found a killable thread.  If the signal will be fatal,
892 	 * then start taking the whole group down immediately.
893 	 */
894 	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
895 	    !sigismember(&t->real_blocked, sig) &&
896 	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
897 		/*
898 		 * This signal will be fatal to the whole group.
899 		 */
900 		if (!sig_kernel_coredump(sig)) {
901 			/*
902 			 * Start a group exit and wake everybody up.
903 			 * This way we don't have other threads
904 			 * running and doing things after a slower
905 			 * thread has the fatal signal pending.
906 			 */
907 			p->signal->flags = SIGNAL_GROUP_EXIT;
908 			p->signal->group_exit_code = sig;
909 			p->signal->group_stop_count = 0;
910 			t = p;
911 			do {
912 				sigaddset(&t->pending.signal, SIGKILL);
913 				signal_wake_up(t, 1);
914 				t = next_thread(t);
915 			} while (t != p);
916 			return;
917 		}
918 
919 		/*
920 		 * There will be a core dump.  We make all threads other
921 		 * than the chosen one go into a group stop so that nothing
922 		 * happens until it gets scheduled, takes the signal off
923 		 * the shared queue, and does the core dump.  This is a
924 		 * little more complicated than strictly necessary, but it
925 		 * keeps the signal state that winds up in the core dump
926 		 * unchanged from the death state, e.g. which thread had
927 		 * the core-dump signal unblocked.
928 		 */
929 		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
930 		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
931 		p->signal->group_stop_count = 0;
932 		p->signal->group_exit_task = t;
933 		t = p;
934 		do {
935 			p->signal->group_stop_count++;
936 			signal_wake_up(t, 0);
937 			t = next_thread(t);
938 		} while (t != p);
939 		wake_up_process(p->signal->group_exit_task);
940 		return;
941 	}
942 
943 	/*
944 	 * The signal is already in the shared-pending queue.
945 	 * Tell the chosen thread to wake up and dequeue it.
946 	 */
947 	signal_wake_up(t, sig == SIGKILL);
948 	return;
949 }
950 
951 int
952 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
953 {
954 	int ret = 0;
955 
956 	assert_spin_locked(&p->sighand->siglock);
957 	handle_stop_signal(sig, p);
958 
959 	/* Short-circuit ignored signals.  */
960 	if (sig_ignored(p, sig))
961 		return ret;
962 
963 	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
964 		/* This is a non-RT signal and we already have one queued.  */
965 		return ret;
966 
967 	/*
968 	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
969 	 * We always use the shared queue for process-wide signals,
970 	 * to avoid several races.
971 	 */
972 	ret = send_signal(sig, info, p, &p->signal->shared_pending);
973 	if (unlikely(ret))
974 		return ret;
975 
976 	__group_complete_signal(sig, p);
977 	return 0;
978 }
979 
980 /*
981  * Nuke all other threads in the group.
982  */
983 void zap_other_threads(struct task_struct *p)
984 {
985 	struct task_struct *t;
986 
987 	p->signal->flags = SIGNAL_GROUP_EXIT;
988 	p->signal->group_stop_count = 0;
989 
990 	if (thread_group_empty(p))
991 		return;
992 
993 	for (t = next_thread(p); t != p; t = next_thread(t)) {
994 		/*
995 		 * Don't bother with already dead threads
996 		 */
997 		if (t->exit_state)
998 			continue;
999 
1000 		/* SIGKILL will be handled before any pending SIGSTOP */
1001 		sigaddset(&t->pending.signal, SIGKILL);
1002 		signal_wake_up(t, 1);
1003 	}
1004 }
1005 
1006 /*
1007  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
1008  */
1009 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
1010 {
1011 	struct sighand_struct *sighand;
1012 
1013 	for (;;) {
1014 		sighand = rcu_dereference(tsk->sighand);
1015 		if (unlikely(sighand == NULL))
1016 			break;
1017 
1018 		spin_lock_irqsave(&sighand->siglock, *flags);
1019 		if (likely(sighand == tsk->sighand))
1020 			break;
1021 		spin_unlock_irqrestore(&sighand->siglock, *flags);
1022 	}
1023 
1024 	return sighand;
1025 }
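
/*
 * Locking-pattern sketch for lock_task_sighand(): callers hold
 * rcu_read_lock() (or read-hold tasklist_lock) across the call and pair
 * it with unlock_task_sighand(), as group_send_sig_info() below does.
 * Hypothetical caller, illustrative only.
 */
#if 0
static int example_with_sighand(struct task_struct *p)
{
	unsigned long flags;
	int ret = -ESRCH;

	rcu_read_lock();
	if (lock_task_sighand(p, &flags)) {
		/* p->sighand is stable here and its siglock is held */
		unlock_task_sighand(p, &flags);
		ret = 0;
	}
	rcu_read_unlock();
	return ret;
}
#endif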
1026 
1027 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1028 {
1029 	unsigned long flags;
1030 	int ret;
1031 
1032 	ret = check_kill_permission(sig, info, p);
1033 
1034 	if (!ret && sig) {
1035 		ret = -ESRCH;
1036 		if (lock_task_sighand(p, &flags)) {
1037 			ret = __group_send_sig_info(sig, info, p);
1038 			unlock_task_sighand(p, &flags);
1039 		}
1040 	}
1041 
1042 	return ret;
1043 }
1044 
1045 /*
1046  * kill_pgrp_info() sends a signal to a process group: this is what the tty
1047  * control characters do (^C, ^Z etc)
1048  */
1049 
1050 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1051 {
1052 	struct task_struct *p = NULL;
1053 	int retval, success;
1054 
1055 	success = 0;
1056 	retval = -ESRCH;
1057 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1058 		int err = group_send_sig_info(sig, info, p);
1059 		success |= !err;
1060 		retval = err;
1061 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1062 	return success ? 0 : retval;
1063 }
1064 
1065 int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1066 {
1067 	int retval;
1068 
1069 	read_lock(&tasklist_lock);
1070 	retval = __kill_pgrp_info(sig, info, pgrp);
1071 	read_unlock(&tasklist_lock);
1072 
1073 	return retval;
1074 }
1075 
1076 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1077 {
1078 	int error;
1079 	struct task_struct *p;
1080 
1081 	rcu_read_lock();
1082 	if (unlikely(sig_needs_tasklist(sig)))
1083 		read_lock(&tasklist_lock);
1084 
1085 	p = pid_task(pid, PIDTYPE_PID);
1086 	error = -ESRCH;
1087 	if (p)
1088 		error = group_send_sig_info(sig, info, p);
1089 
1090 	if (unlikely(sig_needs_tasklist(sig)))
1091 		read_unlock(&tasklist_lock);
1092 	rcu_read_unlock();
1093 	return error;
1094 }
1095 
1096 int
1097 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1098 {
1099 	int error;
1100 	rcu_read_lock();
1101 	error = kill_pid_info(sig, info, find_pid(pid));
1102 	rcu_read_unlock();
1103 	return error;
1104 }
1105 
1106 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1107 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1108 		      uid_t uid, uid_t euid, u32 secid)
1109 {
1110 	int ret = -EINVAL;
1111 	struct task_struct *p;
1112 
1113 	if (!valid_signal(sig))
1114 		return ret;
1115 
1116 	read_lock(&tasklist_lock);
1117 	p = pid_task(pid, PIDTYPE_PID);
1118 	if (!p) {
1119 		ret = -ESRCH;
1120 		goto out_unlock;
1121 	}
1122 	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1123 	    && (euid != p->suid) && (euid != p->uid)
1124 	    && (uid != p->suid) && (uid != p->uid)) {
1125 		ret = -EPERM;
1126 		goto out_unlock;
1127 	}
1128 	ret = security_task_kill(p, info, sig, secid);
1129 	if (ret)
1130 		goto out_unlock;
1131 	if (sig && p->sighand) {
1132 		unsigned long flags;
1133 		spin_lock_irqsave(&p->sighand->siglock, flags);
1134 		ret = __group_send_sig_info(sig, info, p);
1135 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1136 	}
1137 out_unlock:
1138 	read_unlock(&tasklist_lock);
1139 	return ret;
1140 }
1141 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1142 
1143 /*
1144  * kill_something_info() interprets pid in interesting ways just like kill(2).
1145  *
1146  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1147  * is probably wrong.  Should make it like BSD or SYSV.
1148  */
1149 
1150 static int kill_something_info(int sig, struct siginfo *info, int pid)
1151 {
1152 	int ret;
1153 	rcu_read_lock();
1154 	if (!pid) {
1155 		ret = kill_pgrp_info(sig, info, task_pgrp(current));
1156 	} else if (pid == -1) {
1157 		int retval = 0, count = 0;
1158 		struct task_struct * p;
1159 
1160 		read_lock(&tasklist_lock);
1161 		for_each_process(p) {
1162 			if (p->pid > 1 && p->tgid != current->tgid) {
1163 				int err = group_send_sig_info(sig, info, p);
1164 				++count;
1165 				if (err != -EPERM)
1166 					retval = err;
1167 			}
1168 		}
1169 		read_unlock(&tasklist_lock);
1170 		ret = count ? retval : -ESRCH;
1171 	} else if (pid < 0) {
1172 		ret = kill_pgrp_info(sig, info, find_pid(-pid));
1173 	} else {
1174 		ret = kill_pid_info(sig, info, find_pid(pid));
1175 	}
1176 	rcu_read_unlock();
1177 	return ret;
1178 }
1179 
1180 /*
1181  * These are for backward compatibility with the rest of the kernel source.
1182  */
1183 
1184 /*
1185  * These two are the most common entry points.  They send a signal
1186  * just to the specific thread.
1187  */
1188 int
1189 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1190 {
1191 	int ret;
1192 	unsigned long flags;
1193 
1194 	/*
1195 	 * Make sure legacy kernel users don't send in bad values
1196 	 * (normal paths check this in check_kill_permission).
1197 	 */
1198 	if (!valid_signal(sig))
1199 		return -EINVAL;
1200 
1201 	/*
1202 	 * We need the tasklist lock even for the specific
1203 	 * thread case (when we don't need to follow the group
1204 	 * lists) in order to avoid races with "p->sighand"
1205 	 * going away or changing from under us.
1206 	 */
1207 	read_lock(&tasklist_lock);
1208 	spin_lock_irqsave(&p->sighand->siglock, flags);
1209 	ret = specific_send_sig_info(sig, info, p);
1210 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
1211 	read_unlock(&tasklist_lock);
1212 	return ret;
1213 }
1214 
1215 #define __si_special(priv) \
1216 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1217 
1218 int
1219 send_sig(int sig, struct task_struct *p, int priv)
1220 {
1221 	return send_sig_info(sig, __si_special(priv), p);
1222 }
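
/*
 * Sketch of typical in-kernel callers (hypothetical example_notify(), not
 * part of this file): priv selects between the SEND_SIG_NOINFO and
 * SEND_SIG_PRIV special siginfo pointers handled in send_signal().
 */
#if 0
static void example_notify(struct task_struct *task)
{
	send_sig(SIGTERM, task, 0);	/* looks like a user-sent signal (SI_USER) */
	send_sig(SIGKILL, task, 1);	/* marked as kernel-originated (SI_KERNEL) */
}
#endif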
1223 
1224 /*
1225  * This is the entry point for "process-wide" signals.
1226  * They will go to an appropriate thread in the thread group.
1227  */
1228 int
1229 send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1230 {
1231 	int ret;
1232 	read_lock(&tasklist_lock);
1233 	ret = group_send_sig_info(sig, info, p);
1234 	read_unlock(&tasklist_lock);
1235 	return ret;
1236 }
1237 
1238 void
1239 force_sig(int sig, struct task_struct *p)
1240 {
1241 	force_sig_info(sig, SEND_SIG_PRIV, p);
1242 }
1243 
1244 /*
1245  * When things go south during signal handling, we
1246  * will force a SIGSEGV. And if the signal that caused
1247  * the problem was already a SIGSEGV, we'll want to
1248  * make sure we don't even try to deliver the signal..
1249  */
1250 int
1251 force_sigsegv(int sig, struct task_struct *p)
1252 {
1253 	if (sig == SIGSEGV) {
1254 		unsigned long flags;
1255 		spin_lock_irqsave(&p->sighand->siglock, flags);
1256 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1257 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1258 	}
1259 	force_sig(SIGSEGV, p);
1260 	return 0;
1261 }
1262 
1263 int kill_pgrp(struct pid *pid, int sig, int priv)
1264 {
1265 	return kill_pgrp_info(sig, __si_special(priv), pid);
1266 }
1267 EXPORT_SYMBOL(kill_pgrp);
1268 
1269 int kill_pid(struct pid *pid, int sig, int priv)
1270 {
1271 	return kill_pid_info(sig, __si_special(priv), pid);
1272 }
1273 EXPORT_SYMBOL(kill_pid);
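
/*
 * Usage sketch: kill_pid() and kill_pgrp() take a struct pid reference
 * rather than a numeric pid, e.g. one obtained from find_get_pid().
 * Hypothetical helper, illustrative only.
 */
#if 0
static int example_signal_pid(pid_t nr)
{
	struct pid *pid = find_get_pid(nr);
	int ret;

	if (!pid)
		return -ESRCH;
	ret = kill_pid(pid, SIGTERM, 1);
	put_pid(pid);
	return ret;
}
#endif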
1274 
1275 int
1276 kill_proc(pid_t pid, int sig, int priv)
1277 {
1278 	return kill_proc_info(sig, __si_special(priv), pid);
1279 }
1280 
1281 /*
1282  * These functions support sending signals using preallocated sigqueue
1283  * structures.  This is needed "because realtime applications cannot
1284  * afford to lose notifications of asynchronous events, like timer
1285  * expirations or I/O completions".  In the case of Posix Timers
1286  * we allocate the sigqueue structure in timer_create().  If this
1287  * allocation fails we are able to report the failure to the application
1288  * with an EAGAIN error.
1289  */
1290 
1291 struct sigqueue *sigqueue_alloc(void)
1292 {
1293 	struct sigqueue *q;
1294 
1295 	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1296 		q->flags |= SIGQUEUE_PREALLOC;
1297 	return(q);
1298 }
1299 
1300 void sigqueue_free(struct sigqueue *q)
1301 {
1302 	unsigned long flags;
1303 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1304 	/*
1305 	 * If the signal is still pending remove it from the
1306 	 * pending queue.
1307 	 */
1308 	if (unlikely(!list_empty(&q->list))) {
1309 		spinlock_t *lock = &current->sighand->siglock;
1310 		read_lock(&tasklist_lock);
1311 		spin_lock_irqsave(lock, flags);
1312 		if (!list_empty(&q->list))
1313 			list_del_init(&q->list);
1314 		spin_unlock_irqrestore(lock, flags);
1315 		read_unlock(&tasklist_lock);
1316 	}
1317 	q->flags &= ~SIGQUEUE_PREALLOC;
1318 	__sigqueue_free(q);
1319 }
1320 
1321 int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1322 {
1323 	unsigned long flags;
1324 	int ret = 0;
1325 
1326 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1327 
1328 	/*
1329 	 * The rcu based delayed sighand destroy makes it possible to
1330 	 * run this without tasklist lock held. The task struct itself
1331 	 * cannot go away as create_timer did get_task_struct().
1332 	 *
1333 	 * We return -1 when the task is marked exiting, so
1334 	 * posix_timer_event can redirect it to the group leader.
1335 	 */
1336 	rcu_read_lock();
1337 
1338 	if (!likely(lock_task_sighand(p, &flags))) {
1339 		ret = -1;
1340 		goto out_err;
1341 	}
1342 
1343 	if (unlikely(!list_empty(&q->list))) {
1344 		/*
1345 		 * If an SI_TIMER entry is already queued, just increment
1346 		 * the overrun count.
1347 		 */
1348 		BUG_ON(q->info.si_code != SI_TIMER);
1349 		q->info.si_overrun++;
1350 		goto out;
1351 	}
1352 	/* Short-circuit ignored signals.  */
1353 	if (sig_ignored(p, sig)) {
1354 		ret = 1;
1355 		goto out;
1356 	}
1357 	/*
1358 	 * Deliver the signal to listening signalfds. This must be called
1359 	 * with the sighand lock held.
1360 	 */
1361 	signalfd_notify(p, sig);
1362 
1363 	list_add_tail(&q->list, &p->pending.list);
1364 	sigaddset(&p->pending.signal, sig);
1365 	if (!sigismember(&p->blocked, sig))
1366 		signal_wake_up(p, sig == SIGKILL);
1367 
1368 out:
1369 	unlock_task_sighand(p, &flags);
1370 out_err:
1371 	rcu_read_unlock();
1372 
1373 	return ret;
1374 }
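
/*
 * Lifecycle sketch for the preallocated-sigqueue interface described
 * above (the pattern the POSIX timer code uses): allocate at setup time
 * so that later delivery cannot fail with -EAGAIN.  The example_* names
 * are hypothetical, not part of this file.
 */
#if 0
static struct sigqueue *example_setup(void)
{
	struct sigqueue *q = sigqueue_alloc();

	if (!q)
		return NULL;	/* setup path can report -EAGAIN to user space */
	q->info.si_signo = SIGALRM;
	q->info.si_code = SI_TIMER;
	return q;
}

static void example_fire(struct sigqueue *q, struct task_struct *t)
{
	send_sigqueue(SIGALRM, q, t);	/* or send_group_sigqueue() for the group */
}

static void example_teardown(struct sigqueue *q)
{
	sigqueue_free(q);
}
#endif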
1375 
1376 int
1377 send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1378 {
1379 	unsigned long flags;
1380 	int ret = 0;
1381 
1382 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1383 
1384 	read_lock(&tasklist_lock);
1385 	/* Since it_lock is held, p->sighand cannot be NULL. */
1386 	spin_lock_irqsave(&p->sighand->siglock, flags);
1387 	handle_stop_signal(sig, p);
1388 
1389 	/* Short-circuit ignored signals.  */
1390 	if (sig_ignored(p, sig)) {
1391 		ret = 1;
1392 		goto out;
1393 	}
1394 
1395 	if (unlikely(!list_empty(&q->list))) {
1396 		/*
1397 		 * If an SI_TIMER entry is already queued, just increment
1398 		 * the overrun count.  Other uses should not try to
1399 		 * send the signal multiple times.
1400 		 */
1401 		BUG_ON(q->info.si_code != SI_TIMER);
1402 		q->info.si_overrun++;
1403 		goto out;
1404 	}
1405 	/*
1406 	 * Deliver the signal to listening signalfds. This must be called
1407 	 * with the sighand lock held.
1408 	 */
1409 	signalfd_notify(p, sig);
1410 
1411 	/*
1412 	 * Put this signal on the shared-pending queue.
1413 	 * We always use the shared queue for process-wide signals,
1414 	 * to avoid several races.
1415 	 */
1416 	list_add_tail(&q->list, &p->signal->shared_pending.list);
1417 	sigaddset(&p->signal->shared_pending.signal, sig);
1418 
1419 	__group_complete_signal(sig, p);
1420 out:
1421 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
1422 	read_unlock(&tasklist_lock);
1423 	return ret;
1424 }
1425 
1426 /*
1427  * Wake up any threads in the parent blocked in wait* syscalls.
1428  */
1429 static inline void __wake_up_parent(struct task_struct *p,
1430 				    struct task_struct *parent)
1431 {
1432 	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1433 }
1434 
1435 /*
1436  * Let a parent know about the death of a child.
1437  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1438  */
1439 
1440 void do_notify_parent(struct task_struct *tsk, int sig)
1441 {
1442 	struct siginfo info;
1443 	unsigned long flags;
1444 	struct sighand_struct *psig;
1445 
1446 	BUG_ON(sig == -1);
1447 
1448  	/* do_notify_parent_cldstop should have been called instead.  */
1449  	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1450 
1451 	BUG_ON(!tsk->ptrace &&
1452 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1453 
1454 	info.si_signo = sig;
1455 	info.si_errno = 0;
1456 	info.si_pid = tsk->pid;
1457 	info.si_uid = tsk->uid;
1458 
1459 	/* FIXME: find out whether or not this is supposed to be c*time. */
1460 	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1461 						       tsk->signal->utime));
1462 	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1463 						       tsk->signal->stime));
1464 
1465 	info.si_status = tsk->exit_code & 0x7f;
1466 	if (tsk->exit_code & 0x80)
1467 		info.si_code = CLD_DUMPED;
1468 	else if (tsk->exit_code & 0x7f)
1469 		info.si_code = CLD_KILLED;
1470 	else {
1471 		info.si_code = CLD_EXITED;
1472 		info.si_status = tsk->exit_code >> 8;
1473 	}
1474 
1475 	psig = tsk->parent->sighand;
1476 	spin_lock_irqsave(&psig->siglock, flags);
1477 	if (!tsk->ptrace && sig == SIGCHLD &&
1478 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1479 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1480 		/*
1481 		 * We are exiting and our parent doesn't care.  POSIX.1
1482 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1483 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1484 		 * automatically and not left for our parent's wait4 call.
1485 		 * Rather than having the parent do it as a magic kind of
1486 		 * signal handler, we just set this to tell do_exit that we
1487 		 * can be cleaned up without becoming a zombie.  Note that
1488 		 * we still call __wake_up_parent in this case, because a
1489 		 * blocked sys_wait4 might now return -ECHILD.
1490 		 *
1491 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1492 		 * is implementation-defined: we do (if you don't want
1493 		 * it, just use SIG_IGN instead).
1494 		 */
1495 		tsk->exit_signal = -1;
1496 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1497 			sig = 0;
1498 	}
1499 	if (valid_signal(sig) && sig > 0)
1500 		__group_send_sig_info(sig, &info, tsk->parent);
1501 	__wake_up_parent(tsk, tsk->parent);
1502 	spin_unlock_irqrestore(&psig->siglock, flags);
1503 }
1504 
1505 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1506 {
1507 	struct siginfo info;
1508 	unsigned long flags;
1509 	struct task_struct *parent;
1510 	struct sighand_struct *sighand;
1511 
1512 	if (tsk->ptrace & PT_PTRACED)
1513 		parent = tsk->parent;
1514 	else {
1515 		tsk = tsk->group_leader;
1516 		parent = tsk->real_parent;
1517 	}
1518 
1519 	info.si_signo = SIGCHLD;
1520 	info.si_errno = 0;
1521 	info.si_pid = tsk->pid;
1522 	info.si_uid = tsk->uid;
1523 
1524 	/* FIXME: find out whether or not this is supposed to be c*time. */
1525 	info.si_utime = cputime_to_jiffies(tsk->utime);
1526 	info.si_stime = cputime_to_jiffies(tsk->stime);
1527 
1528  	info.si_code = why;
1529  	switch (why) {
1530  	case CLD_CONTINUED:
1531  		info.si_status = SIGCONT;
1532  		break;
1533  	case CLD_STOPPED:
1534  		info.si_status = tsk->signal->group_exit_code & 0x7f;
1535  		break;
1536  	case CLD_TRAPPED:
1537  		info.si_status = tsk->exit_code & 0x7f;
1538  		break;
1539  	default:
1540  		BUG();
1541  	}
1542 
1543 	sighand = parent->sighand;
1544 	spin_lock_irqsave(&sighand->siglock, flags);
1545 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1546 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1547 		__group_send_sig_info(SIGCHLD, &info, parent);
1548 	/*
1549 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1550 	 */
1551 	__wake_up_parent(tsk, parent);
1552 	spin_unlock_irqrestore(&sighand->siglock, flags);
1553 }
1554 
1555 static inline int may_ptrace_stop(void)
1556 {
1557 	if (!likely(current->ptrace & PT_PTRACED))
1558 		return 0;
1559 
1560 	if (unlikely(current->parent == current->real_parent &&
1561 		    (current->ptrace & PT_ATTACHED)))
1562 		return 0;
1563 
1564 	if (unlikely(current->signal == current->parent->signal) &&
1565 	    unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
1566 		return 0;
1567 
1568 	/*
1569 	 * Are we in the middle of do_coredump?
1570 	 * If so, and our tracer is also part of the coredump, stopping
1571 	 * would deadlock and is pointless because our tracer
1572 	 * is dead, so don't allow us to stop.
1573 	 * If SIGKILL was already sent before the caller unlocked
1574 	 * ->siglock we must see ->core_waiters != 0. Otherwise it
1575 	 * is safe to enter schedule().
1576 	 */
1577 	if (unlikely(current->mm->core_waiters) &&
1578 	    unlikely(current->mm == current->parent->mm))
1579 		return 0;
1580 
1581 	return 1;
1582 }
1583 
1584 /*
1585  * This must be called with current->sighand->siglock held.
1586  *
1587  * This should be the path for all ptrace stops.
1588  * We always set current->last_siginfo while stopped here.
1589  * That makes it a way to test a stopped process for
1590  * being ptrace-stopped vs being job-control-stopped.
1591  *
1592  * If we actually decide not to stop at all because the tracer is gone,
1593  * we leave nostop_code in current->exit_code.
1594  */
1595 static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1596 {
1597 	/*
1598 	 * If there is a group stop in progress,
1599 	 * we must participate in the bookkeeping.
1600 	 */
1601 	if (current->signal->group_stop_count > 0)
1602 		--current->signal->group_stop_count;
1603 
1604 	current->last_siginfo = info;
1605 	current->exit_code = exit_code;
1606 
1607 	/* Let the debugger run.  */
1608 	set_current_state(TASK_TRACED);
1609 	spin_unlock_irq(&current->sighand->siglock);
1610 	try_to_freeze();
1611 	read_lock(&tasklist_lock);
1612 	if (may_ptrace_stop()) {
1613 		do_notify_parent_cldstop(current, CLD_TRAPPED);
1614 		read_unlock(&tasklist_lock);
1615 		schedule();
1616 	} else {
1617 		/*
1618 		 * By the time we got the lock, our tracer went away.
1619 		 * Don't stop here.
1620 		 */
1621 		read_unlock(&tasklist_lock);
1622 		set_current_state(TASK_RUNNING);
1623 		current->exit_code = nostop_code;
1624 	}
1625 
1626 	/*
1627 	 * We are back.  Now reacquire the siglock before touching
1628 	 * last_siginfo, so that we are sure to have synchronized with
1629 	 * any signal-sending on another CPU that wants to examine it.
1630 	 */
1631 	spin_lock_irq(&current->sighand->siglock);
1632 	current->last_siginfo = NULL;
1633 
1634 	/*
1635 	 * Queued signals ignored us while we were stopped for tracing.
1636 	 * So check for any that we should take before resuming user mode.
1637 	 * This sets TIF_SIGPENDING, but never clears it.
1638 	 */
1639 	recalc_sigpending_tsk(current);
1640 }
1641 
1642 void ptrace_notify(int exit_code)
1643 {
1644 	siginfo_t info;
1645 
1646 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1647 
1648 	memset(&info, 0, sizeof info);
1649 	info.si_signo = SIGTRAP;
1650 	info.si_code = exit_code;
1651 	info.si_pid = current->pid;
1652 	info.si_uid = current->uid;
1653 
1654 	/* Let the debugger run.  */
1655 	spin_lock_irq(&current->sighand->siglock);
1656 	ptrace_stop(exit_code, 0, &info);
1657 	spin_unlock_irq(&current->sighand->siglock);
1658 }
1659 
1660 static void
1661 finish_stop(int stop_count)
1662 {
1663 	/*
1664 	 * If there are no other threads in the group, or if there is
1665 	 * a group stop in progress and we are the last to stop,
1666 	 * report to the parent.  When ptraced, every thread reports itself.
1667 	 */
1668 	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
1669 		read_lock(&tasklist_lock);
1670 		do_notify_parent_cldstop(current, CLD_STOPPED);
1671 		read_unlock(&tasklist_lock);
1672 	}
1673 
1674 	do {
1675 		schedule();
1676 	} while (try_to_freeze());
1677 	/*
1678 	 * Now we don't run again until continued.
1679 	 */
1680 	current->exit_code = 0;
1681 }
1682 
1683 /*
1684  * This performs the stopping for SIGSTOP and other stop signals.
1685  * We have to stop all threads in the thread group.
1686  * Returns nonzero if we've actually stopped and released the siglock.
1687  * Returns zero if we didn't stop and still hold the siglock.
1688  */
1689 static int do_signal_stop(int signr)
1690 {
1691 	struct signal_struct *sig = current->signal;
1692 	int stop_count;
1693 
1694 	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1695 		return 0;
1696 
1697 	if (sig->group_stop_count > 0) {
1698 		/*
1699 		 * There is a group stop in progress.  We don't need to
1700 		 * start another one.
1701 		 */
1702 		stop_count = --sig->group_stop_count;
1703 	} else {
1704 		/*
1705 		 * There is no group stop already in progress.
1706 		 * We must initiate one now.
1707 		 */
1708 		struct task_struct *t;
1709 
1710 		sig->group_exit_code = signr;
1711 
1712 		stop_count = 0;
1713 		for (t = next_thread(current); t != current; t = next_thread(t))
1714 			/*
1715 			 * Setting state to TASK_STOPPED for a group
1716 			 * stop is always done with the siglock held,
1717 			 * so this check has no races.
1718 			 */
1719 			if (!t->exit_state &&
1720 			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
1721 				stop_count++;
1722 				signal_wake_up(t, 0);
1723 			}
1724 		sig->group_stop_count = stop_count;
1725 	}
1726 
1727 	if (stop_count == 0)
1728 		sig->flags = SIGNAL_STOP_STOPPED;
1729 	current->exit_code = sig->group_exit_code;
1730 	__set_current_state(TASK_STOPPED);
1731 
1732 	spin_unlock_irq(&current->sighand->siglock);
1733 	finish_stop(stop_count);
1734 	return 1;
1735 }
1736 
1737 /*
1738  * Do appropriate magic when group_stop_count > 0.
1739  * We return nonzero if we stopped, after releasing the siglock.
1740  * We return zero if we still hold the siglock and should look
1741  * for another signal without checking group_stop_count again.
1742  */
1743 static int handle_group_stop(void)
1744 {
1745 	int stop_count;
1746 
1747 	if (current->signal->group_exit_task == current) {
1748 		/*
1749 		 * Group stop is so we can do a core dump,
1750 		 * Group stop is so we can do a core dump;
1751 		 * we are the initiating thread, so get on with it.
1752 		current->signal->group_exit_task = NULL;
1753 		return 0;
1754 	}
1755 
1756 	if (current->signal->flags & SIGNAL_GROUP_EXIT)
1757 		/*
1758 		 * Group stop is so another thread can do a core dump,
1759 		 * or else we are racing against a death signal.
1760 		 * Just punt the stop so we can get the next signal.
1761 		 */
1762 		return 0;
1763 
1764 	/*
1765 	 * There is a group stop in progress.  We stop
1766 	 * without any associated signal being in our queue.
1767 	 */
1768 	stop_count = --current->signal->group_stop_count;
1769 	if (stop_count == 0)
1770 		current->signal->flags = SIGNAL_STOP_STOPPED;
1771 	current->exit_code = current->signal->group_exit_code;
1772 	set_current_state(TASK_STOPPED);
1773 	spin_unlock_irq(&current->sighand->siglock);
1774 	finish_stop(stop_count);
1775 	return 1;
1776 }
1777 
1778 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1779 			  struct pt_regs *regs, void *cookie)
1780 {
1781 	sigset_t *mask = &current->blocked;
1782 	int signr = 0;
1783 
1784 	try_to_freeze();
1785 
1786 relock:
1787 	spin_lock_irq(&current->sighand->siglock);
1788 	for (;;) {
1789 		struct k_sigaction *ka;
1790 
1791 		if (unlikely(current->signal->group_stop_count > 0) &&
1792 		    handle_group_stop())
1793 			goto relock;
1794 
1795 		signr = dequeue_signal(current, mask, info);
1796 
1797 		if (!signr)
1798 			break; /* will return 0 */
1799 
1800 		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1801 			ptrace_signal_deliver(regs, cookie);
1802 
1803 			/* Let the debugger run.  */
1804 			ptrace_stop(signr, signr, info);
1805 
1806 			/* We're back.  Did the debugger cancel the sig?  */
1807 			signr = current->exit_code;
1808 			if (signr == 0)
1809 				continue;
1810 
1811 			current->exit_code = 0;
1812 
1813 			/* Update the siginfo structure if the signal has
1814 			   changed.  If the debugger wanted something
1815 			   specific in the siginfo structure then it should
1816 			   have updated *info via PTRACE_SETSIGINFO.  */
1817 			if (signr != info->si_signo) {
1818 				info->si_signo = signr;
1819 				info->si_errno = 0;
1820 				info->si_code = SI_USER;
1821 				info->si_pid = current->parent->pid;
1822 				info->si_uid = current->parent->uid;
1823 			}
1824 
1825 			/* If the (new) signal is now blocked, requeue it.  */
1826 			if (sigismember(&current->blocked, signr)) {
1827 				specific_send_sig_info(signr, info, current);
1828 				continue;
1829 			}
1830 		}
1831 
1832 		ka = &current->sighand->action[signr-1];
1833 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1834 			continue;
1835 		if (ka->sa.sa_handler != SIG_DFL) {
1836 			/* Run the handler.  */
1837 			*return_ka = *ka;
1838 
1839 			if (ka->sa.sa_flags & SA_ONESHOT)
1840 				ka->sa.sa_handler = SIG_DFL;
1841 
1842 			break; /* will return non-zero "signr" value */
1843 		}
1844 
1845 		/*
1846 		 * Now we are doing the default action for this signal.
1847 		 */
1848 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
1849 			continue;
1850 
1851 		/*
1852 		 * Init of a pid space gets no signals it doesn't want from
1853 		 * within that pid space. It can of course get signals from
1854 		 * its parent pid space.
1855 		 */
1856 		if (current == child_reaper(current))
1857 			continue;
1858 
1859 		if (sig_kernel_stop(signr)) {
1860 			/*
1861 			 * The default action is to stop all threads in
1862 			 * the thread group.  The job control signals
1863 			 * do nothing in an orphaned pgrp, but SIGSTOP
1864 			 * always works.  Note that siglock needs to be
1865 			 * dropped during the call to is_orphaned_pgrp()
1866 			 * because of lock ordering with tasklist_lock.
1867 			 * This allows an intervening SIGCONT to be posted.
1868 			 * We need to check for that and bail out if necessary.
1869 			 */
1870 			if (signr != SIGSTOP) {
1871 				spin_unlock_irq(&current->sighand->siglock);
1872 
1873 				/* signals can be posted during this window */
1874 
1875 				if (is_current_pgrp_orphaned())
1876 					goto relock;
1877 
1878 				spin_lock_irq(&current->sighand->siglock);
1879 			}
1880 
1881 			if (likely(do_signal_stop(signr))) {
1882 				/* It released the siglock.  */
1883 				goto relock;
1884 			}
1885 
1886 			/*
1887 			 * We didn't actually stop, due to a race
1888 			 * with SIGCONT or something like that.
1889 			 */
1890 			continue;
1891 		}
1892 
1893 		spin_unlock_irq(&current->sighand->siglock);
1894 
1895 		/*
1896 		 * Anything else is fatal, maybe with a core dump.
1897 		 */
1898 		current->flags |= PF_SIGNALED;
1899 		if ((signr != SIGKILL) && print_fatal_signals)
1900 			print_fatal_signal(regs, signr);
1901 		if (sig_kernel_coredump(signr)) {
1902 			/*
1903 			 * If it was able to dump core, this kills all
1904 			 * other threads in the group and synchronizes with
1905 			 * their demise.  If we lost the race with another
1906 			 * thread getting here, it set group_exit_code
1907 			 * first and our do_group_exit call below will use
1908 			 * that value and ignore the one we pass it.
1909 			 */
1910 			do_coredump((long)signr, signr, regs);
1911 		}
1912 
1913 		/*
1914 		 * Death signals, no core dump.
1915 		 */
1916 		do_group_exit(signr);
1917 		/* NOTREACHED */
1918 	}
1919 	spin_unlock_irq(&current->sighand->siglock);
1920 	return signr;
1921 }
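
/*
 * Illustrative userspace sketch (not part of signal.c): one way a debugger
 * exercises the ptrace path handled above.  The tracee stops in
 * ptrace_stop(); the tracer may inspect or rewrite the pending siginfo with
 * PTRACE_GETSIGINFO/PTRACE_SETSIGINFO, and the signal number passed as the
 * data argument of PTRACE_CONT is what get_signal_to_deliver() later reads
 * back from current->exit_code.  Minimal sketch, error handling omitted.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGUSR1);		/* stop here and notify the tracer */
		pause();		/* not reached if the tracer injects SIGTERM */
		_exit(0);
	}

	int status;
	siginfo_t si;

	waitpid(child, &status, 0);			/* tracee stopped on SIGUSR1 */
	ptrace(PTRACE_GETSIGINFO, child, NULL, &si);	/* read the queued siginfo */
	printf("tracee stopped on signal %d\n", si.si_signo);
	ptrace(PTRACE_SETSIGINFO, child, NULL, &si);	/* a debugger could edit it here */
	ptrace(PTRACE_CONT, child, NULL, (void *)(long)SIGTERM); /* deliver SIGTERM instead */
	waitpid(child, &status, 0);
	return 0;
}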
1922 
1923 EXPORT_SYMBOL(recalc_sigpending);
1924 EXPORT_SYMBOL_GPL(dequeue_signal);
1925 EXPORT_SYMBOL(flush_signals);
1926 EXPORT_SYMBOL(force_sig);
1927 EXPORT_SYMBOL(kill_proc);
1928 EXPORT_SYMBOL(ptrace_notify);
1929 EXPORT_SYMBOL(send_sig);
1930 EXPORT_SYMBOL(send_sig_info);
1931 EXPORT_SYMBOL(sigprocmask);
1932 EXPORT_SYMBOL(block_all_signals);
1933 EXPORT_SYMBOL(unblock_all_signals);
1934 
1935 
1936 /*
1937  * System call entry points.
1938  */
1939 
1940 asmlinkage long sys_restart_syscall(void)
1941 {
1942 	struct restart_block *restart = &current_thread_info()->restart_block;
1943 	return restart->fn(restart);
1944 }
1945 
1946 long do_no_restart_syscall(struct restart_block *param)
1947 {
1948 	return -EINTR;
1949 }
1950 
1951 /*
1952  * We don't need to get the kernel lock - this is all local to this
1953  * particular thread (and that's good, because this is _heavily_
1954  * used by various programs).
1955  */
1956 
1957 /*
1958  * This is also useful for kernel threads that want to temporarily
1959  * (or permanently) block certain signals.
1960  *
1961  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
1962  * interface happily blocks "unblockable" signals like SIGKILL
1963  * and friends.
1964  */
1965 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
1966 {
1967 	int error;
1968 
1969 	spin_lock_irq(&current->sighand->siglock);
1970 	if (oldset)
1971 		*oldset = current->blocked;
1972 
1973 	error = 0;
1974 	switch (how) {
1975 	case SIG_BLOCK:
1976 		sigorsets(&current->blocked, &current->blocked, set);
1977 		break;
1978 	case SIG_UNBLOCK:
1979 		signandsets(&current->blocked, &current->blocked, set);
1980 		break;
1981 	case SIG_SETMASK:
1982 		current->blocked = *set;
1983 		break;
1984 	default:
1985 		error = -EINVAL;
1986 	}
1987 	recalc_sigpending();
1988 	spin_unlock_irq(&current->sighand->siglock);
1989 
1990 	return error;
1991 }
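
/*
 * Illustrative userspace sketch (not part of signal.c): the three "how"
 * values above as seen through the sigprocmask(2) wrapper.  Unlike this
 * in-kernel helper, the syscall path never lets SIGKILL/SIGSTOP become
 * blocked -- sys_rt_sigprocmask() below strips them from the new set first.
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, old;

	sigemptyset(&set);
	sigaddset(&set, SIGINT);

	sigprocmask(SIG_BLOCK, &set, &old);	/* blocked |= set */
	/* critical region: SIGINT stays pending instead of interrupting us */
	sigprocmask(SIG_UNBLOCK, &set, NULL);	/* blocked &= ~set */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* blocked = old (restore) */

	printf("mask restored\n");
	return 0;
}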
1992 
1993 asmlinkage long
1994 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
1995 {
1996 	int error = -EINVAL;
1997 	sigset_t old_set, new_set;
1998 
1999 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2000 	if (sigsetsize != sizeof(sigset_t))
2001 		goto out;
2002 
2003 	if (set) {
2004 		error = -EFAULT;
2005 		if (copy_from_user(&new_set, set, sizeof(*set)))
2006 			goto out;
2007 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2008 
2009 		error = sigprocmask(how, &new_set, &old_set);
2010 		if (error)
2011 			goto out;
2012 		if (oset)
2013 			goto set_old;
2014 	} else if (oset) {
2015 		spin_lock_irq(&current->sighand->siglock);
2016 		old_set = current->blocked;
2017 		spin_unlock_irq(&current->sighand->siglock);
2018 
2019 	set_old:
2020 		error = -EFAULT;
2021 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2022 			goto out;
2023 	}
2024 	error = 0;
2025 out:
2026 	return error;
2027 }
2028 
2029 long do_sigpending(void __user *set, unsigned long sigsetsize)
2030 {
2031 	long error = -EINVAL;
2032 	sigset_t pending;
2033 
2034 	if (sigsetsize > sizeof(sigset_t))
2035 		goto out;
2036 
2037 	spin_lock_irq(&current->sighand->siglock);
2038 	sigorsets(&pending, &current->pending.signal,
2039 		  &current->signal->shared_pending.signal);
2040 	spin_unlock_irq(&current->sighand->siglock);
2041 
2042 	/* Outside the lock because only this thread touches it.  */
2043 	sigandsets(&pending, &current->blocked, &pending);
2044 
2045 	error = -EFAULT;
2046 	if (!copy_to_user(set, &pending, sigsetsize))
2047 		error = 0;
2048 
2049 out:
2050 	return error;
2051 }
2052 
2053 asmlinkage long
2054 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2055 {
2056 	return do_sigpending(set, sigsetsize);
2057 }
2058 
2059 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2060 
2061 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2062 {
2063 	int err;
2064 
2065 	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2066 		return -EFAULT;
2067 	if (from->si_code < 0)
2068 		return __copy_to_user(to, from, sizeof(siginfo_t))
2069 			? -EFAULT : 0;
2070 	/*
2071 	 * If you change siginfo_t structure, please be sure
2072 	 * this code is fixed accordingly.
2073 	 * Please remember to update the signalfd_copyinfo() function
2074 	 * inside fs/signalfd.c too, in case siginfo_t changes.
2075 	 * It should never copy any pad contained in the structure
2076 	 * to avoid security leaks, but must copy the generic
2077 	 * 3 ints plus the relevant union member.
2078 	 */
2079 	err = __put_user(from->si_signo, &to->si_signo);
2080 	err |= __put_user(from->si_errno, &to->si_errno);
2081 	err |= __put_user((short)from->si_code, &to->si_code);
2082 	switch (from->si_code & __SI_MASK) {
2083 	case __SI_KILL:
2084 		err |= __put_user(from->si_pid, &to->si_pid);
2085 		err |= __put_user(from->si_uid, &to->si_uid);
2086 		break;
2087 	case __SI_TIMER:
2088 		 err |= __put_user(from->si_tid, &to->si_tid);
2089 		 err |= __put_user(from->si_overrun, &to->si_overrun);
2090 		 err |= __put_user(from->si_ptr, &to->si_ptr);
2091 		break;
2092 	case __SI_POLL:
2093 		err |= __put_user(from->si_band, &to->si_band);
2094 		err |= __put_user(from->si_fd, &to->si_fd);
2095 		break;
2096 	case __SI_FAULT:
2097 		err |= __put_user(from->si_addr, &to->si_addr);
2098 #ifdef __ARCH_SI_TRAPNO
2099 		err |= __put_user(from->si_trapno, &to->si_trapno);
2100 #endif
2101 		break;
2102 	case __SI_CHLD:
2103 		err |= __put_user(from->si_pid, &to->si_pid);
2104 		err |= __put_user(from->si_uid, &to->si_uid);
2105 		err |= __put_user(from->si_status, &to->si_status);
2106 		err |= __put_user(from->si_utime, &to->si_utime);
2107 		err |= __put_user(from->si_stime, &to->si_stime);
2108 		break;
2109 	case __SI_RT: /* This is not generated by the kernel as of now. */
2110 	case __SI_MESGQ: /* But this is */
2111 		err |= __put_user(from->si_pid, &to->si_pid);
2112 		err |= __put_user(from->si_uid, &to->si_uid);
2113 		err |= __put_user(from->si_ptr, &to->si_ptr);
2114 		break;
2115 	default: /* this is just in case for now ... */
2116 		err |= __put_user(from->si_pid, &to->si_pid);
2117 		err |= __put_user(from->si_uid, &to->si_uid);
2118 		break;
2119 	}
2120 	return err;
2121 }
2122 
2123 #endif
2124 
2125 asmlinkage long
2126 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2127 		    siginfo_t __user *uinfo,
2128 		    const struct timespec __user *uts,
2129 		    size_t sigsetsize)
2130 {
2131 	int ret, sig;
2132 	sigset_t these;
2133 	struct timespec ts;
2134 	siginfo_t info;
2135 	long timeout = 0;
2136 
2137 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2138 	if (sigsetsize != sizeof(sigset_t))
2139 		return -EINVAL;
2140 
2141 	if (copy_from_user(&these, uthese, sizeof(these)))
2142 		return -EFAULT;
2143 
2144 	/*
2145 	 * Invert the set of allowed signals to get those we
2146 	 * want to block.
2147 	 */
2148 	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2149 	signotset(&these);
2150 
2151 	if (uts) {
2152 		if (copy_from_user(&ts, uts, sizeof(ts)))
2153 			return -EFAULT;
2154 		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2155 		    || ts.tv_sec < 0)
2156 			return -EINVAL;
2157 	}
2158 
2159 	spin_lock_irq(&current->sighand->siglock);
2160 	sig = dequeue_signal(current, &these, &info);
2161 	if (!sig) {
2162 		timeout = MAX_SCHEDULE_TIMEOUT;
2163 		if (uts)
2164 			timeout = (timespec_to_jiffies(&ts)
2165 				   + (ts.tv_sec || ts.tv_nsec));
2166 
2167 		if (timeout) {
2168 			/* None ready -- temporarily unblock those we're
2169 			 * interested in while we are sleeping so that we'll
2170 			 * be awakened when they arrive.  */
2171 			current->real_blocked = current->blocked;
2172 			sigandsets(&current->blocked, &current->blocked, &these);
2173 			recalc_sigpending();
2174 			spin_unlock_irq(&current->sighand->siglock);
2175 
2176 			timeout = schedule_timeout_interruptible(timeout);
2177 
2178 			spin_lock_irq(&current->sighand->siglock);
2179 			sig = dequeue_signal(current, &these, &info);
2180 			current->blocked = current->real_blocked;
2181 			siginitset(&current->real_blocked, 0);
2182 			recalc_sigpending();
2183 		}
2184 	}
2185 	spin_unlock_irq(&current->sighand->siglock);
2186 
2187 	if (sig) {
2188 		ret = sig;
2189 		if (uinfo) {
2190 			if (copy_siginfo_to_user(uinfo, &info))
2191 				ret = -EFAULT;
2192 		}
2193 	} else {
2194 		ret = -EAGAIN;
2195 		if (timeout)
2196 			ret = -EINTR;
2197 	}
2198 
2199 	return ret;
2200 }
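
/*
 * Illustrative userspace sketch (not part of signal.c): sys_rt_sigtimedwait
 * is normally reached through sigtimedwait(3).  The signal is blocked first
 * so it stays queued for dequeue_signal() instead of being delivered to a
 * handler; on timeout errno is EAGAIN, and EINTR means some other signal
 * interrupted the wait, matching the return values computed above.
 */
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* keep SIGUSR1 queued for us */

	sig = sigtimedwait(&set, &info, &ts);
	if (sig == SIGUSR1)
		printf("SIGUSR1 from pid %d\n", (int)info.si_pid);
	else
		perror("sigtimedwait");		/* EAGAIN on timeout, EINTR otherwise */
	return 0;
}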
2201 
2202 asmlinkage long
2203 sys_kill(int pid, int sig)
2204 {
2205 	struct siginfo info;
2206 
2207 	info.si_signo = sig;
2208 	info.si_errno = 0;
2209 	info.si_code = SI_USER;
2210 	info.si_pid = current->tgid;
2211 	info.si_uid = current->uid;
2212 
2213 	return kill_something_info(sig, &info, pid);
2214 }
2215 
2216 static int do_tkill(int tgid, int pid, int sig)
2217 {
2218 	int error;
2219 	struct siginfo info;
2220 	struct task_struct *p;
2221 
2222 	error = -ESRCH;
2223 	info.si_signo = sig;
2224 	info.si_errno = 0;
2225 	info.si_code = SI_TKILL;
2226 	info.si_pid = current->tgid;
2227 	info.si_uid = current->uid;
2228 
2229 	read_lock(&tasklist_lock);
2230 	p = find_task_by_pid(pid);
2231 	if (p && (tgid <= 0 || p->tgid == tgid)) {
2232 		error = check_kill_permission(sig, &info, p);
2233 		/*
2234 		 * The null signal is a permissions and process existence
2235 		 * probe.  No signal is actually delivered.
2236 		 */
2237 		if (!error && sig && p->sighand) {
2238 			spin_lock_irq(&p->sighand->siglock);
2239 			handle_stop_signal(sig, p);
2240 			error = specific_send_sig_info(sig, &info, p);
2241 			spin_unlock_irq(&p->sighand->siglock);
2242 		}
2243 	}
2244 	read_unlock(&tasklist_lock);
2245 
2246 	return error;
2247 }
2248 
2249 /**
2250  *  sys_tgkill - send signal to one specific thread
2251  *  @tgid: the thread group ID of the thread
2252  *  @pid: the PID of the thread
2253  *  @sig: signal to be sent
2254  *
2255  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2256  *  exists but no longer belongs to the target process. This
2257  *  method solves the problem of threads exiting and PIDs getting reused.
2258  */
2259 asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2260 {
2261 	/* This is only valid for single tasks */
2262 	if (pid <= 0 || tgid <= 0)
2263 		return -EINVAL;
2264 
2265 	return do_tkill(tgid, pid, sig);
2266 }
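
/*
 * Illustrative userspace sketch (not part of signal.c): tgkill is usually
 * invoked through syscall(2) since older C libraries ship no wrapper (the
 * helper name below is made up for the example).  The tgid check documented
 * above means a TID recycled into another process yields -ESRCH instead of
 * a stray signal; signal 0 uses the "null signal" probe behaviour of
 * do_tkill().
 */
#define _GNU_SOURCE
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Send sig to the thread with kernel TID tid inside our own thread group. */
static long tgkill_own_thread(pid_t tid, int sig)
{
	return syscall(SYS_tgkill, getpid(), tid, sig);
}

int main(void)
{
	pid_t tid = syscall(SYS_gettid);	/* main thread: tid == pid */

	return tgkill_own_thread(tid, 0) ? 1 : 0;	/* existence/permission probe */
}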
2267 
2268 /*
2269  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2270  */
2271 asmlinkage long
2272 sys_tkill(int pid, int sig)
2273 {
2274 	/* This is only valid for single tasks */
2275 	if (pid <= 0)
2276 		return -EINVAL;
2277 
2278 	return do_tkill(0, pid, sig);
2279 }
2280 
2281 asmlinkage long
2282 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2283 {
2284 	siginfo_t info;
2285 
2286 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2287 		return -EFAULT;
2288 
2289 	/* Not even root can pretend to send signals from the kernel.
2290 	   Nor can they impersonate a kill(), which adds source info.  */
2291 	if (info.si_code >= 0)
2292 		return -EPERM;
2293 	info.si_signo = sig;
2294 
2295 	/* POSIX.1b doesn't mention process groups.  */
2296 	return kill_proc_info(sig, &info, pid);
2297 }
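
/*
 * Illustrative userspace sketch (not part of signal.c): sigqueue(3) is the
 * usual entry point to sys_rt_sigqueueinfo.  glibc fills in a negative
 * si_code (SI_QUEUE), which is exactly why the si_code >= 0 check above
 * rejects callers trying to forge kernel-generated siginfo.  The printf in
 * the handler is not async-signal-safe; it is kept only for illustration.
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *info, void *ucontext)
{
	printf("got signal %d with value %d\n", sig, info->si_value.sival_int);
}

int main(void)
{
	struct sigaction sa = { 0 };
	union sigval val = { .sival_int = 42 };

	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;		/* three-argument handler */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	sigqueue(getpid(), SIGUSR1, val);	/* queue SIGUSR1 with a payload */
	return 0;
}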
2298 
2299 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2300 {
2301 	struct k_sigaction *k;
2302 	sigset_t mask;
2303 
2304 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2305 		return -EINVAL;
2306 
2307 	k = &current->sighand->action[sig-1];
2308 
2309 	spin_lock_irq(&current->sighand->siglock);
2310 	if (signal_pending(current)) {
2311 		/*
2312 		 * If there might be a fatal signal pending on multiple
2313 		 * threads, make sure we take it before changing the action.
2314 		 */
2315 		spin_unlock_irq(&current->sighand->siglock);
2316 		return -ERESTARTNOINTR;
2317 	}
2318 
2319 	if (oact)
2320 		*oact = *k;
2321 
2322 	if (act) {
2323 		sigdelsetmask(&act->sa.sa_mask,
2324 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
2325 		*k = *act;
2326 		/*
2327 		 * POSIX 3.3.1.3:
2328 		 *  "Setting a signal action to SIG_IGN for a signal that is
2329 		 *   pending shall cause the pending signal to be discarded,
2330 		 *   whether or not it is blocked."
2331 		 *
2332 		 *  "Setting a signal action to SIG_DFL for a signal that is
2333 		 *   pending and whose default action is to ignore the signal
2334 		 *   (for example, SIGCHLD), shall cause the pending signal to
2335 		 *   be discarded, whether or not it is blocked"
2336 		 */
2337 		if (act->sa.sa_handler == SIG_IGN ||
2338 		   (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
2339 			struct task_struct *t = current;
2340 			sigemptyset(&mask);
2341 			sigaddset(&mask, sig);
2342 			rm_from_queue_full(&mask, &t->signal->shared_pending);
2343 			do {
2344 				rm_from_queue_full(&mask, &t->pending);
2345 				recalc_sigpending_and_wake(t);
2346 				t = next_thread(t);
2347 			} while (t != current);
2348 		}
2349 	}
2350 
2351 	spin_unlock_irq(&current->sighand->siglock);
2352 	return 0;
2353 }
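
/*
 * Illustrative userspace sketch (not part of signal.c): the POSIX 3.3.1.3
 * behaviour implemented above, observed from user space.  A blocked, pending
 * SIGUSR1 is discarded the moment its action becomes SIG_IGN; expected
 * output is "1" before and "0" after.
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, pend;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* keep SIGUSR1 pending */
	raise(SIGUSR1);				/* now pending and blocked */

	sigpending(&pend);
	printf("pending before SIG_IGN: %d\n", sigismember(&pend, SIGUSR1));

	signal(SIGUSR1, SIG_IGN);		/* rm_from_queue_full() drops it */

	sigpending(&pend);
	printf("pending after  SIG_IGN: %d\n", sigismember(&pend, SIGUSR1));
	return 0;
}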
2354 
2355 int
2356 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2357 {
2358 	stack_t oss;
2359 	int error;
2360 
2361 	if (uoss) {
2362 		oss.ss_sp = (void __user *) current->sas_ss_sp;
2363 		oss.ss_size = current->sas_ss_size;
2364 		oss.ss_flags = sas_ss_flags(sp);
2365 	}
2366 
2367 	if (uss) {
2368 		void __user *ss_sp;
2369 		size_t ss_size;
2370 		int ss_flags;
2371 
2372 		error = -EFAULT;
2373 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2374 		    || __get_user(ss_sp, &uss->ss_sp)
2375 		    || __get_user(ss_flags, &uss->ss_flags)
2376 		    || __get_user(ss_size, &uss->ss_size))
2377 			goto out;
2378 
2379 		error = -EPERM;
2380 		if (on_sig_stack(sp))
2381 			goto out;
2382 
2383 		error = -EINVAL;
2384 		/*
2385 		 *
2386 		 * Note - this code used to test ss_flags incorrectly:
2387 		 * old code may have been written using ss_flags==0
2388 		 * to mean ss_flags==SS_ONSTACK (as this was the only
2389 		 * way that worked) - this fix preserves that older
2390 		 * mechanism.
2391 		 */
2392 		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2393 			goto out;
2394 
2395 		if (ss_flags == SS_DISABLE) {
2396 			ss_size = 0;
2397 			ss_sp = NULL;
2398 		} else {
2399 			error = -ENOMEM;
2400 			if (ss_size < MINSIGSTKSZ)
2401 				goto out;
2402 		}
2403 
2404 		current->sas_ss_sp = (unsigned long) ss_sp;
2405 		current->sas_ss_size = ss_size;
2406 	}
2407 
2408 	if (uoss) {
2409 		error = -EFAULT;
2410 		if (copy_to_user(uoss, &oss, sizeof(oss)))
2411 			goto out;
2412 	}
2413 
2414 	error = 0;
2415 out:
2416 	return error;
2417 }
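
/*
 * Illustrative userspace sketch (not part of signal.c): setting up the
 * alternate stack that do_sigaltstack() records, so a SIGSEGV handler can
 * still run after the normal stack is exhausted.  ss_flags of 0 (or the
 * historical SS_ONSTACK) enables the stack and SS_DISABLE turns it off;
 * error handling is omitted.
 */
#include <signal.h>
#include <stdlib.h>

static void on_segv(int sig)
{
	_Exit(1);	/* runs on the alternate stack because of SA_ONSTACK */
}

int main(void)
{
	stack_t ss = {
		.ss_sp = malloc(SIGSTKSZ),
		.ss_size = SIGSTKSZ,
		.ss_flags = 0,
	};
	struct sigaction sa = { 0 };

	sigaltstack(&ss, NULL);		/* becomes current->sas_ss_sp / sas_ss_size */

	sa.sa_handler = on_segv;
	sa.sa_flags = SA_ONSTACK;	/* use the alternate stack for this handler */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);
	return 0;
}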
2418 
2419 #ifdef __ARCH_WANT_SYS_SIGPENDING
2420 
2421 asmlinkage long
2422 sys_sigpending(old_sigset_t __user *set)
2423 {
2424 	return do_sigpending(set, sizeof(*set));
2425 }
2426 
2427 #endif
2428 
2429 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2430 /* Some platforms have their own version with special arguments others
2431    support only sys_rt_sigprocmask.  */
2432 
2433 asmlinkage long
2434 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2435 {
2436 	int error;
2437 	old_sigset_t old_set, new_set;
2438 
2439 	if (set) {
2440 		error = -EFAULT;
2441 		if (copy_from_user(&new_set, set, sizeof(*set)))
2442 			goto out;
2443 		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2444 
2445 		spin_lock_irq(&current->sighand->siglock);
2446 		old_set = current->blocked.sig[0];
2447 
2448 		error = 0;
2449 		switch (how) {
2450 		default:
2451 			error = -EINVAL;
2452 			break;
2453 		case SIG_BLOCK:
2454 			sigaddsetmask(&current->blocked, new_set);
2455 			break;
2456 		case SIG_UNBLOCK:
2457 			sigdelsetmask(&current->blocked, new_set);
2458 			break;
2459 		case SIG_SETMASK:
2460 			current->blocked.sig[0] = new_set;
2461 			break;
2462 		}
2463 
2464 		recalc_sigpending();
2465 		spin_unlock_irq(&current->sighand->siglock);
2466 		if (error)
2467 			goto out;
2468 		if (oset)
2469 			goto set_old;
2470 	} else if (oset) {
2471 		old_set = current->blocked.sig[0];
2472 	set_old:
2473 		error = -EFAULT;
2474 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2475 			goto out;
2476 	}
2477 	error = 0;
2478 out:
2479 	return error;
2480 }
2481 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2482 
2483 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2484 asmlinkage long
2485 sys_rt_sigaction(int sig,
2486 		 const struct sigaction __user *act,
2487 		 struct sigaction __user *oact,
2488 		 size_t sigsetsize)
2489 {
2490 	struct k_sigaction new_sa, old_sa;
2491 	int ret = -EINVAL;
2492 
2493 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2494 	if (sigsetsize != sizeof(sigset_t))
2495 		goto out;
2496 
2497 	if (act) {
2498 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2499 			return -EFAULT;
2500 	}
2501 
2502 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2503 
2504 	if (!ret && oact) {
2505 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2506 			return -EFAULT;
2507 	}
2508 out:
2509 	return ret;
2510 }
2511 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2512 
2513 #ifdef __ARCH_WANT_SYS_SGETMASK
2514 
2515 /*
2516  * For backwards compatibility.  Functionality superseded by sigprocmask.
2517  */
2518 asmlinkage long
2519 sys_sgetmask(void)
2520 {
2521 	/* SMP safe */
2522 	return current->blocked.sig[0];
2523 }
2524 
2525 asmlinkage long
2526 sys_ssetmask(int newmask)
2527 {
2528 	int old;
2529 
2530 	spin_lock_irq(&current->sighand->siglock);
2531 	old = current->blocked.sig[0];
2532 
2533 	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2534 						  sigmask(SIGSTOP)));
2535 	recalc_sigpending();
2536 	spin_unlock_irq(&current->sighand->siglock);
2537 
2538 	return old;
2539 }
2540 #endif /* __ARCH_WANT_SYS_SGETMASK */
2541 
2542 #ifdef __ARCH_WANT_SYS_SIGNAL
2543 /*
2544  * For backwards compatibility.  Functionality superseded by sigaction.
2545  */
2546 asmlinkage unsigned long
2547 sys_signal(int sig, __sighandler_t handler)
2548 {
2549 	struct k_sigaction new_sa, old_sa;
2550 	int ret;
2551 
2552 	new_sa.sa.sa_handler = handler;
2553 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2554 	sigemptyset(&new_sa.sa.sa_mask);
2555 
2556 	ret = do_sigaction(sig, &new_sa, &old_sa);
2557 
2558 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2559 }
2560 #endif /* __ARCH_WANT_SYS_SIGNAL */
2561 
2562 #ifdef __ARCH_WANT_SYS_PAUSE
2563 
2564 asmlinkage long
2565 sys_pause(void)
2566 {
2567 	current->state = TASK_INTERRUPTIBLE;
2568 	schedule();
2569 	return -ERESTARTNOHAND;
2570 }
2571 
2572 #endif
2573 
2574 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2575 asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2576 {
2577 	sigset_t newset;
2578 
2579 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2580 	if (sigsetsize != sizeof(sigset_t))
2581 		return -EINVAL;
2582 
2583 	if (copy_from_user(&newset, unewset, sizeof(newset)))
2584 		return -EFAULT;
2585 	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2586 
2587 	spin_lock_irq(&current->sighand->siglock);
2588 	current->saved_sigmask = current->blocked;
2589 	current->blocked = newset;
2590 	recalc_sigpending();
2591 	spin_unlock_irq(&current->sighand->siglock);
2592 
2593 	current->state = TASK_INTERRUPTIBLE;
2594 	schedule();
2595 	set_thread_flag(TIF_RESTORE_SIGMASK);
2596 	return -ERESTARTNOHAND;
2597 }
2598 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
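
/*
 * Illustrative userspace sketch (not part of signal.c): the race-free wait
 * that sys_rt_sigsuspend() makes possible.  The flag is tested with SIGUSR1
 * blocked, then sigsuspend() atomically installs the open mask and sleeps;
 * once the handler has run, the original (blocking) mask is restored on
 * return, which is what the saved_sigmask/TIF_RESTORE_SIGMASK dance above
 * implements.  The program waits until another process sends it SIGUSR1.
 */
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t got_usr1;

static void on_usr1(int sig)
{
	got_usr1 = 1;
}

int main(void)
{
	sigset_t block, waitmask;
	struct sigaction sa = { 0 };

	sa.sa_handler = on_usr1;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &waitmask);	/* save the old mask */
	sigdelset(&waitmask, SIGUSR1);			/* make sure it is open there */

	while (!got_usr1)
		sigsuspend(&waitmask);	/* atomically: blocked = waitmask, then sleep */

	printf("got SIGUSR1\n");	/* SIGUSR1 is blocked again at this point */
	return 0;
}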
2599 
2600 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2601 {
2602 	return NULL;
2603 }
2604 
2605 void __init signals_init(void)
2606 {
2607 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2608 }
2609