xref: /linux/kernel/signal.c (revision a17627ef8833ac30622a7b39b7be390e1b174405)
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *		Changes to use preallocated sigqueue structures
10  *		to allow signals to be sent reliably.
11  */
12 
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/security.h>
21 #include <linux/syscalls.h>
22 #include <linux/ptrace.h>
23 #include <linux/signal.h>
24 #include <linux/signalfd.h>
25 #include <linux/capability.h>
26 #include <linux/freezer.h>
27 #include <linux/pid_namespace.h>
28 #include <linux/nsproxy.h>
29 
30 #include <asm/param.h>
31 #include <asm/uaccess.h>
32 #include <asm/unistd.h>
33 #include <asm/siginfo.h>
34 #include "audit.h"	/* audit_signal_info() */
35 
36 /*
37  * SLAB caches for signal bits.
38  */
39 
40 static struct kmem_cache *sigqueue_cachep;
41 
42 
43 static int sig_ignored(struct task_struct *t, int sig)
44 {
45 	void __user * handler;
46 
47 	/*
48 	 * Tracers always want to know about signals..
49 	 */
50 	if (t->ptrace & PT_PTRACED)
51 		return 0;
52 
53 	/*
54 	 * Blocked signals are never ignored, since the
55 	 * signal handler may change by the time it is
56 	 * unblocked.
57 	 */
58 	if (sigismember(&t->blocked, sig))
59 		return 0;
60 
61 	/* Is it explicitly or implicitly ignored? */
62 	handler = t->sighand->action[sig-1].sa.sa_handler;
63 	return   handler == SIG_IGN ||
64 		(handler == SIG_DFL && sig_kernel_ignore(sig));
65 }
66 
67 /*
68  * Re-calculate pending state from the set of locally pending
69  * signals, globally pending signals, and blocked signals.
70  */
71 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
72 {
73 	unsigned long ready;
74 	long i;
75 
76 	switch (_NSIG_WORDS) {
77 	default:
78 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
79 			ready |= signal->sig[i] &~ blocked->sig[i];
80 		break;
81 
82 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
83 		ready |= signal->sig[2] &~ blocked->sig[2];
84 		ready |= signal->sig[1] &~ blocked->sig[1];
85 		ready |= signal->sig[0] &~ blocked->sig[0];
86 		break;
87 
88 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
89 		ready |= signal->sig[0] &~ blocked->sig[0];
90 		break;
91 
92 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
93 	}
94 	return ready != 0;
95 }
96 
97 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
98 
99 static int recalc_sigpending_tsk(struct task_struct *t)
100 {
101 	if (t->signal->group_stop_count > 0 ||
102 	    (freezing(t)) ||
103 	    PENDING(&t->pending, &t->blocked) ||
104 	    PENDING(&t->signal->shared_pending, &t->blocked)) {
105 		set_tsk_thread_flag(t, TIF_SIGPENDING);
106 		return 1;
107 	}
108 	/*
109 	 * We must never clear the flag in another thread, or in current
110 	 * when it's possible the current syscall is returning -ERESTART*.
111 	 * So we don't clear it here; only callers that know it is safe do so.
112 	 */
113 	return 0;
114 }
115 
116 /*
117  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
118  * This is superfluous when called on current; the wakeup is a harmless no-op.
119  */
120 void recalc_sigpending_and_wake(struct task_struct *t)
121 {
122 	if (recalc_sigpending_tsk(t))
123 		signal_wake_up(t, 0);
124 }
125 
126 void recalc_sigpending(void)
127 {
128 	if (!recalc_sigpending_tsk(current))
129 		clear_thread_flag(TIF_SIGPENDING);
130 
131 }
132 
133 /* Given the mask, find the first available signal that should be serviced. */
134 
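/*
 * Note on the bit fiddling below: x = *s & ~*m is the word of pending,
 * non-blocked signals; ffz(~x) is the index of its lowest set bit, and
 * the +1 turns that 0-based bit index into a 1-based signal number.
 */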
135 int next_signal(struct sigpending *pending, sigset_t *mask)
136 {
137 	unsigned long i, *s, *m, x;
138 	int sig = 0;
139 
140 	s = pending->signal.sig;
141 	m = mask->sig;
142 	switch (_NSIG_WORDS) {
143 	default:
144 		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
145 			if ((x = *s &~ *m) != 0) {
146 				sig = ffz(~x) + i*_NSIG_BPW + 1;
147 				break;
148 			}
149 		break;
150 
151 	case 2: if ((x = s[0] &~ m[0]) != 0)
152 			sig = 1;
153 		else if ((x = s[1] &~ m[1]) != 0)
154 			sig = _NSIG_BPW + 1;
155 		else
156 			break;
157 		sig += ffz(~x);
158 		break;
159 
160 	case 1: if ((x = *s &~ *m) != 0)
161 			sig = ffz(~x) + 1;
162 		break;
163 	}
164 
165 	return sig;
166 }
167 
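/*
 * Allocate one sigqueue entry.  The allocation is charged to the sending
 * user's ->sigpending count and is refused (NULL is returned) when that
 * count would exceed RLIMIT_SIGPENDING, unless override_rlimit is set.
 */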
168 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
169 					 int override_rlimit)
170 {
171 	struct sigqueue *q = NULL;
172 	struct user_struct *user;
173 
174 	/*
175 	 * In order to avoid problems with "switch_user()", we want to make
176 	 * sure that the compiler doesn't re-load "t->user"
177 	 */
178 	user = t->user;
179 	barrier();
180 	atomic_inc(&user->sigpending);
181 	if (override_rlimit ||
182 	    atomic_read(&user->sigpending) <=
183 			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
184 		q = kmem_cache_alloc(sigqueue_cachep, flags);
185 	if (unlikely(q == NULL)) {
186 		atomic_dec(&user->sigpending);
187 	} else {
188 		INIT_LIST_HEAD(&q->list);
189 		q->flags = 0;
190 		q->user = get_uid(user);
191 	}
192 	return(q);
193 }
194 
195 static void __sigqueue_free(struct sigqueue *q)
196 {
197 	if (q->flags & SIGQUEUE_PREALLOC)
198 		return;
199 	atomic_dec(&q->user->sigpending);
200 	free_uid(q->user);
201 	kmem_cache_free(sigqueue_cachep, q);
202 }
203 
204 void flush_sigqueue(struct sigpending *queue)
205 {
206 	struct sigqueue *q;
207 
208 	sigemptyset(&queue->signal);
209 	while (!list_empty(&queue->list)) {
210 		q = list_entry(queue->list.next, struct sigqueue, list);
211 		list_del_init(&q->list);
212 		__sigqueue_free(q);
213 	}
214 }
215 
216 /*
217  * Flush all pending signals for a task.
218  */
219 void flush_signals(struct task_struct *t)
220 {
221 	unsigned long flags;
222 
223 	spin_lock_irqsave(&t->sighand->siglock, flags);
224 	clear_tsk_thread_flag(t, TIF_SIGPENDING);
225 	flush_sigqueue(&t->pending);
226 	flush_sigqueue(&t->signal->shared_pending);
227 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
228 }
229 
230 void ignore_signals(struct task_struct *t)
231 {
232 	int i;
233 
234 	for (i = 0; i < _NSIG; ++i)
235 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
236 
237 	flush_signals(t);
238 }
239 
240 /*
241  * Flush all handlers for a task.
242  */
243 
244 void
245 flush_signal_handlers(struct task_struct *t, int force_default)
246 {
247 	int i;
248 	struct k_sigaction *ka = &t->sighand->action[0];
249 	for (i = _NSIG ; i != 0 ; i--) {
250 		if (force_default || ka->sa.sa_handler != SIG_IGN)
251 			ka->sa.sa_handler = SIG_DFL;
252 		ka->sa.sa_flags = 0;
253 		sigemptyset(&ka->sa.sa_mask);
254 		ka++;
255 	}
256 }
257 
258 
259 /* Notify the system that a driver wants to block all signals for this
260  * process, and wants to be notified if any signals at all were to be
261  * sent/acted upon.  If the notifier routine returns non-zero, then the
262  * signal will be acted upon after all.  If the notifier routine returns 0,
263  * then the signal will be blocked.  Only one block per process is
264  * allowed.  priv is a pointer to private data that the notifier routine
265  * can use to determine if the signal should be blocked or not.  */
266 
267 void
268 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
269 {
270 	unsigned long flags;
271 
272 	spin_lock_irqsave(&current->sighand->siglock, flags);
273 	current->notifier_mask = mask;
274 	current->notifier_data = priv;
275 	current->notifier = notifier;
276 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
277 }
278 
279 /* Notify the system that blocking has ended. */
280 
281 void
282 unblock_all_signals(void)
283 {
284 	unsigned long flags;
285 
286 	spin_lock_irqsave(&current->sighand->siglock, flags);
287 	current->notifier = NULL;
288 	current->notifier_data = NULL;
289 	recalc_sigpending();
290 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
291 }
292 
293 static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
294 {
295 	struct sigqueue *q, *first = NULL;
296 	int still_pending = 0;
297 
298 	if (unlikely(!sigismember(&list->signal, sig)))
299 		return 0;
300 
301 	/*
302 	 * Collect the siginfo appropriate to this signal.  Check if
303 	 * there is another siginfo for the same signal.
304 	 */
305 	list_for_each_entry(q, &list->list, list) {
306 		if (q->info.si_signo == sig) {
307 			if (first) {
308 				still_pending = 1;
309 				break;
310 			}
311 			first = q;
312 		}
313 	}
314 	if (first) {
315 		list_del_init(&first->list);
316 		copy_siginfo(info, &first->info);
317 		__sigqueue_free(first);
318 		if (!still_pending)
319 			sigdelset(&list->signal, sig);
320 	} else {
321 
322 		/* Ok, it wasn't in the queue.  This must be
323 		   a fast-pathed signal or we must have been
324 		   out of queue space.  So zero out the info.
325 		 */
326 		sigdelset(&list->signal, sig);
327 		info->si_signo = sig;
328 		info->si_errno = 0;
329 		info->si_code = 0;
330 		info->si_pid = 0;
331 		info->si_uid = 0;
332 	}
333 	return 1;
334 }
335 
336 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
337 			siginfo_t *info)
338 {
339 	int sig = next_signal(pending, mask);
340 
341 	if (sig) {
342 		if (current->notifier) {
343 			if (sigismember(current->notifier_mask, sig)) {
344 				if (!(current->notifier)(current->notifier_data)) {
345 					clear_thread_flag(TIF_SIGPENDING);
346 					return 0;
347 				}
348 			}
349 		}
350 
351 		if (!collect_signal(sig, pending, info))
352 			sig = 0;
353 	}
354 
355 	return sig;
356 }
357 
358 /*
359  * Dequeue a signal and return the element to the caller, which is
360  * expected to free it.
361  *
362  * All callers have to hold the siglock.
363  */
364 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
365 {
366 	int signr = __dequeue_signal(&tsk->pending, mask, info);
367 	if (!signr) {
368 		signr = __dequeue_signal(&tsk->signal->shared_pending,
369 					 mask, info);
370 		/*
371 		 * itimer signal ?
372 		 *
373 		 * itimers are process shared and we restart periodic
374 		 * itimers in the signal delivery path to prevent DoS
375 		 * attacks in the high resolution timer case. This is
376 		 * compliant with the old way of self restarting
377 		 * itimers, as the SIGALRM is a legacy signal and only
378 		 * queued once. Changing the restart behaviour to
379 		 * restart the timer in the signal dequeue path is
380 		 * reducing the timer noise on heavy loaded !highres
381 		 * systems too.
382 		 */
383 		if (unlikely(signr == SIGALRM)) {
384 			struct hrtimer *tmr = &tsk->signal->real_timer;
385 
386 			if (!hrtimer_is_queued(tmr) &&
387 			    tsk->signal->it_real_incr.tv64 != 0) {
388 				hrtimer_forward(tmr, tmr->base->get_time(),
389 						tsk->signal->it_real_incr);
390 				hrtimer_restart(tmr);
391 			}
392 		}
393 	}
394 	if (likely(tsk == current))
395 		recalc_sigpending();
396 	if (signr && unlikely(sig_kernel_stop(signr))) {
397 		/*
398 		 * Set a marker that we have dequeued a stop signal.  Our
399 		 * caller might release the siglock and then the pending
400 		 * stop signal it is about to process is no longer in the
401 		 * pending bitmasks, but must still be cleared by a SIGCONT
402 		 * (and overruled by a SIGKILL).  So those cases clear this
403 		 * shared flag after we've set it.  Note that this flag may
404 		 * remain set after the signal we return is ignored or
405 		 * handled.  That doesn't matter because its only purpose
406 		 * is to alert stop-signal processing code when another
407 		 * processor has come along and cleared the flag.
408 		 */
409 		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
410 			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
411 	}
412 	if (signr &&
413 	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
414 	    info->si_sys_private) {
415 		/*
416 		 * Release the siglock to ensure proper locking order
417 		 * of timer locks outside of siglocks.  Note, we leave
418 		 * irqs disabled here, since the posix-timers code is
419 		 * about to disable them again anyway.
420 		 */
421 		spin_unlock(&tsk->sighand->siglock);
422 		do_schedule_next_timer(info);
423 		spin_lock(&tsk->sighand->siglock);
424 	}
425 	return signr;
426 }
427 
428 /*
429  * Tell a process that it has a new active signal..
430  *
431  * NOTE! we rely on the previous spin_lock to
432  * lock interrupts for us! We can only be called with
433  * "siglock" held, and local interrupts must
434  * have been disabled when that got acquired!
435  *
436  * No need to set need_resched since signal event passing
437  * goes through ->blocked.
438  */
439 void signal_wake_up(struct task_struct *t, int resume)
440 {
441 	unsigned int mask;
442 
443 	set_tsk_thread_flag(t, TIF_SIGPENDING);
444 
445 	/*
446 	 * For SIGKILL, we want to wake it up in the stopped/traced case.
447 	 * We don't check t->state here because there is a race with it
448  * executing on another processor and just now entering the stopped state.
449 	 * By using wake_up_state, we ensure the process will wake up and
450 	 * handle its death signal.
451 	 */
452 	mask = TASK_INTERRUPTIBLE;
453 	if (resume)
454 		mask |= TASK_STOPPED | TASK_TRACED;
455 	if (!wake_up_state(t, mask))
456 		kick_process(t);
457 }
458 
459 /*
460  * Remove signals in mask from the pending set and queue.
461  * Returns 1 if any signals were found.
462  *
463  * All callers must be holding the siglock.
464  *
465  * This version takes a sigset mask and looks at all signals,
466  * not just those in the first mask word.
467  */
468 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
469 {
470 	struct sigqueue *q, *n;
471 	sigset_t m;
472 
473 	sigandsets(&m, mask, &s->signal);
474 	if (sigisemptyset(&m))
475 		return 0;
476 
477 	signandsets(&s->signal, &s->signal, mask);
478 	list_for_each_entry_safe(q, n, &s->list, list) {
479 		if (sigismember(mask, q->info.si_signo)) {
480 			list_del_init(&q->list);
481 			__sigqueue_free(q);
482 		}
483 	}
484 	return 1;
485 }
486 /*
487  * Remove signals in mask from the pending set and queue.
488  * Returns 1 if any signals were found.
489  *
490  * All callers must be holding the siglock.
491  */
492 static int rm_from_queue(unsigned long mask, struct sigpending *s)
493 {
494 	struct sigqueue *q, *n;
495 
496 	if (!sigtestsetmask(&s->signal, mask))
497 		return 0;
498 
499 	sigdelsetmask(&s->signal, mask);
500 	list_for_each_entry_safe(q, n, &s->list, list) {
501 		if (q->info.si_signo < SIGRTMIN &&
502 		    (mask & sigmask(q->info.si_signo))) {
503 			list_del_init(&q->list);
504 			__sigqueue_free(q);
505 		}
506 	}
507 	return 1;
508 }
509 
510 /*
511  * Bad permissions for sending the signal
512  */
513 static int check_kill_permission(int sig, struct siginfo *info,
514 				 struct task_struct *t)
515 {
516 	int error = -EINVAL;
517 	if (!valid_signal(sig))
518 		return error;
519 
520 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
521 	if (error)
522 		return error;
523 
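	/*
	 * A rough summary of the check below: the signal is allowed if it
	 * was generated by the kernel, if it is SIGCONT sent within the
	 * sender's own session, if one of the sender's uid/euid matches
	 * the target's uid/suid (each "^" is just an inequality test), or
	 * if the sender has CAP_KILL.
	 */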
524 	error = -EPERM;
525 	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
526 	    && ((sig != SIGCONT) ||
527 		(process_session(current) != process_session(t)))
528 	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
529 	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
530 	    && !capable(CAP_KILL))
531 		return error;
532 
533 	return security_task_kill(t, info, sig, 0);
534 }
535 
536 /* forward decl */
537 static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
538 
539 /*
540  * Handle magic process-wide effects of stop/continue signals.
541  * Unlike the signal actions, these happen immediately at signal-generation
542  * time regardless of blocking, ignoring, or handling.  This does the
543  * actual continuing for SIGCONT, but not the actual stopping for stop
544  * signals.  The process stop is done as a signal action for SIG_DFL.
545  */
546 static void handle_stop_signal(int sig, struct task_struct *p)
547 {
548 	struct task_struct *t;
549 
550 	if (p->signal->flags & SIGNAL_GROUP_EXIT)
551 		/*
552 		 * The process is in the middle of dying already.
553 		 */
554 		return;
555 
556 	if (sig_kernel_stop(sig)) {
557 		/*
558 		 * This is a stop signal.  Remove SIGCONT from all queues.
559 		 */
560 		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
561 		t = p;
562 		do {
563 			rm_from_queue(sigmask(SIGCONT), &t->pending);
564 			t = next_thread(t);
565 		} while (t != p);
566 	} else if (sig == SIGCONT) {
567 		/*
568 		 * Remove all stop signals from all queues,
569 		 * and wake all threads.
570 		 */
571 		if (unlikely(p->signal->group_stop_count > 0)) {
572 			/*
573 			 * There was a group stop in progress.  We'll
574 			 * pretend it finished before we got here.  We are
575 			 * obliged to report it to the parent: if the
576 			 * SIGSTOP happened "after" this SIGCONT, then it
577 			 * would have cleared this pending SIGCONT.  If it
578 			 * happened "before" this SIGCONT, then the parent
579 			 * got the SIGCHLD about the stop finishing before
580 			 * the continue happened.  We do the notification
581 			 * now, and it's as if the stop had finished and
582 			 * the SIGCHLD was pending on entry to this kill.
583 			 */
584 			p->signal->group_stop_count = 0;
585 			p->signal->flags = SIGNAL_STOP_CONTINUED;
586 			spin_unlock(&p->sighand->siglock);
587 			do_notify_parent_cldstop(p, CLD_STOPPED);
588 			spin_lock(&p->sighand->siglock);
589 		}
590 		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
591 		t = p;
592 		do {
593 			unsigned int state;
594 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
595 
596 			/*
597 			 * If there is a handler for SIGCONT, we must make
598 			 * sure that no thread returns to user mode before
599 			 * we post the signal, in case it was the only
600 			 * thread eligible to run the signal handler--then
601 			 * it must not do anything between resuming and
602 			 * running the handler.  With the TIF_SIGPENDING
603 			 * flag set, the thread will pause and acquire the
604 			 * siglock that we hold now and until we've queued
605 			 * the pending signal.
606 			 *
607 			 * Wake up the stopped thread _after_ setting
608 			 * TIF_SIGPENDING
609 			 */
610 			state = TASK_STOPPED;
611 			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
612 				set_tsk_thread_flag(t, TIF_SIGPENDING);
613 				state |= TASK_INTERRUPTIBLE;
614 			}
615 			wake_up_state(t, state);
616 
617 			t = next_thread(t);
618 		} while (t != p);
619 
620 		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
621 			/*
622 			 * We were in fact stopped, and are now continued.
623 			 * Notify the parent with CLD_CONTINUED.
624 			 */
625 			p->signal->flags = SIGNAL_STOP_CONTINUED;
626 			p->signal->group_exit_code = 0;
627 			spin_unlock(&p->sighand->siglock);
628 			do_notify_parent_cldstop(p, CLD_CONTINUED);
629 			spin_lock(&p->sighand->siglock);
630 		} else {
631 			/*
632 			 * We are not stopped, but there could be a stop
633 			 * signal in the middle of being processed after
634 			 * being removed from the queue.  Clear that too.
635 			 */
636 			p->signal->flags = 0;
637 		}
638 	} else if (sig == SIGKILL) {
639 		/*
640 		 * Make sure that any pending stop signal already dequeued
641 		 * is undone by the wakeup for SIGKILL.
642 		 */
643 		p->signal->flags = 0;
644 	}
645 }
646 
647 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
648 			struct sigpending *signals)
649 {
650 	struct sigqueue * q = NULL;
651 	int ret = 0;
652 
653 	/*
654 	 * Deliver the signal to listening signalfds. This must be called
655 	 * with the sighand lock held.
656 	 */
657 	signalfd_notify(t, sig);
658 
659 	/*
660 	 * fast-pathed signals for kernel-internal things like SIGSTOP
661 	 * or SIGKILL.
662 	 */
663 	if (info == SEND_SIG_FORCED)
664 		goto out_set;
665 
666 	/* Real-time signals must be queued if sent by sigqueue, or
667 	   some other real-time mechanism.  It is implementation
668 	   defined whether kill() does so.  We attempt to do so, on
669 	   the principle of least surprise, but since kill is not
670 	   allowed to fail with EAGAIN when low on memory we just
671 	   make sure at least one signal gets delivered and don't
672 	   pass on the info struct.  */
673 
674 	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
675 					     (is_si_special(info) ||
676 					      info->si_code >= 0)));
677 	if (q) {
678 		list_add_tail(&q->list, &signals->list);
679 		switch ((unsigned long) info) {
680 		case (unsigned long) SEND_SIG_NOINFO:
681 			q->info.si_signo = sig;
682 			q->info.si_errno = 0;
683 			q->info.si_code = SI_USER;
684 			q->info.si_pid = current->pid;
685 			q->info.si_uid = current->uid;
686 			break;
687 		case (unsigned long) SEND_SIG_PRIV:
688 			q->info.si_signo = sig;
689 			q->info.si_errno = 0;
690 			q->info.si_code = SI_KERNEL;
691 			q->info.si_pid = 0;
692 			q->info.si_uid = 0;
693 			break;
694 		default:
695 			copy_siginfo(&q->info, info);
696 			break;
697 		}
698 	} else if (!is_si_special(info)) {
699 		if (sig >= SIGRTMIN && info->si_code != SI_USER)
700 		/*
701 		 * Queue overflow, abort.  We may abort if the signal was rt
702 		 * and sent by user using something other than kill().
703 		 */
704 			return -EAGAIN;
705 	}
706 
707 out_set:
708 	sigaddset(&signals->signal, sig);
709 	return ret;
710 }
711 
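/*
 * A legacy (non-realtime) signal is never queued more than once: if its
 * bit is already set in the pending mask, the senders below simply drop
 * the duplicate instead of queueing another sigqueue entry.
 */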
712 #define LEGACY_QUEUE(sigptr, sig) \
713 	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
714 
715 
716 static int
717 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
718 {
719 	int ret = 0;
720 
721 	BUG_ON(!irqs_disabled());
722 	assert_spin_locked(&t->sighand->siglock);
723 
724 	/* Short-circuit ignored signals.  */
725 	if (sig_ignored(t, sig))
726 		goto out;
727 
728 	/* Support queueing exactly one non-rt signal, so that we
729 	   can get more detailed information about the cause of
730 	   the signal. */
731 	if (LEGACY_QUEUE(&t->pending, sig))
732 		goto out;
733 
734 	ret = send_signal(sig, info, t, &t->pending);
735 	if (!ret && !sigismember(&t->blocked, sig))
736 		signal_wake_up(t, sig == SIGKILL);
737 out:
738 	return ret;
739 }
740 
741 /*
742  * Force a signal that the process can't ignore: if necessary
743  * we unblock the signal and change any SIG_IGN to SIG_DFL.
744  *
745  * Note: If we unblock the signal, we always reset it to SIG_DFL,
746  * since we do not want to have a signal handler that was blocked
747  * be invoked when user space had explicitly blocked it.
748  *
749  * We don't want to have recursive SIGSEGV's etc, for example.
750  */
751 int
752 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
753 {
754 	unsigned long int flags;
755 	int ret, blocked, ignored;
756 	struct k_sigaction *action;
757 
758 	spin_lock_irqsave(&t->sighand->siglock, flags);
759 	action = &t->sighand->action[sig-1];
760 	ignored = action->sa.sa_handler == SIG_IGN;
761 	blocked = sigismember(&t->blocked, sig);
762 	if (blocked || ignored) {
763 		action->sa.sa_handler = SIG_DFL;
764 		if (blocked) {
765 			sigdelset(&t->blocked, sig);
766 			recalc_sigpending_and_wake(t);
767 		}
768 	}
769 	ret = specific_send_sig_info(sig, info, t);
770 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
771 
772 	return ret;
773 }
774 
775 void
776 force_sig_specific(int sig, struct task_struct *t)
777 {
778 	force_sig_info(sig, SEND_SIG_FORCED, t);
779 }
780 
781 /*
782  * Test if P wants to take SIG.  After we've checked all threads with this,
783  * it's equivalent to finding no threads not blocking SIG.  Any threads not
784  * blocking SIG were ruled out because they are not running and already
785  * have pending signals.  Such threads will dequeue from the shared queue
786  * as soon as they're available, so putting the signal on the shared queue
787  * will be equivalent to sending it to one such thread.
788  */
789 static inline int wants_signal(int sig, struct task_struct *p)
790 {
791 	if (sigismember(&p->blocked, sig))
792 		return 0;
793 	if (p->flags & PF_EXITING)
794 		return 0;
795 	if (sig == SIGKILL)
796 		return 1;
797 	if (p->state & (TASK_STOPPED | TASK_TRACED))
798 		return 0;
799 	return task_curr(p) || !signal_pending(p);
800 }
801 
802 static void
803 __group_complete_signal(int sig, struct task_struct *p)
804 {
805 	struct task_struct *t;
806 
807 	/*
808 	 * Now find a thread we can wake up to take the signal off the queue.
809 	 *
810 	 * If the main thread wants the signal, it gets first crack.
811 	 * Probably the least surprising to the average bear.
812 	 */
813 	if (wants_signal(sig, p))
814 		t = p;
815 	else if (thread_group_empty(p))
816 		/*
817 		 * There is just one thread and it does not need to be woken.
818 		 * It will dequeue unblocked signals before it runs again.
819 		 */
820 		return;
821 	else {
822 		/*
823 		 * Otherwise try to find a suitable thread.
824 		 */
825 		t = p->signal->curr_target;
826 		if (t == NULL)
827 			/* restart balancing at this thread */
828 			t = p->signal->curr_target = p;
829 
830 		while (!wants_signal(sig, t)) {
831 			t = next_thread(t);
832 			if (t == p->signal->curr_target)
833 				/*
834 				 * No thread needs to be woken.
835 				 * Any eligible threads will see
836 				 * the signal in the queue soon.
837 				 */
838 				return;
839 		}
840 		p->signal->curr_target = t;
841 	}
842 
843 	/*
844 	 * Found a killable thread.  If the signal will be fatal,
845 	 * then start taking the whole group down immediately.
846 	 */
847 	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
848 	    !sigismember(&t->real_blocked, sig) &&
849 	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
850 		/*
851 		 * This signal will be fatal to the whole group.
852 		 */
853 		if (!sig_kernel_coredump(sig)) {
854 			/*
855 			 * Start a group exit and wake everybody up.
856 			 * This way we don't have other threads
857 			 * running and doing things after a slower
858 			 * thread has the fatal signal pending.
859 			 */
860 			p->signal->flags = SIGNAL_GROUP_EXIT;
861 			p->signal->group_exit_code = sig;
862 			p->signal->group_stop_count = 0;
863 			t = p;
864 			do {
865 				sigaddset(&t->pending.signal, SIGKILL);
866 				signal_wake_up(t, 1);
867 				t = next_thread(t);
868 			} while (t != p);
869 			return;
870 		}
871 
872 		/*
873 		 * There will be a core dump.  We make all threads other
874 		 * than the chosen one go into a group stop so that nothing
875 		 * happens until it gets scheduled, takes the signal off
876 		 * the shared queue, and does the core dump.  This is a
877 		 * little more complicated than strictly necessary, but it
878 		 * keeps the signal state that winds up in the core dump
879 		 * unchanged from the death state, e.g. which thread had
880 		 * the core-dump signal unblocked.
881 		 */
882 		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
883 		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
884 		p->signal->group_stop_count = 0;
885 		p->signal->group_exit_task = t;
886 		t = p;
887 		do {
888 			p->signal->group_stop_count++;
889 			signal_wake_up(t, 0);
890 			t = next_thread(t);
891 		} while (t != p);
892 		wake_up_process(p->signal->group_exit_task);
893 		return;
894 	}
895 
896 	/*
897 	 * The signal is already in the shared-pending queue.
898 	 * Tell the chosen thread to wake up and dequeue it.
899 	 */
900 	signal_wake_up(t, sig == SIGKILL);
901 	return;
902 }
903 
904 int
905 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
906 {
907 	int ret = 0;
908 
909 	assert_spin_locked(&p->sighand->siglock);
910 	handle_stop_signal(sig, p);
911 
912 	/* Short-circuit ignored signals.  */
913 	if (sig_ignored(p, sig))
914 		return ret;
915 
916 	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
917 		/* This is a non-RT signal and we already have one queued.  */
918 		return ret;
919 
920 	/*
921 	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
922 	 * We always use the shared queue for process-wide signals,
923 	 * to avoid several races.
924 	 */
925 	ret = send_signal(sig, info, p, &p->signal->shared_pending);
926 	if (unlikely(ret))
927 		return ret;
928 
929 	__group_complete_signal(sig, p);
930 	return 0;
931 }
932 
933 /*
934  * Nuke all other threads in the group.
935  */
936 void zap_other_threads(struct task_struct *p)
937 {
938 	struct task_struct *t;
939 
940 	p->signal->flags = SIGNAL_GROUP_EXIT;
941 	p->signal->group_stop_count = 0;
942 
943 	if (thread_group_empty(p))
944 		return;
945 
946 	for (t = next_thread(p); t != p; t = next_thread(t)) {
947 		/*
948 		 * Don't bother with already dead threads
949 		 */
950 		if (t->exit_state)
951 			continue;
952 
953 		/* SIGKILL will be handled before any pending SIGSTOP */
954 		sigaddset(&t->pending.signal, SIGKILL);
955 		signal_wake_up(t, 1);
956 	}
957 }
958 
959 /*
960  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
961  */
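/*
 * The lock-and-recheck loop below exists because tsk->sighand may change
 * under us (for instance via exec's de_thread() or release_task()) between
 * the rcu_dereference() and taking siglock; if it did, drop the lock and
 * retry with the new pointer.
 */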
962 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
963 {
964 	struct sighand_struct *sighand;
965 
966 	for (;;) {
967 		sighand = rcu_dereference(tsk->sighand);
968 		if (unlikely(sighand == NULL))
969 			break;
970 
971 		spin_lock_irqsave(&sighand->siglock, *flags);
972 		if (likely(sighand == tsk->sighand))
973 			break;
974 		spin_unlock_irqrestore(&sighand->siglock, *flags);
975 	}
976 
977 	return sighand;
978 }
979 
980 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
981 {
982 	unsigned long flags;
983 	int ret;
984 
985 	ret = check_kill_permission(sig, info, p);
986 
987 	if (!ret && sig) {
988 		ret = -ESRCH;
989 		if (lock_task_sighand(p, &flags)) {
990 			ret = __group_send_sig_info(sig, info, p);
991 			unlock_task_sighand(p, &flags);
992 		}
993 	}
994 
995 	return ret;
996 }
997 
998 /*
999  * kill_pgrp_info() sends a signal to a process group: this is what the tty
1000  * control characters do (^C, ^Z etc)
1001  */
1002 
1003 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1004 {
1005 	struct task_struct *p = NULL;
1006 	int retval, success;
1007 
1008 	success = 0;
1009 	retval = -ESRCH;
1010 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1011 		int err = group_send_sig_info(sig, info, p);
1012 		success |= !err;
1013 		retval = err;
1014 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1015 	return success ? 0 : retval;
1016 }
1017 
1018 int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1019 {
1020 	int retval;
1021 
1022 	read_lock(&tasklist_lock);
1023 	retval = __kill_pgrp_info(sig, info, pgrp);
1024 	read_unlock(&tasklist_lock);
1025 
1026 	return retval;
1027 }
1028 
1029 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1030 {
1031 	int error;
1032 	struct task_struct *p;
1033 
1034 	rcu_read_lock();
1035 	if (unlikely(sig_needs_tasklist(sig)))
1036 		read_lock(&tasklist_lock);
1037 
1038 	p = pid_task(pid, PIDTYPE_PID);
1039 	error = -ESRCH;
1040 	if (p)
1041 		error = group_send_sig_info(sig, info, p);
1042 
1043 	if (unlikely(sig_needs_tasklist(sig)))
1044 		read_unlock(&tasklist_lock);
1045 	rcu_read_unlock();
1046 	return error;
1047 }
1048 
1049 int
1050 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1051 {
1052 	int error;
1053 	rcu_read_lock();
1054 	error = kill_pid_info(sig, info, find_pid(pid));
1055 	rcu_read_unlock();
1056 	return error;
1057 }
1058 
1059 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1060 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1061 		      uid_t uid, uid_t euid, u32 secid)
1062 {
1063 	int ret = -EINVAL;
1064 	struct task_struct *p;
1065 
1066 	if (!valid_signal(sig))
1067 		return ret;
1068 
1069 	read_lock(&tasklist_lock);
1070 	p = pid_task(pid, PIDTYPE_PID);
1071 	if (!p) {
1072 		ret = -ESRCH;
1073 		goto out_unlock;
1074 	}
1075 	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1076 	    && (euid != p->suid) && (euid != p->uid)
1077 	    && (uid != p->suid) && (uid != p->uid)) {
1078 		ret = -EPERM;
1079 		goto out_unlock;
1080 	}
1081 	ret = security_task_kill(p, info, sig, secid);
1082 	if (ret)
1083 		goto out_unlock;
1084 	if (sig && p->sighand) {
1085 		unsigned long flags;
1086 		spin_lock_irqsave(&p->sighand->siglock, flags);
1087 		ret = __group_send_sig_info(sig, info, p);
1088 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1089 	}
1090 out_unlock:
1091 	read_unlock(&tasklist_lock);
1092 	return ret;
1093 }
1094 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1095 
1096 /*
1097  * kill_something_info() interprets pid in interesting ways just like kill(2).
1098  *
1099  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1100  * is probably wrong.  Should make it like BSD or SYSV.
1101  */
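/*
 * As with kill(2): pid > 0 names a single process, pid == 0 means the
 * caller's own process group, pid == -1 means every process the caller
 * may signal except init and itself, and pid < -1 means the process
 * group -pid.
 */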
1102 
1103 static int kill_something_info(int sig, struct siginfo *info, int pid)
1104 {
1105 	int ret;
1106 	rcu_read_lock();
1107 	if (!pid) {
1108 		ret = kill_pgrp_info(sig, info, task_pgrp(current));
1109 	} else if (pid == -1) {
1110 		int retval = 0, count = 0;
1111 		struct task_struct * p;
1112 
1113 		read_lock(&tasklist_lock);
1114 		for_each_process(p) {
1115 			if (p->pid > 1 && p->tgid != current->tgid) {
1116 				int err = group_send_sig_info(sig, info, p);
1117 				++count;
1118 				if (err != -EPERM)
1119 					retval = err;
1120 			}
1121 		}
1122 		read_unlock(&tasklist_lock);
1123 		ret = count ? retval : -ESRCH;
1124 	} else if (pid < 0) {
1125 		ret = kill_pgrp_info(sig, info, find_pid(-pid));
1126 	} else {
1127 		ret = kill_pid_info(sig, info, find_pid(pid));
1128 	}
1129 	rcu_read_unlock();
1130 	return ret;
1131 }
1132 
1133 /*
1134  * These are for backward compatibility with the rest of the kernel source.
1135  */
1136 
1137 /*
1138  * These two are the most common entry points.  They send a signal
1139  * just to the specific thread.
1140  */
1141 int
1142 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1143 {
1144 	int ret;
1145 	unsigned long flags;
1146 
1147 	/*
1148 	 * Make sure legacy kernel users don't send in bad values
1149 	 * (normal paths check this in check_kill_permission).
1150 	 */
1151 	if (!valid_signal(sig))
1152 		return -EINVAL;
1153 
1154 	/*
1155 	 * We need the tasklist lock even for the specific
1156 	 * thread case (when we don't need to follow the group
1157 	 * lists) in order to avoid races with "p->sighand"
1158 	 * going away or changing from under us.
1159 	 */
1160 	read_lock(&tasklist_lock);
1161 	spin_lock_irqsave(&p->sighand->siglock, flags);
1162 	ret = specific_send_sig_info(sig, info, p);
1163 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
1164 	read_unlock(&tasklist_lock);
1165 	return ret;
1166 }
1167 
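/*
 * "priv" picks the siginfo shorthand: non-zero means the signal originates
 * inside the kernel (SEND_SIG_PRIV, SI_KERNEL), zero means it is treated
 * as if sent from user space (SEND_SIG_NOINFO, SI_USER).
 */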
1168 #define __si_special(priv) \
1169 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1170 
1171 int
1172 send_sig(int sig, struct task_struct *p, int priv)
1173 {
1174 	return send_sig_info(sig, __si_special(priv), p);
1175 }
1176 
1177 /*
1178  * This is the entry point for "process-wide" signals.
1179  * They will go to an appropriate thread in the thread group.
1180  */
1181 int
1182 send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1183 {
1184 	int ret;
1185 	read_lock(&tasklist_lock);
1186 	ret = group_send_sig_info(sig, info, p);
1187 	read_unlock(&tasklist_lock);
1188 	return ret;
1189 }
1190 
1191 void
1192 force_sig(int sig, struct task_struct *p)
1193 {
1194 	force_sig_info(sig, SEND_SIG_PRIV, p);
1195 }
1196 
1197 /*
1198  * When things go south during signal handling, we
1199  * will force a SIGSEGV. And if the signal that caused
1200  * the problem was already a SIGSEGV, we'll want to
1201  * make sure we don't even try to deliver the signal..
1202  */
1203 int
1204 force_sigsegv(int sig, struct task_struct *p)
1205 {
1206 	if (sig == SIGSEGV) {
1207 		unsigned long flags;
1208 		spin_lock_irqsave(&p->sighand->siglock, flags);
1209 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1210 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1211 	}
1212 	force_sig(SIGSEGV, p);
1213 	return 0;
1214 }
1215 
1216 int kill_pgrp(struct pid *pid, int sig, int priv)
1217 {
1218 	return kill_pgrp_info(sig, __si_special(priv), pid);
1219 }
1220 EXPORT_SYMBOL(kill_pgrp);
1221 
1222 int kill_pid(struct pid *pid, int sig, int priv)
1223 {
1224 	return kill_pid_info(sig, __si_special(priv), pid);
1225 }
1226 EXPORT_SYMBOL(kill_pid);
1227 
1228 int
1229 kill_proc(pid_t pid, int sig, int priv)
1230 {
1231 	return kill_proc_info(sig, __si_special(priv), pid);
1232 }
1233 
1234 /*
1235  * These functions support sending signals using preallocated sigqueue
1236  * structures.  This is needed "because realtime applications cannot
1237  * afford to lose notifications of asynchronous events, like timer
1238  * expirations or I/O completions".  In the case of Posix Timers
1239  * we allocate the sigqueue structure in timer_create().  If this
1240  * allocation fails we are able to report the failure to the application
1241  * with an EAGAIN error.
1242  */
1243 
1244 struct sigqueue *sigqueue_alloc(void)
1245 {
1246 	struct sigqueue *q;
1247 
1248 	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1249 		q->flags |= SIGQUEUE_PREALLOC;
1250 	return(q);
1251 }
1252 
1253 void sigqueue_free(struct sigqueue *q)
1254 {
1255 	unsigned long flags;
1256 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1257 	/*
1258 	 * If the signal is still pending remove it from the
1259 	 * pending queue.
1260 	 */
1261 	if (unlikely(!list_empty(&q->list))) {
1262 		spinlock_t *lock = &current->sighand->siglock;
1263 		read_lock(&tasklist_lock);
1264 		spin_lock_irqsave(lock, flags);
1265 		if (!list_empty(&q->list))
1266 			list_del_init(&q->list);
1267 		spin_unlock_irqrestore(lock, flags);
1268 		read_unlock(&tasklist_lock);
1269 	}
1270 	q->flags &= ~SIGQUEUE_PREALLOC;
1271 	__sigqueue_free(q);
1272 }
1273 
1274 int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1275 {
1276 	unsigned long flags;
1277 	int ret = 0;
1278 
1279 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1280 
1281 	/*
1282 	 * The rcu based delayed sighand destroy makes it possible to
1283 	 * run this without tasklist lock held. The task struct itself
1284 	 * cannot go away as create_timer did get_task_struct().
1285 	 *
1286 	 * We return -1 when the task is marked exiting, so
1287 	 * posix_timer_event can redirect it to the group leader.
1288 	 */
1289 	rcu_read_lock();
1290 
1291 	if (!likely(lock_task_sighand(p, &flags))) {
1292 		ret = -1;
1293 		goto out_err;
1294 	}
1295 
1296 	if (unlikely(!list_empty(&q->list))) {
1297 		/*
1298 		 * If an SI_TIMER entry is already queued, just increment
1299 		 * the overrun count.
1300 		 */
1301 		BUG_ON(q->info.si_code != SI_TIMER);
1302 		q->info.si_overrun++;
1303 		goto out;
1304 	}
1305 	/* Short-circuit ignored signals.  */
1306 	if (sig_ignored(p, sig)) {
1307 		ret = 1;
1308 		goto out;
1309 	}
1310 	/*
1311 	 * Deliver the signal to listening signalfds. This must be called
1312 	 * with the sighand lock held.
1313 	 */
1314 	signalfd_notify(p, sig);
1315 
1316 	list_add_tail(&q->list, &p->pending.list);
1317 	sigaddset(&p->pending.signal, sig);
1318 	if (!sigismember(&p->blocked, sig))
1319 		signal_wake_up(p, sig == SIGKILL);
1320 
1321 out:
1322 	unlock_task_sighand(p, &flags);
1323 out_err:
1324 	rcu_read_unlock();
1325 
1326 	return ret;
1327 }
1328 
1329 int
1330 send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1331 {
1332 	unsigned long flags;
1333 	int ret = 0;
1334 
1335 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1336 
1337 	read_lock(&tasklist_lock);
1338 	/* Since it_lock is held, p->sighand cannot be NULL. */
1339 	spin_lock_irqsave(&p->sighand->siglock, flags);
1340 	handle_stop_signal(sig, p);
1341 
1342 	/* Short-circuit ignored signals.  */
1343 	if (sig_ignored(p, sig)) {
1344 		ret = 1;
1345 		goto out;
1346 	}
1347 
1348 	if (unlikely(!list_empty(&q->list))) {
1349 		/*
1350 		 * If an SI_TIMER entry is already queued, just increment
1351 		 * the overrun count.  Other uses should not try to
1352 		 * send the signal multiple times.
1353 		 */
1354 		BUG_ON(q->info.si_code != SI_TIMER);
1355 		q->info.si_overrun++;
1356 		goto out;
1357 	}
1358 	/*
1359 	 * Deliver the signal to listening signalfds. This must be called
1360 	 * with the sighand lock held.
1361 	 */
1362 	signalfd_notify(p, sig);
1363 
1364 	/*
1365 	 * Put this signal on the shared-pending queue.
1366 	 * We always use the shared queue for process-wide signals,
1367 	 * to avoid several races.
1368 	 */
1369 	list_add_tail(&q->list, &p->signal->shared_pending.list);
1370 	sigaddset(&p->signal->shared_pending.signal, sig);
1371 
1372 	__group_complete_signal(sig, p);
1373 out:
1374 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
1375 	read_unlock(&tasklist_lock);
1376 	return ret;
1377 }
1378 
1379 /*
1380  * Wake up any threads in the parent blocked in wait* syscalls.
1381  */
1382 static inline void __wake_up_parent(struct task_struct *p,
1383 				    struct task_struct *parent)
1384 {
1385 	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1386 }
1387 
1388 /*
1389  * Let a parent know about the death of a child.
1390  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1391  */
1392 
1393 void do_notify_parent(struct task_struct *tsk, int sig)
1394 {
1395 	struct siginfo info;
1396 	unsigned long flags;
1397 	struct sighand_struct *psig;
1398 
1399 	BUG_ON(sig == -1);
1400 
1401  	/* do_notify_parent_cldstop should have been called instead.  */
1402  	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1403 
1404 	BUG_ON(!tsk->ptrace &&
1405 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1406 
1407 	info.si_signo = sig;
1408 	info.si_errno = 0;
1409 	info.si_pid = tsk->pid;
1410 	info.si_uid = tsk->uid;
1411 
1412 	/* FIXME: find out whether or not this is supposed to be c*time. */
1413 	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1414 						       tsk->signal->utime));
1415 	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1416 						       tsk->signal->stime));
1417 
1418 	info.si_status = tsk->exit_code & 0x7f;
1419 	if (tsk->exit_code & 0x80)
1420 		info.si_code = CLD_DUMPED;
1421 	else if (tsk->exit_code & 0x7f)
1422 		info.si_code = CLD_KILLED;
1423 	else {
1424 		info.si_code = CLD_EXITED;
1425 		info.si_status = tsk->exit_code >> 8;
1426 	}
1427 
1428 	psig = tsk->parent->sighand;
1429 	spin_lock_irqsave(&psig->siglock, flags);
1430 	if (!tsk->ptrace && sig == SIGCHLD &&
1431 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1432 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1433 		/*
1434 		 * We are exiting and our parent doesn't care.  POSIX.1
1435 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1436 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1437 		 * automatically and not left for our parent's wait4 call.
1438 		 * Rather than having the parent do it as a magic kind of
1439 		 * signal handler, we just set this to tell do_exit that we
1440 		 * can be cleaned up without becoming a zombie.  Note that
1441 		 * we still call __wake_up_parent in this case, because a
1442 		 * blocked sys_wait4 might now return -ECHILD.
1443 		 *
1444 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1445 		 * is implementation-defined: we do (if you don't want
1446 		 * it, just use SIG_IGN instead).
1447 		 */
1448 		tsk->exit_signal = -1;
1449 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1450 			sig = 0;
1451 	}
1452 	if (valid_signal(sig) && sig > 0)
1453 		__group_send_sig_info(sig, &info, tsk->parent);
1454 	__wake_up_parent(tsk, tsk->parent);
1455 	spin_unlock_irqrestore(&psig->siglock, flags);
1456 }
1457 
1458 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1459 {
1460 	struct siginfo info;
1461 	unsigned long flags;
1462 	struct task_struct *parent;
1463 	struct sighand_struct *sighand;
1464 
1465 	if (tsk->ptrace & PT_PTRACED)
1466 		parent = tsk->parent;
1467 	else {
1468 		tsk = tsk->group_leader;
1469 		parent = tsk->real_parent;
1470 	}
1471 
1472 	info.si_signo = SIGCHLD;
1473 	info.si_errno = 0;
1474 	info.si_pid = tsk->pid;
1475 	info.si_uid = tsk->uid;
1476 
1477 	/* FIXME: find out whether or not this is supposed to be c*time. */
1478 	info.si_utime = cputime_to_jiffies(tsk->utime);
1479 	info.si_stime = cputime_to_jiffies(tsk->stime);
1480 
1481  	info.si_code = why;
1482  	switch (why) {
1483  	case CLD_CONTINUED:
1484  		info.si_status = SIGCONT;
1485  		break;
1486  	case CLD_STOPPED:
1487  		info.si_status = tsk->signal->group_exit_code & 0x7f;
1488  		break;
1489  	case CLD_TRAPPED:
1490  		info.si_status = tsk->exit_code & 0x7f;
1491  		break;
1492  	default:
1493  		BUG();
1494  	}
1495 
1496 	sighand = parent->sighand;
1497 	spin_lock_irqsave(&sighand->siglock, flags);
1498 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1499 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1500 		__group_send_sig_info(SIGCHLD, &info, parent);
1501 	/*
1502 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1503 	 */
1504 	__wake_up_parent(tsk, parent);
1505 	spin_unlock_irqrestore(&sighand->siglock, flags);
1506 }
1507 
1508 static inline int may_ptrace_stop(void)
1509 {
1510 	if (!likely(current->ptrace & PT_PTRACED))
1511 		return 0;
1512 
1513 	if (unlikely(current->parent == current->real_parent &&
1514 		    (current->ptrace & PT_ATTACHED)))
1515 		return 0;
1516 
1517 	if (unlikely(current->signal == current->parent->signal) &&
1518 	    unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
1519 		return 0;
1520 
1521 	/*
1522 	 * Are we in the middle of do_coredump?
1523 	 * If so, and our tracer is also part of the coredump, stopping
1524 	 * is a deadlock situation and pointless because our tracer
1525 	 * is dead, so don't allow us to stop.
1526 	 * If SIGKILL was already sent before the caller unlocked
1527 	 * ->siglock we must see ->core_waiters != 0. Otherwise it
1528 	 * is safe to enter schedule().
1529 	 */
1530 	if (unlikely(current->mm->core_waiters) &&
1531 	    unlikely(current->mm == current->parent->mm))
1532 		return 0;
1533 
1534 	return 1;
1535 }
1536 
1537 /*
1538  * This must be called with current->sighand->siglock held.
1539  *
1540  * This should be the path for all ptrace stops.
1541  * We always set current->last_siginfo while stopped here.
1542  * That makes it a way to test a stopped process for
1543  * being ptrace-stopped vs being job-control-stopped.
1544  *
1545  * If we actually decide not to stop at all because the tracer is gone,
1546  * we leave nostop_code in current->exit_code.
1547  */
1548 static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1549 {
1550 	/*
1551 	 * If there is a group stop in progress,
1552 	 * we must participate in the bookkeeping.
1553 	 */
1554 	if (current->signal->group_stop_count > 0)
1555 		--current->signal->group_stop_count;
1556 
1557 	current->last_siginfo = info;
1558 	current->exit_code = exit_code;
1559 
1560 	/* Let the debugger run.  */
1561 	set_current_state(TASK_TRACED);
1562 	spin_unlock_irq(&current->sighand->siglock);
1563 	try_to_freeze();
1564 	read_lock(&tasklist_lock);
1565 	if (may_ptrace_stop()) {
1566 		do_notify_parent_cldstop(current, CLD_TRAPPED);
1567 		read_unlock(&tasklist_lock);
1568 		schedule();
1569 	} else {
1570 		/*
1571 		 * By the time we got the lock, our tracer went away.
1572 		 * Don't stop here.
1573 		 */
1574 		read_unlock(&tasklist_lock);
1575 		set_current_state(TASK_RUNNING);
1576 		current->exit_code = nostop_code;
1577 	}
1578 
1579 	/*
1580 	 * We are back.  Now reacquire the siglock before touching
1581 	 * last_siginfo, so that we are sure to have synchronized with
1582 	 * any signal-sending on another CPU that wants to examine it.
1583 	 */
1584 	spin_lock_irq(&current->sighand->siglock);
1585 	current->last_siginfo = NULL;
1586 
1587 	/*
1588 	 * Queued signals ignored us while we were stopped for tracing.
1589 	 * So check for any that we should take before resuming user mode.
1590 	 * This sets TIF_SIGPENDING, but never clears it.
1591 	 */
1592 	recalc_sigpending_tsk(current);
1593 }
1594 
1595 void ptrace_notify(int exit_code)
1596 {
1597 	siginfo_t info;
1598 
1599 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1600 
1601 	memset(&info, 0, sizeof info);
1602 	info.si_signo = SIGTRAP;
1603 	info.si_code = exit_code;
1604 	info.si_pid = current->pid;
1605 	info.si_uid = current->uid;
1606 
1607 	/* Let the debugger run.  */
1608 	spin_lock_irq(&current->sighand->siglock);
1609 	ptrace_stop(exit_code, 0, &info);
1610 	spin_unlock_irq(&current->sighand->siglock);
1611 }
1612 
1613 static void
1614 finish_stop(int stop_count)
1615 {
1616 	/*
1617 	 * If there are no other threads in the group, or if there is
1618 	 * a group stop in progress and we are the last to stop,
1619 	 * report to the parent.  When ptraced, every thread reports itself.
1620 	 */
1621 	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
1622 		read_lock(&tasklist_lock);
1623 		do_notify_parent_cldstop(current, CLD_STOPPED);
1624 		read_unlock(&tasklist_lock);
1625 	}
1626 
1627 	do {
1628 		schedule();
1629 	} while (try_to_freeze());
1630 	/*
1631 	 * Now we don't run again until continued.
1632 	 */
1633 	current->exit_code = 0;
1634 }
1635 
1636 /*
1637  * This performs the stopping for SIGSTOP and other stop signals.
1638  * We have to stop all threads in the thread group.
1639  * Returns nonzero if we've actually stopped and released the siglock.
1640  * Returns zero if we didn't stop and still hold the siglock.
1641  */
1642 static int do_signal_stop(int signr)
1643 {
1644 	struct signal_struct *sig = current->signal;
1645 	int stop_count;
1646 
1647 	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1648 		return 0;
1649 
1650 	if (sig->group_stop_count > 0) {
1651 		/*
1652 		 * There is a group stop in progress.  We don't need to
1653 		 * start another one.
1654 		 */
1655 		stop_count = --sig->group_stop_count;
1656 	} else {
1657 		/*
1658 		 * There is no group stop already in progress.
1659 		 * We must initiate one now.
1660 		 */
1661 		struct task_struct *t;
1662 
1663 		sig->group_exit_code = signr;
1664 
1665 		stop_count = 0;
1666 		for (t = next_thread(current); t != current; t = next_thread(t))
1667 			/*
1668 			 * Setting state to TASK_STOPPED for a group
1669 			 * stop is always done with the siglock held,
1670 			 * so this check has no races.
1671 			 */
1672 			if (!t->exit_state &&
1673 			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
1674 				stop_count++;
1675 				signal_wake_up(t, 0);
1676 			}
1677 		sig->group_stop_count = stop_count;
1678 	}
1679 
1680 	if (stop_count == 0)
1681 		sig->flags = SIGNAL_STOP_STOPPED;
1682 	current->exit_code = sig->group_exit_code;
1683 	__set_current_state(TASK_STOPPED);
1684 
1685 	spin_unlock_irq(&current->sighand->siglock);
1686 	finish_stop(stop_count);
1687 	return 1;
1688 }
1689 
1690 /*
1691  * Do appropriate magic when group_stop_count > 0.
1692  * We return nonzero if we stopped, after releasing the siglock.
1693  * We return zero if we still hold the siglock and should look
1694  * for another signal without checking group_stop_count again.
1695  */
1696 static int handle_group_stop(void)
1697 {
1698 	int stop_count;
1699 
1700 	if (current->signal->group_exit_task == current) {
1701 		/*
1702 		 * Group stop is so we can do a core dump;
1703 		 * we are the initiating thread, so get on with it.
1704 		 */
1705 		current->signal->group_exit_task = NULL;
1706 		return 0;
1707 	}
1708 
1709 	if (current->signal->flags & SIGNAL_GROUP_EXIT)
1710 		/*
1711 		 * Group stop is so another thread can do a core dump,
1712 		 * or else we are racing against a death signal.
1713 		 * Just punt the stop so we can get the next signal.
1714 		 */
1715 		return 0;
1716 
1717 	/*
1718 	 * There is a group stop in progress.  We stop
1719 	 * without any associated signal being in our queue.
1720 	 */
1721 	stop_count = --current->signal->group_stop_count;
1722 	if (stop_count == 0)
1723 		current->signal->flags = SIGNAL_STOP_STOPPED;
1724 	current->exit_code = current->signal->group_exit_code;
1725 	set_current_state(TASK_STOPPED);
1726 	spin_unlock_irq(&current->sighand->siglock);
1727 	finish_stop(stop_count);
1728 	return 1;
1729 }
1730 
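
/*
 * Central signal-delivery loop, run on the return-to-user path when
 * TIF_SIGPENDING is set: it participates in group stops and ptrace stops,
 * drops ignored signals, hands handled signals back to the arch code, and
 * carries out the default actions (stop, core dump, exit) itself.
 */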
1731 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1732 			  struct pt_regs *regs, void *cookie)
1733 {
1734 	sigset_t *mask = &current->blocked;
1735 	int signr = 0;
1736 
1737 	try_to_freeze();
1738 
1739 relock:
1740 	spin_lock_irq(&current->sighand->siglock);
1741 	for (;;) {
1742 		struct k_sigaction *ka;
1743 
1744 		if (unlikely(current->signal->group_stop_count > 0) &&
1745 		    handle_group_stop())
1746 			goto relock;
1747 
1748 		signr = dequeue_signal(current, mask, info);
1749 
1750 		if (!signr)
1751 			break; /* will return 0 */
1752 
1753 		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1754 			ptrace_signal_deliver(regs, cookie);
1755 
1756 			/* Let the debugger run.  */
1757 			ptrace_stop(signr, signr, info);
1758 
1759 			/* We're back.  Did the debugger cancel the sig?  */
1760 			signr = current->exit_code;
1761 			if (signr == 0)
1762 				continue;
1763 
1764 			current->exit_code = 0;
1765 
1766 			/* Update the siginfo structure if the signal has
1767 			   changed.  If the debugger wanted something
1768 			   specific in the siginfo structure then it should
1769 			   have updated *info via PTRACE_SETSIGINFO.  */
1770 			if (signr != info->si_signo) {
1771 				info->si_signo = signr;
1772 				info->si_errno = 0;
1773 				info->si_code = SI_USER;
1774 				info->si_pid = current->parent->pid;
1775 				info->si_uid = current->parent->uid;
1776 			}
1777 
1778 			/* If the (new) signal is now blocked, requeue it.  */
1779 			if (sigismember(&current->blocked, signr)) {
1780 				specific_send_sig_info(signr, info, current);
1781 				continue;
1782 			}
1783 		}
1784 
1785 		ka = &current->sighand->action[signr-1];
1786 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1787 			continue;
1788 		if (ka->sa.sa_handler != SIG_DFL) {
1789 			/* Run the handler.  */
1790 			*return_ka = *ka;
1791 
1792 			if (ka->sa.sa_flags & SA_ONESHOT)
1793 				ka->sa.sa_handler = SIG_DFL;
1794 
1795 			break; /* will return non-zero "signr" value */
1796 		}
1797 
1798 		/*
1799 		 * Now we are doing the default action for this signal.
1800 		 */
1801 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
1802 			continue;
1803 
1804 		/*
1805 		 * Init of a pid space gets no signals it doesn't want from
1806 		 * within that pid space. It can of course get signals from
1807 		 * its parent pid space.
1808 		 */
1809 		if (current == child_reaper(current))
1810 			continue;
1811 
1812 		if (sig_kernel_stop(signr)) {
1813 			/*
1814 			 * The default action is to stop all threads in
1815 			 * the thread group.  The job control signals
1816 			 * do nothing in an orphaned pgrp, but SIGSTOP
1817 			 * always works.  Note that siglock needs to be
1818 			 * dropped during the call to is_orphaned_pgrp()
1819 			 * because of lock ordering with tasklist_lock.
1820 			 * This allows an intervening SIGCONT to be posted.
1821 			 * We need to check for that and bail out if necessary.
1822 			 */
1823 			if (signr != SIGSTOP) {
1824 				spin_unlock_irq(&current->sighand->siglock);
1825 
1826 				/* signals can be posted during this window */
1827 
1828 				if (is_current_pgrp_orphaned())
1829 					goto relock;
1830 
1831 				spin_lock_irq(&current->sighand->siglock);
1832 			}
1833 
1834 			if (likely(do_signal_stop(signr))) {
1835 				/* It released the siglock.  */
1836 				goto relock;
1837 			}
1838 
1839 			/*
1840 			 * We didn't actually stop, due to a race
1841 			 * with SIGCONT or something like that.
1842 			 */
1843 			continue;
1844 		}
1845 
1846 		spin_unlock_irq(&current->sighand->siglock);
1847 
1848 		/*
1849 		 * Anything else is fatal, maybe with a core dump.
1850 		 */
1851 		current->flags |= PF_SIGNALED;
1852 		if (sig_kernel_coredump(signr)) {
1853 			/*
1854 			 * If it was able to dump core, this kills all
1855 			 * other threads in the group and synchronizes with
1856 			 * their demise.  If we lost the race with another
1857 			 * thread getting here, it set group_exit_code
1858 			 * first and our do_group_exit call below will use
1859 			 * that value and ignore the one we pass it.
1860 			 */
1861 			do_coredump((long)signr, signr, regs);
1862 		}
1863 
1864 		/*
1865 		 * Death signals, no core dump.
1866 		 */
1867 		do_group_exit(signr);
1868 		/* NOTREACHED */
1869 	}
1870 	spin_unlock_irq(&current->sighand->siglock);
1871 	return signr;
1872 }
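
/*
 * Illustrative sketch: roughly how an architecture's do_signal() consumes
 * the value returned by get_signal_to_deliver() above.  The function name
 * and the frame-setup step are hypothetical; every architecture has its
 * own version of this path.
 */
#if 0
static void example_arch_do_signal(struct pt_regs *regs)
{
	struct k_sigaction ka;
	siginfo_t info;
	int signr;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/* Build the user-space signal frame for ka.sa.sa_handler
		 * and point regs at it, so the handler runs on the way
		 * back to user mode. */
	}
}
#endif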
1873 
1874 EXPORT_SYMBOL(recalc_sigpending);
1875 EXPORT_SYMBOL_GPL(dequeue_signal);
1876 EXPORT_SYMBOL(flush_signals);
1877 EXPORT_SYMBOL(force_sig);
1878 EXPORT_SYMBOL(kill_proc);
1879 EXPORT_SYMBOL(ptrace_notify);
1880 EXPORT_SYMBOL(send_sig);
1881 EXPORT_SYMBOL(send_sig_info);
1882 EXPORT_SYMBOL(sigprocmask);
1883 EXPORT_SYMBOL(block_all_signals);
1884 EXPORT_SYMBOL(unblock_all_signals);
1885 
1886 
1887 /*
1888  * System call entry points.
1889  */
1890 
1891 asmlinkage long sys_restart_syscall(void)
1892 {
1893 	struct restart_block *restart = &current_thread_info()->restart_block;
1894 	return restart->fn(restart);
1895 }
1896 
1897 long do_no_restart_syscall(struct restart_block *param)
1898 {
1899 	return -EINTR;
1900 }
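
/*
 * Illustrative sketch: how a syscall typically arranges to be restarted
 * through sys_restart_syscall() above.  example_wait() and the use of the
 * arg0 slot are hypothetical; real users (e.g. nanosleep) stash whatever
 * per-call state they need in struct restart_block.
 */
#if 0
static long example_wait(unsigned long remaining);

static long example_restart_fn(struct restart_block *restart)
{
	/* Resume the interrupted wait from the saved state. */
	return example_wait(restart->arg0);
}

static long example_wait(unsigned long remaining)
{
	while (remaining--) {
		if (signal_pending(current)) {
			struct restart_block *restart =
				&current_thread_info()->restart_block;

			/* Remember how much work is left and ask to be
			 * restarted at example_restart_fn() once signals
			 * have been dealt with. */
			restart->fn = example_restart_fn;
			restart->arg0 = remaining;
			return -ERESTART_RESTARTBLOCK;
		}
		schedule_timeout_interruptible(1);
	}
	return 0;
}
#endif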
1901 
1902 /*
1903  * We don't need to get the kernel lock - this is all local to this
1904  * particular thread (and that's good, because this is _heavily_
1905  * used by various programs).
1906  */
1907 
1908 /*
1909  * This is also useful for kernel threads that want to temporarily
1910  * (or permanently) block certain signals.
1911  *
1912  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
1913  * interface happily blocks "unblockable" signals like SIGKILL
1914  * and friends.
1915  */
1916 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
1917 {
1918 	int error;
1919 
1920 	spin_lock_irq(&current->sighand->siglock);
1921 	if (oldset)
1922 		*oldset = current->blocked;
1923 
1924 	error = 0;
1925 	switch (how) {
1926 	case SIG_BLOCK:
1927 		sigorsets(&current->blocked, &current->blocked, set);
1928 		break;
1929 	case SIG_UNBLOCK:
1930 		signandsets(&current->blocked, &current->blocked, set);
1931 		break;
1932 	case SIG_SETMASK:
1933 		current->blocked = *set;
1934 		break;
1935 	default:
1936 		error = -EINVAL;
1937 	}
1938 	recalc_sigpending();
1939 	spin_unlock_irq(&current->sighand->siglock);
1940 
1941 	return error;
1942 }
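
/*
 * Illustrative sketch: a kernel thread using the in-kernel sigprocmask()
 * above.  Because this interface will happily block SIGKILL too, the mask
 * is trimmed explicitly before it is installed.  The function itself is
 * hypothetical.
 */
#if 0
static void example_block_most_signals(void)
{
	sigset_t blocked;

	sigfillset(&blocked);
	sigdelsetmask(&blocked, sigmask(SIGKILL) | sigmask(SIGSTOP));
	sigprocmask(SIG_BLOCK, &blocked, NULL);
}
#endif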
1943 
1944 asmlinkage long
1945 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
1946 {
1947 	int error = -EINVAL;
1948 	sigset_t old_set, new_set;
1949 
1950 	/* XXX: Don't preclude handling different sized sigset_t's.  */
1951 	if (sigsetsize != sizeof(sigset_t))
1952 		goto out;
1953 
1954 	if (set) {
1955 		error = -EFAULT;
1956 		if (copy_from_user(&new_set, set, sizeof(*set)))
1957 			goto out;
1958 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
1959 
1960 		error = sigprocmask(how, &new_set, &old_set);
1961 		if (error)
1962 			goto out;
1963 		if (oset)
1964 			goto set_old;
1965 	} else if (oset) {
1966 		spin_lock_irq(&current->sighand->siglock);
1967 		old_set = current->blocked;
1968 		spin_unlock_irq(&current->sighand->siglock);
1969 
1970 	set_old:
1971 		error = -EFAULT;
1972 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
1973 			goto out;
1974 	}
1975 	error = 0;
1976 out:
1977 	return error;
1978 }
1979 
1980 long do_sigpending(void __user *set, unsigned long sigsetsize)
1981 {
1982 	long error = -EINVAL;
1983 	sigset_t pending;
1984 
1985 	if (sigsetsize > sizeof(sigset_t))
1986 		goto out;
1987 
1988 	spin_lock_irq(&current->sighand->siglock);
1989 	sigorsets(&pending, &current->pending.signal,
1990 		  &current->signal->shared_pending.signal);
1991 	spin_unlock_irq(&current->sighand->siglock);
1992 
1993 	/* Outside the lock because only this thread touches it.  */
1994 	sigandsets(&pending, &current->blocked, &pending);
1995 
1996 	error = -EFAULT;
1997 	if (!copy_to_user(set, &pending, sigsetsize))
1998 		error = 0;
1999 
2000 out:
2001 	return error;
2002 }
2003 
2004 asmlinkage long
2005 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2006 {
2007 	return do_sigpending(set, sigsetsize);
2008 }
2009 
2010 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2011 
2012 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2013 {
2014 	int err;
2015 
2016 	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2017 		return -EFAULT;
2018 	if (from->si_code < 0)
2019 		return __copy_to_user(to, from, sizeof(siginfo_t))
2020 			? -EFAULT : 0;
2021 	/*
2022 	 * If you change the siginfo_t structure, please be sure
2023 	 * this code is fixed accordingly.
2024 	 * Please remember to update the signalfd_copyinfo() function
2025 	 * inside fs/signalfd.c too, in case siginfo_t changes.
2026 	 * It should never copy any pad contained in the structure
2027 	 * to avoid security leaks, but must copy the generic
2028 	 * 3 ints plus the relevant union member.
2029 	 */
2030 	err = __put_user(from->si_signo, &to->si_signo);
2031 	err |= __put_user(from->si_errno, &to->si_errno);
2032 	err |= __put_user((short)from->si_code, &to->si_code);
2033 	switch (from->si_code & __SI_MASK) {
2034 	case __SI_KILL:
2035 		err |= __put_user(from->si_pid, &to->si_pid);
2036 		err |= __put_user(from->si_uid, &to->si_uid);
2037 		break;
2038 	case __SI_TIMER:
2039 		 err |= __put_user(from->si_tid, &to->si_tid);
2040 		 err |= __put_user(from->si_overrun, &to->si_overrun);
2041 		 err |= __put_user(from->si_ptr, &to->si_ptr);
2042 		break;
2043 	case __SI_POLL:
2044 		err |= __put_user(from->si_band, &to->si_band);
2045 		err |= __put_user(from->si_fd, &to->si_fd);
2046 		break;
2047 	case __SI_FAULT:
2048 		err |= __put_user(from->si_addr, &to->si_addr);
2049 #ifdef __ARCH_SI_TRAPNO
2050 		err |= __put_user(from->si_trapno, &to->si_trapno);
2051 #endif
2052 		break;
2053 	case __SI_CHLD:
2054 		err |= __put_user(from->si_pid, &to->si_pid);
2055 		err |= __put_user(from->si_uid, &to->si_uid);
2056 		err |= __put_user(from->si_status, &to->si_status);
2057 		err |= __put_user(from->si_utime, &to->si_utime);
2058 		err |= __put_user(from->si_stime, &to->si_stime);
2059 		break;
2060 	case __SI_RT: /* This is not generated by the kernel as of now. */
2061 	case __SI_MESGQ: /* But this is */
2062 		err |= __put_user(from->si_pid, &to->si_pid);
2063 		err |= __put_user(from->si_uid, &to->si_uid);
2064 		err |= __put_user(from->si_ptr, &to->si_ptr);
2065 		break;
2066 	default: /* this is just in case for now ... */
2067 		err |= __put_user(from->si_pid, &to->si_pid);
2068 		err |= __put_user(from->si_uid, &to->si_uid);
2069 		break;
2070 	}
2071 	return err;
2072 }
2073 
2074 #endif
2075 
2076 asmlinkage long
2077 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2078 		    siginfo_t __user *uinfo,
2079 		    const struct timespec __user *uts,
2080 		    size_t sigsetsize)
2081 {
2082 	int ret, sig;
2083 	sigset_t these;
2084 	struct timespec ts;
2085 	siginfo_t info;
2086 	long timeout = 0;
2087 
2088 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2089 	if (sigsetsize != sizeof(sigset_t))
2090 		return -EINVAL;
2091 
2092 	if (copy_from_user(&these, uthese, sizeof(these)))
2093 		return -EFAULT;
2094 
2095 	/*
2096 	 * Invert the set of allowed signals to get those we
2097 	 * want to block.
2098 	 */
2099 	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2100 	signotset(&these);
2101 
2102 	if (uts) {
2103 		if (copy_from_user(&ts, uts, sizeof(ts)))
2104 			return -EFAULT;
2105 		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2106 		    || ts.tv_sec < 0)
2107 			return -EINVAL;
2108 	}
2109 
2110 	spin_lock_irq(&current->sighand->siglock);
2111 	sig = dequeue_signal(current, &these, &info);
2112 	if (!sig) {
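		/*
		 * Nothing is queued yet.  With no timespec we may sleep
		 * indefinitely; a zero timespec means poll and fail with
		 * -EAGAIN below; a nonzero timespec gets one extra jiffy
		 * so at least the full requested interval elapses.
		 */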
2113 		timeout = MAX_SCHEDULE_TIMEOUT;
2114 		if (uts)
2115 			timeout = (timespec_to_jiffies(&ts)
2116 				   + (ts.tv_sec || ts.tv_nsec));
2117 
2118 		if (timeout) {
2119 			/* None ready -- temporarily unblock the signals we're
2120 			 * interested in while we sleep, so that we'll be
2121 			 * awakened when one of them arrives.  */
2122 			current->real_blocked = current->blocked;
2123 			sigandsets(&current->blocked, &current->blocked, &these);
2124 			recalc_sigpending();
2125 			spin_unlock_irq(&current->sighand->siglock);
2126 
2127 			timeout = schedule_timeout_interruptible(timeout);
2128 
2129 			spin_lock_irq(&current->sighand->siglock);
2130 			sig = dequeue_signal(current, &these, &info);
2131 			current->blocked = current->real_blocked;
2132 			siginitset(&current->real_blocked, 0);
2133 			recalc_sigpending();
2134 		}
2135 	}
2136 	spin_unlock_irq(&current->sighand->siglock);
2137 
2138 	if (sig) {
2139 		ret = sig;
2140 		if (uinfo) {
2141 			if (copy_siginfo_to_user(uinfo, &info))
2142 				ret = -EFAULT;
2143 		}
2144 	} else {
2145 		ret = -EAGAIN;
2146 		if (timeout)
2147 			ret = -EINTR;
2148 	}
2149 
2150 	return ret;
2151 }
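
/*
 * User-space usage sketch for the syscall above, via the libc
 * sigtimedwait() wrapper.  The signal has to be blocked first, otherwise
 * it could be delivered asynchronously instead of being picked up by the
 * wait.  The function is hypothetical.
 */
#if 0
#include <signal.h>
#include <time.h>

static int wait_for_usr1(void)
{
	sigset_t set;
	siginfo_t si;
	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	/* Returns the signal number, or -1 with errno == EAGAIN on timeout. */
	return sigtimedwait(&set, &si, &ts);
}
#endif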
2152 
2153 asmlinkage long
2154 sys_kill(int pid, int sig)
2155 {
2156 	struct siginfo info;
2157 
2158 	info.si_signo = sig;
2159 	info.si_errno = 0;
2160 	info.si_code = SI_USER;
2161 	info.si_pid = current->tgid;
2162 	info.si_uid = current->uid;
2163 
2164 	return kill_something_info(sig, &info, pid);
2165 }
2166 
2167 static int do_tkill(int tgid, int pid, int sig)
2168 {
2169 	int error;
2170 	struct siginfo info;
2171 	struct task_struct *p;
2172 
2173 	error = -ESRCH;
2174 	info.si_signo = sig;
2175 	info.si_errno = 0;
2176 	info.si_code = SI_TKILL;
2177 	info.si_pid = current->tgid;
2178 	info.si_uid = current->uid;
2179 
2180 	read_lock(&tasklist_lock);
2181 	p = find_task_by_pid(pid);
2182 	if (p && (tgid <= 0 || p->tgid == tgid)) {
2183 		error = check_kill_permission(sig, &info, p);
2184 		/*
2185 		 * The null signal is a permissions and process existence
2186 		 * probe.  No signal is actually delivered.
2187 		 */
2188 		if (!error && sig && p->sighand) {
2189 			spin_lock_irq(&p->sighand->siglock);
2190 			handle_stop_signal(sig, p);
2191 			error = specific_send_sig_info(sig, &info, p);
2192 			spin_unlock_irq(&p->sighand->siglock);
2193 		}
2194 	}
2195 	read_unlock(&tasklist_lock);
2196 
2197 	return error;
2198 }
2199 
2200 /**
2201  *  sys_tgkill - send signal to one specific thread
2202  *  @tgid: the thread group ID of the thread
2203  *  @pid: the PID of the thread
2204  *  @sig: signal to be sent
2205  *
2206  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2207  *  exists but no longer belongs to the target thread group.  This guards
2208  *  against threads exiting and their PIDs being reused.
2209  */
2210 asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2211 {
2212 	/* This is only valid for single tasks */
2213 	if (pid <= 0 || tgid <= 0)
2214 		return -EINVAL;
2215 
2216 	return do_tkill(tgid, pid, sig);
2217 }
2218 
2219 /*
2220  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2221  */
2222 asmlinkage long
2223 sys_tkill(int pid, int sig)
2224 {
2225 	/* This is only valid for single tasks */
2226 	if (pid <= 0)
2227 		return -EINVAL;
2228 
2229 	return do_tkill(0, pid, sig);
2230 }
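
/*
 * User-space usage sketch: directing a signal at one specific thread via
 * the tgkill syscall above.  glibc of this era provides no dedicated
 * wrapper, so the raw syscall() interface is used.  The function name is
 * hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <signal.h>

static int signal_one_thread(pid_t tgid, pid_t tid, int sig)
{
	return syscall(SYS_tgkill, tgid, tid, sig);
}
#endif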
2231 
2232 asmlinkage long
2233 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2234 {
2235 	siginfo_t info;
2236 
2237 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2238 		return -EFAULT;
2239 
2240 	/* Not even root can pretend to send signals from the kernel.
2241 	   Nor can they impersonate a kill(), which adds source info.  */
2242 	if (info.si_code >= 0)
2243 		return -EPERM;
2244 	info.si_signo = sig;
2245 
2246 	/* POSIX.1b doesn't mention process groups.  */
2247 	return kill_proc_info(sig, &info, pid);
2248 }
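
/*
 * User-space usage sketch: queueing a signal with a payload.  glibc's
 * sigqueue() ends up in the syscall above with si_code set to SI_QUEUE, a
 * negative value, which is why user-supplied non-negative codes are
 * rejected with -EPERM.  The function name is hypothetical.
 */
#if 0
#include <sys/types.h>
#include <signal.h>

static int send_value(pid_t pid, int value)
{
	union sigval sv;

	sv.sival_int = value;
	return sigqueue(pid, SIGUSR1, sv);
}
#endif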
2249 
2250 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2251 {
2252 	struct k_sigaction *k;
2253 	sigset_t mask;
2254 
2255 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2256 		return -EINVAL;
2257 
2258 	k = &current->sighand->action[sig-1];
2259 
2260 	spin_lock_irq(&current->sighand->siglock);
2261 	if (signal_pending(current)) {
2262 		/*
2263 		 * If there might be a fatal signal pending on multiple
2264 		 * threads, make sure we take it before changing the action.
2265 		 */
2266 		spin_unlock_irq(&current->sighand->siglock);
2267 		return -ERESTARTNOINTR;
2268 	}
2269 
2270 	if (oact)
2271 		*oact = *k;
2272 
2273 	if (act) {
2274 		sigdelsetmask(&act->sa.sa_mask,
2275 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
2276 		*k = *act;
2277 		/*
2278 		 * POSIX 3.3.1.3:
2279 		 *  "Setting a signal action to SIG_IGN for a signal that is
2280 		 *   pending shall cause the pending signal to be discarded,
2281 		 *   whether or not it is blocked."
2282 		 *
2283 		 *  "Setting a signal action to SIG_DFL for a signal that is
2284 		 *   pending and whose default action is to ignore the signal
2285 		 *   (for example, SIGCHLD), shall cause the pending signal to
2286 		 *   be discarded, whether or not it is blocked"
2287 		 */
2288 		if (act->sa.sa_handler == SIG_IGN ||
2289 		   (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
2290 			struct task_struct *t = current;
2291 			sigemptyset(&mask);
2292 			sigaddset(&mask, sig);
2293 			rm_from_queue_full(&mask, &t->signal->shared_pending);
2294 			do {
2295 				rm_from_queue_full(&mask, &t->pending);
2296 				recalc_sigpending_and_wake(t);
2297 				t = next_thread(t);
2298 			} while (t != current);
2299 		}
2300 	}
2301 
2302 	spin_unlock_irq(&current->sighand->siglock);
2303 	return 0;
2304 }
2305 
2306 int
2307 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2308 {
2309 	stack_t oss;
2310 	int error;
2311 
2312 	if (uoss) {
2313 		oss.ss_sp = (void __user *) current->sas_ss_sp;
2314 		oss.ss_size = current->sas_ss_size;
2315 		oss.ss_flags = sas_ss_flags(sp);
2316 	}
2317 
2318 	if (uss) {
2319 		void __user *ss_sp;
2320 		size_t ss_size;
2321 		int ss_flags;
2322 
2323 		error = -EFAULT;
2324 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2325 		    || __get_user(ss_sp, &uss->ss_sp)
2326 		    || __get_user(ss_flags, &uss->ss_flags)
2327 		    || __get_user(ss_size, &uss->ss_size))
2328 			goto out;
2329 
2330 		error = -EPERM;
2331 		if (on_sig_stack(sp))
2332 			goto out;
2333 
2334 		error = -EINVAL;
2335 		/*
2336 		 * Note - this code used to test ss_flags incorrectly.
2337 		 * Old code may have been written using ss_flags == 0
2338 		 * to mean ss_flags == SS_ONSTACK (as this was the only
2339 		 * way that worked), so this check preserves that older
2340 		 * mechanism and accepts 0 as well as SS_ONSTACK and
2341 		 * SS_DISABLE.
2342 		 */
2343 		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2344 			goto out;
2345 
2346 		if (ss_flags == SS_DISABLE) {
2347 			ss_size = 0;
2348 			ss_sp = NULL;
2349 		} else {
2350 			error = -ENOMEM;
2351 			if (ss_size < MINSIGSTKSZ)
2352 				goto out;
2353 		}
2354 
2355 		current->sas_ss_sp = (unsigned long) ss_sp;
2356 		current->sas_ss_size = ss_size;
2357 	}
2358 
2359 	if (uoss) {
2360 		error = -EFAULT;
2361 		if (copy_to_user(uoss, &oss, sizeof(oss)))
2362 			goto out;
2363 	}
2364 
2365 	error = 0;
2366 out:
2367 	return error;
2368 }
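
/*
 * User-space usage sketch: installing an alternate signal stack through
 * sigaltstack(), which lands in do_sigaltstack() above.  A size below
 * MINSIGSTKSZ would be rejected with -ENOMEM.  The function name is
 * hypothetical.
 */
#if 0
#include <signal.h>
#include <stdlib.h>

static int install_altstack(void)
{
	stack_t ss;

	ss.ss_sp = malloc(SIGSTKSZ);
	if (!ss.ss_sp)
		return -1;
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;	/* accepted like SS_ONSTACK, see the check above */
	return sigaltstack(&ss, NULL);
}
#endif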
2369 
2370 #ifdef __ARCH_WANT_SYS_SIGPENDING
2371 
2372 asmlinkage long
2373 sys_sigpending(old_sigset_t __user *set)
2374 {
2375 	return do_sigpending(set, sizeof(*set));
2376 }
2377 
2378 #endif
2379 
2380 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2381 /* Some platforms have their own version with special arguments;
2382    others support only sys_rt_sigprocmask.  */
2383 
2384 asmlinkage long
2385 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2386 {
2387 	int error;
2388 	old_sigset_t old_set, new_set;
2389 
2390 	if (set) {
2391 		error = -EFAULT;
2392 		if (copy_from_user(&new_set, set, sizeof(*set)))
2393 			goto out;
2394 		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2395 
2396 		spin_lock_irq(&current->sighand->siglock);
2397 		old_set = current->blocked.sig[0];
2398 
2399 		error = 0;
2400 		switch (how) {
2401 		default:
2402 			error = -EINVAL;
2403 			break;
2404 		case SIG_BLOCK:
2405 			sigaddsetmask(&current->blocked, new_set);
2406 			break;
2407 		case SIG_UNBLOCK:
2408 			sigdelsetmask(&current->blocked, new_set);
2409 			break;
2410 		case SIG_SETMASK:
2411 			current->blocked.sig[0] = new_set;
2412 			break;
2413 		}
2414 
2415 		recalc_sigpending();
2416 		spin_unlock_irq(&current->sighand->siglock);
2417 		if (error)
2418 			goto out;
2419 		if (oset)
2420 			goto set_old;
2421 	} else if (oset) {
2422 		old_set = current->blocked.sig[0];
2423 	set_old:
2424 		error = -EFAULT;
2425 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2426 			goto out;
2427 	}
2428 	error = 0;
2429 out:
2430 	return error;
2431 }
2432 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2433 
2434 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2435 asmlinkage long
2436 sys_rt_sigaction(int sig,
2437 		 const struct sigaction __user *act,
2438 		 struct sigaction __user *oact,
2439 		 size_t sigsetsize)
2440 {
2441 	struct k_sigaction new_sa, old_sa;
2442 	int ret = -EINVAL;
2443 
2444 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2445 	if (sigsetsize != sizeof(sigset_t))
2446 		goto out;
2447 
2448 	if (act) {
2449 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2450 			return -EFAULT;
2451 	}
2452 
2453 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2454 
2455 	if (!ret && oact) {
2456 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2457 			return -EFAULT;
2458 	}
2459 out:
2460 	return ret;
2461 }
2462 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2463 
2464 #ifdef __ARCH_WANT_SYS_SGETMASK
2465 
2466 /*
2467  * For backwards compatibility.  Functionality superseded by sigprocmask.
2468  */
2469 asmlinkage long
2470 sys_sgetmask(void)
2471 {
2472 	/* SMP safe */
2473 	return current->blocked.sig[0];
2474 }
2475 
2476 asmlinkage long
2477 sys_ssetmask(int newmask)
2478 {
2479 	int old;
2480 
2481 	spin_lock_irq(&current->sighand->siglock);
2482 	old = current->blocked.sig[0];
2483 
2484 	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2485 						  sigmask(SIGSTOP)));
2486 	recalc_sigpending();
2487 	spin_unlock_irq(&current->sighand->siglock);
2488 
2489 	return old;
2490 }
2491 #endif /* __ARCH_WANT_SYS_SGETMASK */
2492 
2493 #ifdef __ARCH_WANT_SYS_SIGNAL
2494 /*
2495  * For backwards compatibility.  Functionality superseded by sigaction.
2496  */
2497 asmlinkage unsigned long
2498 sys_signal(int sig, __sighandler_t handler)
2499 {
2500 	struct k_sigaction new_sa, old_sa;
2501 	int ret;
2502 
2503 	new_sa.sa.sa_handler = handler;
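	/* Old SysV signal() semantics: the handler is reset to SIG_DFL on
	 * delivery and the signal is not blocked while the handler runs. */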
2504 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2505 	sigemptyset(&new_sa.sa.sa_mask);
2506 
2507 	ret = do_sigaction(sig, &new_sa, &old_sa);
2508 
2509 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2510 }
2511 #endif /* __ARCH_WANT_SYS_SIGNAL */
2512 
2513 #ifdef __ARCH_WANT_SYS_PAUSE
2514 
2515 asmlinkage long
2516 sys_pause(void)
2517 {
2518 	current->state = TASK_INTERRUPTIBLE;
2519 	schedule();
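	/*
	 * -ERESTARTNOHAND becomes -EINTR once a handler has run, which is
	 * exactly what pause() is required to return.
	 */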
2520 	return -ERESTARTNOHAND;
2521 }
2522 
2523 #endif
2524 
2525 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2526 asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2527 {
2528 	sigset_t newset;
2529 
2530 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2531 	if (sigsetsize != sizeof(sigset_t))
2532 		return -EINVAL;
2533 
2534 	if (copy_from_user(&newset, unewset, sizeof(newset)))
2535 		return -EFAULT;
2536 	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2537 
2538 	spin_lock_irq(&current->sighand->siglock);
2539 	current->saved_sigmask = current->blocked;
2540 	current->blocked = newset;
2541 	recalc_sigpending();
2542 	spin_unlock_irq(&current->sighand->siglock);
2543 
2544 	current->state = TASK_INTERRUPTIBLE;
2545 	schedule();
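	/*
	 * The original mask cannot simply be restored here: the signal that
	 * woke us still has to be delivered on the return path and may be
	 * blocked in ->saved_sigmask.  TIF_RESTORE_SIGMASK tells the arch
	 * signal code to put the saved mask back once delivery is done.
	 */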
2546 	set_thread_flag(TIF_RESTORE_SIGMASK);
2547 	return -ERESTARTNOHAND;
2548 }
2549 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
2550 
2551 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2552 {
2553 	return NULL;
2554 }
2555 
2556 void __init signals_init(void)
2557 {
2558 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2559 }
2560