// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/task_work.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>
#include <linux/sysctl.h>
#include <uapi/linux/pidfd.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>	/* for syscall_get_* */

#include "time/posix-timers.h"

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers who know it is safe
	 * clear TIF_SIGPENDING themselves.
	 */
	return false;
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
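
/*
 * Illustrative example (not part of the original file): if SIGTERM and
 * SIGSEGV are both pending in the first word and neither is blocked, the
 * SYNCHRONOUS_MASK filtering above makes next_signal() return SIGSEGV
 * first, so fault-generated signals are never starved by async ones:
 *
 *	sigset_t none;
 *	sigemptyset(&none);
 *	sig = next_signal(&tsk->pending, &none);	// SIGSEGV, not SIGTERM
 */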

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->sighand->siglock guarantees that @task->parent points
 * to the ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}

static struct ucounts *sig_get_ucounts(struct task_struct *t, int sig,
				       int override_rlimit)
{
	struct ucounts *ucounts;
	long sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	ucounts = task_ucounts(t);
	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING,
					    override_rlimit);
	rcu_read_unlock();
	if (!sigpending)
		return NULL;

	if (unlikely(!override_rlimit && sigpending > task_rlimit(t, RLIMIT_SIGPENDING))) {
		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
		print_dropped_signal(sig);
		return NULL;
	}

	return ucounts;
}

static void __sigqueue_init(struct sigqueue *q, struct ucounts *ucounts,
			    const unsigned int sigqueue_flags)
{
	INIT_LIST_HEAD(&q->list);
	q->flags = sigqueue_flags;
	q->ucounts = ucounts;
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
				       int override_rlimit)
{
	struct ucounts *ucounts = sig_get_ucounts(t, sig, override_rlimit);
	struct sigqueue *q;

	if (!ucounts)
		return NULL;

	q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
	if (!q) {
		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
		return NULL;
	}

	__sigqueue_init(q, ucounts, 0);
	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC) {
		posixtimer_sigqueue_putref(q);
		return;
	}
	if (q->ucounts) {
		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
		q->ucounts = NULL;
	}
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);
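
/*
 * Illustrative sketch (assumption, not from this file): a kthread that
 * opted in to a signal via allow_signal() typically drains delivery like
 * this, since kthreads have no userspace handlers to run:
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		...
 *		if (signal_pending(current))
 *			flush_signals(current);
 *	}
 */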

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];

	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* If dying, we handle all new signals by ignoring them */
	if (fatal_signal_pending(tsk))
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   struct sigqueue **timer_sigq)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		/*
		 * posix-timer signals are preallocated and freed when the last
		 * reference count is dropped in posixtimer_deliver_signal() or
		 * immediately on timer deletion when the signal is not pending.
		 * Spare the extra round through __sigqueue_free() which is
		 * ignoring preallocated signals.
		 */
		if (unlikely((first->flags & SIGQUEUE_PREALLOC) && (info->si_code == SI_TIMER)))
			*timer_sigq = first;
		else
			__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, struct sigqueue **timer_sigq)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, timer_sigq);
	return sig;
}

/*
 * Try to dequeue a signal. If a deliverable signal is found fill in the
 * caller provided siginfo and return the signal number. Otherwise return
 * 0.
 */
int dequeue_signal(sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type)
{
	struct task_struct *tsk = current;
	struct sigqueue *timer_sigq;
	int signr;

	lockdep_assert_held(&tsk->sighand->siglock);

again:
	*type = PIDTYPE_PID;
	timer_sigq = NULL;
	signr = __dequeue_signal(&tsk->pending, mask, info, &timer_sigq);
	if (!signr) {
		*type = PIDTYPE_TGID;
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &timer_sigq);

		if (unlikely(signr == SIGALRM))
			posixtimer_rearm_itimer(tsk);
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}

	if (IS_ENABLED(CONFIG_POSIX_TIMERS) && unlikely(timer_sigq)) {
		if (!posixtimer_deliver_signal(info, timer_sigq))
			goto again;
	}

	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);
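
/*
 * Example: callers must hold ->siglock and pass the blocked mask; the
 * kernel_dequeue_signal() helper in <linux/signal.h> wraps this roughly as:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	ret = dequeue_signal(&current->blocked, &info, &type);
 *	spin_unlock_irq(&current->sighand->siglock);
 */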

static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and local interrupts must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	lockdep_assert_held(&t->sighand->siglock);

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
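
/*
 * Illustrative note: the signal_wake_up() wrapper in <linux/sched/signal.h>
 * funnels here, roughly as
 *
 *	signal_wake_up(t, fatal);	// -> signal_wake_up_state(t, fatal ? TASK_WAKEKILL | ... : 0)
 *
 * i.e. a fatal wakeup adds TASK_WAKEKILL so that killable sleeps are also
 * interrupted (the exact state bits vary by kernel version).
 */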

static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q);

static void sigqueue_free_ignored(struct task_struct *tsk, struct sigqueue *q)
{
	if (likely(!(q->flags & SIGQUEUE_PREALLOC) || q->info.si_code != SI_TIMER))
		__sigqueue_free(q);
	else
		posixtimer_sig_ignore(tsk, q);
}

/* Remove signals in mask from the pending set and queue. */
static void flush_sigqueue_mask(struct task_struct *p, sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	lockdep_assert_held(&p->sighand->siglock);

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			sigqueue_free_ignored(p, q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	lockdep_assert_held(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & SIGNAL_GROUP_EXIT) {
		if (signal->core_state)
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, drop the signal.
		 */
		return false;
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(p, &flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(p, &flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(p, &flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(p, &flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED))) {
				t->jobctl &= ~JOBCTL_STOPPED;
				wake_up_state(t, __TASK_STOPPED);
			} else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent.  See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * Try the suggested task first (may or may not be the main thread).
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			__for_each_thread(signal, t) {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			}
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

static int __send_signal_locked(int sig, struct kernel_siginfo *info,
				struct task_struct *t, enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	lockdep_assert_held(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);

	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;

		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;

			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;

	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

int send_signal_locked(int sig, struct kernel_siginfo *info,
		       struct task_struct *t, enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP are special, or the siginfo has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);

			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal_locked(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = task_pt_regs(current);
	struct file *exe_file;

	exe_file = get_task_exe_file(current);
	if (exe_file) {
		pr_info("%pD: %s: potentially unexpected fatal signal %d.\n",
			exe_file, current->comm, signr);
		fput(exe_file);
	} else {
		pr_info("%s: potentially unexpected fatal signal %d.\n",
			current->comm, signr);
	}

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;

		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
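
/*
 * Usage note (illustrative): the diagnostics above are enabled with the
 * boot parameter "print-fatal-signals=1" or, at runtime, through the
 * kernel.print-fatal-signals sysctl (/proc/sys/kernel/print-fatal-signals).
 */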

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
		     enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal_locked(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
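
/*
 * Illustrative sketch: most in-kernel senders reach this through thin
 * wrappers such as send_sig_info() below, e.g. roughly
 *
 *	ret = do_send_sig_info(SIGTERM, SEND_SIG_PRIV, task, PIDTYPE_PID);
 *
 * lock_task_sighand() fails only when the task is already dead, which is
 * why -ESRCH is the default return value.
 */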

enum sig_handler {
	HANDLER_CURRENT, /* If reachable use the current handler */
	HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
	HANDLER_EXIT,	 /* Only visible as the process exit code */
};

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
	enum sig_handler handler)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored || (handler != HANDLER_CURRENT)) {
		action->sa.sa_handler = SIG_DFL;
		if (handler == HANDLER_EXIT)
			action->sa.sa_flags |= SA_IMMUTABLE;
		if (blocked)
			sigdelset(&t->blocked, sig);
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
	 */
	if (action->sa.sa_handler == SIG_DFL &&
	    (!t->ptrace || (handler == HANDLER_EXIT)))
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
	/* This can happen if the signal was already pending and blocked */
	if (!task_sigpending(t))
		signal_wake_up(t, 0);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;
	int count = 0;

	p->signal->group_stop_count = 0;

	for_other_threads(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

#ifdef CONFIG_LOCKDEP
void lockdep_assert_task_sighand_held(struct task_struct *task)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	sighand = rcu_dereference(task->sighand);
	if (sighand)
		lockdep_assert_held(&sighand->siglock);
	else
		WARN_ON_ONCE(1);
	rcu_read_unlock();
}
#endif

/*
 * send signal info to all the members of a thread group or to the
 * individual thread if type == PIDTYPE_PID.
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int ret = -ESRCH;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		/*
		 * If group_send_sig_info() succeeds at least once ret
		 * becomes 0 and after that the code below has no effect.
		 * Otherwise we return the last err or -ESRCH if this
		 * process group is empty.
		 */
		if (ret)
			ret = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return ret;
}

static int kill_pid_info_type(int sig, struct kernel_siginfo *info,
			      struct pid *pid, enum pid_type type)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, type);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;
		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	return kill_pid_info_type(sig, info, pid, PIDTYPE_TGID);
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace, as the 32bit address will be encoded in the
 * low 32bits of the pointer.  Those low 32bits will be stored at a
 * higher address than appears in a 32 bit pointer, so userspace will
 * not see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_ptr of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		return ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
			    !same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);
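
/*
 * Example (illustrative): drivers typically signal a task with priv=1,
 * which selects SEND_SIG_PRIV, i.e. kernel-generated semantics (see the
 * force logic in send_signal_locked() above):
 *
 *	send_sig(SIGKILL, task, 1);
 */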

void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

void force_fatal_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
}

void force_exit_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_EXIT);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig)
{
	if (sig == SIGSEGV)
		force_fatal_sig(SIGSEGV);
	else
		force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr,
			    struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
	return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
}

int force_sig_fault(int sig, int code, void __user *addr)
{
	return force_sig_fault_to_task(sig, code, addr, current);
}
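
/*
 * Example (illustrative): an architecture page-fault handler reporting a
 * bad user access does something like the following, where fault_address
 * is a hypothetical variable holding the faulting user address:
 *
 *	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)fault_address);
 */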

int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);
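
/*
 * Illustrative sketch (assumption about the caller): the memory-failure
 * code reports a poisoned page to the owning task roughly as
 *
 *	send_sig_mceerr(BUS_MCEERR_AO, (void __user *)addr, PAGE_SHIFT, t);
 *
 * where the lsb argument tells userspace the granularity of the damage.
 */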

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(&info);
}
#endif

int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo     = SIGTRAP;
	info.si_errno     = 0;
	info.si_code      = TRAP_PERF;
	info.si_addr      = addr;
	info.si_perf_data = sig_data;
	info.si_perf_type = type;

	/*
	 * Signals generated by perf events should not terminate the whole
	 * process if SIGTRAP is blocked, however, delivering the signal
	 * asynchronously is better than not delivering at all.  But tell user
	 * space if the signal was asynchronous, so it can clearly be
	 * distinguished from normal synchronous ones.
	 */
	info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
				     TRAP_PERF_FLAG_ASYNC :
				     0;

	return send_sig_info(info.si_signo, &info, current);
}
1801
1802 /**
1803 * force_sig_seccomp - signals the task to allow in-process syscall emulation
1804 * @syscall: syscall number to send to userland
1805 * @reason: filter-supplied reason code to send to userland (via si_errno)
1806 * @force_coredump: true to trigger a coredump
1807 *
1808 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
1809 */
1810 int force_sig_seccomp(int syscall, int reason, bool force_coredump)
1811 {
1812 struct kernel_siginfo info;
1813
1814 clear_siginfo(&info);
1815 info.si_signo = SIGSYS;
1816 info.si_code = SYS_SECCOMP;
1817 info.si_call_addr = (void __user *)KSTK_EIP(current);
1818 info.si_errno = reason;
1819 info.si_arch = syscall_get_arch(current);
1820 info.si_syscall = syscall;
1821 return force_sig_info_to_task(&info, current,
1822 force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
1823 }
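
/*
 * Illustrative sketch (editorial): a SECCOMP_RET_TRAP filter verdict is
 * surfaced to userspace roughly as
 *
 *	force_sig_seccomp(this_syscall, ret & SECCOMP_RET_DATA, false);
 *
 * while a fatal SECCOMP_RET_KILL* verdict may pass force_coredump == true
 * to get HANDLER_EXIT semantics and a core dump.
 */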
1824
1825 /* For the crazy architectures that include trap information in
1826 * the errno field, instead of an actual errno value.
1827 */
1828 int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1829 {
1830 struct kernel_siginfo info;
1831
1832 clear_siginfo(&info);
1833 info.si_signo = SIGTRAP;
1834 info.si_errno = errno;
1835 info.si_code = TRAP_HWBKPT;
1836 info.si_addr = addr;
1837 return force_sig_info(&info);
1838 }
1839
1840 /* For the rare architectures that include trap information using
1841 * si_trapno.
1842 */
1843 int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
1844 {
1845 struct kernel_siginfo info;
1846
1847 clear_siginfo(&info);
1848 info.si_signo = sig;
1849 info.si_errno = 0;
1850 info.si_code = code;
1851 info.si_addr = addr;
1852 info.si_trapno = trapno;
1853 return force_sig_info(&info);
1854 }
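
/*
 * Illustrative sketch (editorial): an architecture trap handler on such
 * a platform might report an illegal instruction together with the
 * hardware trap number roughly as (@regs and @trapno being the
 * handler's own, hypothetical state):
 *
 *	force_sig_fault_trapno(SIGILL, ILL_ILLOPC,
 *			       (void __user *)regs->pc, trapno);
 */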
1855
1856 /* For the rare architectures that include trap information using
1857 * si_trapno.
1858 */
1859 int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
1860 struct task_struct *t)
1861 {
1862 struct kernel_siginfo info;
1863
1864 clear_siginfo(&info);
1865 info.si_signo = sig;
1866 info.si_errno = 0;
1867 info.si_code = code;
1868 info.si_addr = addr;
1869 info.si_trapno = trapno;
1870 return send_sig_info(info.si_signo, &info, t);
1871 }
1872
1873 static int kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1874 {
1875 int ret;
1876 read_lock(&tasklist_lock);
1877 ret = __kill_pgrp_info(sig, info, pgrp);
1878 read_unlock(&tasklist_lock);
1879 return ret;
1880 }
1881
1882 int kill_pgrp(struct pid *pid, int sig, int priv)
1883 {
1884 return kill_pgrp_info(sig, __si_special(priv), pid);
1885 }
1886 EXPORT_SYMBOL(kill_pgrp);
1887
1888 int kill_pid(struct pid *pid, int sig, int priv)
1889 {
1890 return kill_pid_info(sig, __si_special(priv), pid);
1891 }
1892 EXPORT_SYMBOL(kill_pid);
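
/*
 * Illustrative sketch (editorial): kill_pgrp()/kill_pid() are what
 * drivers use to signal a job or a single process. A tty-style hangup
 * of the controlling process group could look roughly like (@pgrp being
 * a struct pid reference the caller already holds):
 *
 *	kill_pgrp(pgrp, SIGHUP, 1);
 *	kill_pgrp(pgrp, SIGCONT, 1);
 *
 * A non-zero @priv marks the signal as kernel-generated, so it is not
 * subject to the usual permission checks against the sender's creds.
 */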
1893
1894 #ifdef CONFIG_POSIX_TIMERS
1895 /*
1896 * These functions handle POSIX timer signals. POSIX timers use
1897 * preallocated sigqueue structs for sending signals.
1898 */
1899 static void __flush_itimer_signals(struct sigpending *pending)
1900 {
1901 sigset_t signal, retain;
1902 struct sigqueue *q, *n;
1903
1904 signal = pending->signal;
1905 sigemptyset(&retain);
1906
1907 list_for_each_entry_safe(q, n, &pending->list, list) {
1908 int sig = q->info.si_signo;
1909
1910 if (likely(q->info.si_code != SI_TIMER)) {
1911 sigaddset(&retain, sig);
1912 } else {
1913 sigdelset(&signal, sig);
1914 list_del_init(&q->list);
1915 __sigqueue_free(q);
1916 }
1917 }
1918
1919 sigorsets(&pending->signal, &signal, &retain);
1920 }
1921
1922 void flush_itimer_signals(void)
1923 {
1924 struct task_struct *tsk = current;
1925
1926 guard(spinlock_irqsave)(&tsk->sighand->siglock);
1927 __flush_itimer_signals(&tsk->pending);
1928 __flush_itimer_signals(&tsk->signal->shared_pending);
1929 }
1930
1931 bool posixtimer_init_sigqueue(struct sigqueue *q)
1932 {
1933 struct ucounts *ucounts = sig_get_ucounts(current, -1, 0);
1934
1935 if (!ucounts)
1936 return false;
1937 clear_siginfo(&q->info);
1938 __sigqueue_init(q, ucounts, SIGQUEUE_PREALLOC);
1939 return true;
1940 }
1941
1942 static void posixtimer_queue_sigqueue(struct sigqueue *q, struct task_struct *t, enum pid_type type)
1943 {
1944 struct sigpending *pending;
1945 int sig = q->info.si_signo;
1946
1947 signalfd_notify(t, sig);
1948 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1949 list_add_tail(&q->list, &pending->list);
1950 sigaddset(&pending->signal, sig);
1951 complete_signal(sig, t, type);
1952 }
1953
1954 /*
1955 * This function is used by POSIX timers to deliver a timer signal.
1956 * Where type is PIDTYPE_PID (such as for timers with SIGEV_THREAD_ID
1957 * set), the signal must be delivered to the specific thread (queues
1958 * into t->pending).
1959 *
1960 * Where type is not PIDTYPE_PID, signals must be delivered to the
1961 * process. In this case, prefer to deliver to current if it is in
1962 * the same thread group as the target process and its sighand is
1963 * stable, which avoids unnecessarily waking up a potentially idle task.
1964 */
1965 static inline struct task_struct *posixtimer_get_target(struct k_itimer *tmr)
1966 {
1967 struct task_struct *t = pid_task(tmr->it_pid, tmr->it_pid_type);
1968
1969 if (t && tmr->it_pid_type != PIDTYPE_PID &&
1970 same_thread_group(t, current) && !current->exit_state)
1971 t = current;
1972 return t;
1973 }
1974
1975 void posixtimer_send_sigqueue(struct k_itimer *tmr)
1976 {
1977 struct sigqueue *q = &tmr->sigq;
1978 int sig = q->info.si_signo;
1979 struct task_struct *t;
1980 unsigned long flags;
1981 int result;
1982
1983 guard(rcu)();
1984
1985 t = posixtimer_get_target(tmr);
1986 if (!t)
1987 return;
1988
1989 if (!likely(lock_task_sighand(t, &flags)))
1990 return;
1991
1992 /*
1993 * Update @tmr::sigqueue_seq for posix timer signals with sighand
1994 * locked to prevent a race against dequeue_signal().
1995 */
1996 tmr->it_sigqueue_seq = tmr->it_signal_seq;
1997
1998 /*
1999 * Set the signal delivery status under sighand lock, so that the
2000 * ignored signal handling can distinguish between a periodic and a
2001 * non-periodic timer.
2002 */
2003 tmr->it_sig_periodic = tmr->it_status == POSIX_TIMER_REQUEUE_PENDING;
2004
2005 if (!prepare_signal(sig, t, false)) {
2006 result = TRACE_SIGNAL_IGNORED;
2007
2008 if (!list_empty(&q->list)) {
2009 /*
2010 * If task group is exiting with the signal already pending,
2011 * wait for __exit_signal() to do its job. Otherwise if
2012 * ignored, it's not supposed to be queued. Try to survive.
2013 */
2014 WARN_ON_ONCE(!(t->signal->flags & SIGNAL_GROUP_EXIT));
2015 goto out;
2016 }
2017
2018 /* Periodic timers with SIG_IGN are queued on the ignored list */
2019 if (tmr->it_sig_periodic) {
2020 /*
2021 * Already queued means the timer was rearmed after
2022 * the previous expiry got it on the ignore list.
2023 * Nothing to do for that case.
2024 */
2025 if (hlist_unhashed(&tmr->ignored_list)) {
2026 /*
2027 * Take a signal reference and queue it on
2028 * the ignored list.
2029 */
2030 posixtimer_sigqueue_getref(q);
2031 posixtimer_sig_ignore(t, q);
2032 }
2033 } else if (!hlist_unhashed(&tmr->ignored_list)) {
2034 /*
2035 * Covers the case where a timer was periodic and
2036 * then the signal was ignored. Later it was rearmed
2037 * as a oneshot timer. The previous signal is invalid
2038 * now, and this oneshot signal has to be dropped.
2039 * Remove it from the ignored list and drop the
2040 * reference count as the signal is no longer
2041 * queued.
2042 */
2043 hlist_del_init(&tmr->ignored_list);
2044 posixtimer_putref(tmr);
2045 }
2046 goto out;
2047 }
2048
2049 /* This should never happen and would leak a reference count */
2050 if (WARN_ON_ONCE(!hlist_unhashed(&tmr->ignored_list)))
2051 hlist_del_init(&tmr->ignored_list);
2052
2053 if (unlikely(!list_empty(&q->list))) {
2054 /* This holds a reference count already */
2055 result = TRACE_SIGNAL_ALREADY_PENDING;
2056 goto out;
2057 }
2058
2059 posixtimer_sigqueue_getref(q);
2060 posixtimer_queue_sigqueue(q, t, tmr->it_pid_type);
2061 result = TRACE_SIGNAL_DELIVERED;
2062 out:
2063 trace_signal_generate(sig, &q->info, t, tmr->it_pid_type != PIDTYPE_PID, result);
2064 unlock_task_sighand(t, &flags);
2065 }
2066
2067 static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q)
2068 {
2069 struct k_itimer *tmr = container_of(q, struct k_itimer, sigq);
2070
2071 /*
2072 * If the timer is marked deleted already or the signal originates
2073 * from a non-periodic timer, then just drop the reference
2074 * count. Otherwise queue it on the ignored list.
2075 */
2076 if (tmr->it_signal && tmr->it_sig_periodic)
2077 hlist_add_head(&tmr->ignored_list, &tsk->signal->ignored_posix_timers);
2078 else
2079 posixtimer_putref(tmr);
2080 }
2081
2082 static void posixtimer_sig_unignore(struct task_struct *tsk, int sig)
2083 {
2084 struct hlist_head *head = &tsk->signal->ignored_posix_timers;
2085 struct hlist_node *tmp;
2086 struct k_itimer *tmr;
2087
2088 if (likely(hlist_empty(head)))
2089 return;
2090
2091 /*
2092 * Rearming a timer with sighand lock held is not possible due to
2093 * lock ordering vs. tmr::it_lock. Just stick the sigqueue back and
2094 * let the signal delivery path decide whether it needs to be
2095 * rearmed or not. This cannot be decided here w/o dropping sighand
2096 * lock and creating a loop retry horror show.
2097 */
2098 hlist_for_each_entry_safe(tmr, tmp, head, ignored_list) {
2099 struct task_struct *target;
2100
2101 /*
2102 * tmr::sigq.info.si_signo is immutable, so accessing it
2103 * without holding tmr::it_lock is safe.
2104 */
2105 if (tmr->sigq.info.si_signo != sig)
2106 continue;
2107
2108 hlist_del_init(&tmr->ignored_list);
2109
2110 /* This should never happen and would leak a reference count */
2111 if (WARN_ON_ONCE(!list_empty(&tmr->sigq.list)))
2112 continue;
2113
2114 /*
2115 * Get the target for the signal. If target is a thread and
2116 * has exited by now, drop the reference count.
2117 */
2118 guard(rcu)();
2119 target = posixtimer_get_target(tmr);
2120 if (target)
2121 posixtimer_queue_sigqueue(&tmr->sigq, target, tmr->it_pid_type);
2122 else
2123 posixtimer_putref(tmr);
2124 }
2125 }
2126 #else /* CONFIG_POSIX_TIMERS */
2127 static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q) { }
2128 static inline void posixtimer_sig_unignore(struct task_struct *tsk, int sig) { }
2129 #endif /* !CONFIG_POSIX_TIMERS */
2130
2131 void do_notify_pidfd(struct task_struct *task)
2132 {
2133 struct pid *pid = task_pid(task);
2134
2135 WARN_ON(task->exit_state == 0);
2136
2137 __wake_up(&pid->wait_pidfd, TASK_NORMAL, 0,
2138 poll_to_key(EPOLLIN | EPOLLRDNORM));
2139 }
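
/*
 * Illustrative userspace sketch (editorial): the wakeup above is what
 * makes a pidfd report readiness once the process has exited, e.g.:
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);	// returns once the process has exited
 */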
2140
2141 /*
2142 * Let a parent know about the death of a child.
2143 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2144 *
2145 * Returns true if our parent ignored us and so we've switched to
2146 * self-reaping.
2147 */
2148 bool do_notify_parent(struct task_struct *tsk, int sig)
2149 {
2150 struct kernel_siginfo info;
2151 unsigned long flags;
2152 struct sighand_struct *psig;
2153 bool autoreap = false;
2154 u64 utime, stime;
2155
2156 WARN_ON_ONCE(sig == -1);
2157
2158 /* do_notify_parent_cldstop should have been called instead. */
2159 WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
2160
2161 WARN_ON_ONCE(!tsk->ptrace &&
2162 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
2163 /*
2164 * If @tsk is a group leader with no other threads, wake up the
2165 * non-PIDFD_THREAD waiters.
2166 */
2167 if (thread_group_empty(tsk))
2168 do_notify_pidfd(tsk);
2169
2170 if (sig != SIGCHLD) {
2171 /*
2172 * This is only possible if parent == real_parent.
2173 * Check if it has changed security domain.
2174 */
2175 if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2176 sig = SIGCHLD;
2177 }
2178
2179 clear_siginfo(&info);
2180 info.si_signo = sig;
2181 info.si_errno = 0;
2182 /*
2183 * We are under tasklist_lock here so our parent is tied to
2184 * us and cannot change.
2185 *
2186 * task_active_pid_ns will always return the same pid namespace
2187 * until a task passes through release_task.
2188 *
2189 * write_lock() currently calls preempt_disable() which is the
2190 * same as rcu_read_lock(), but according to Oleg it is not
2191 * correct to rely on this.
2192 */
2193 rcu_read_lock();
2194 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2195 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2196 task_uid(tsk));
2197 rcu_read_unlock();
2198
2199 task_cputime(tsk, &utime, &stime);
2200 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2201 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2202
2203 info.si_status = tsk->exit_code & 0x7f;
2204 if (tsk->exit_code & 0x80)
2205 info.si_code = CLD_DUMPED;
2206 else if (tsk->exit_code & 0x7f)
2207 info.si_code = CLD_KILLED;
2208 else {
2209 info.si_code = CLD_EXITED;
2210 info.si_status = tsk->exit_code >> 8;
2211 }
2212
2213 psig = tsk->parent->sighand;
2214 spin_lock_irqsave(&psig->siglock, flags);
2215 if (!tsk->ptrace && sig == SIGCHLD &&
2216 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2217 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2218 /*
2219 * We are exiting and our parent doesn't care. POSIX.1
2220 * defines special semantics for setting SIGCHLD to SIG_IGN
2221 * or setting the SA_NOCLDWAIT flag: we should be reaped
2222 * automatically and not left for our parent's wait4 call.
2223 * Rather than having the parent do it as a magic kind of
2224 * signal handler, we just set this to tell do_exit that we
2225 * can be cleaned up without becoming a zombie. Note that
2226 * we still call __wake_up_parent in this case, because a
2227 * blocked sys_wait4 might now return -ECHILD.
2228 *
2229 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2230 * is implementation-defined: we do (if you don't want
2231 * it, just use SIG_IGN instead).
2232 */
2233 autoreap = true;
2234 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2235 sig = 0;
2236 }
2237 /*
2238 * Send with __send_signal as si_pid and si_uid are in the
2239 * parent's namespaces.
2240 */
2241 if (valid_signal(sig) && sig)
2242 __send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2243 __wake_up_parent(tsk, tsk->parent);
2244 spin_unlock_irqrestore(&psig->siglock, flags);
2245
2246 return autoreap;
2247 }
2248
2249 /**
2250 * do_notify_parent_cldstop - notify parent of stopped/continued state change
2251 * @tsk: task reporting the state change
2252 * @for_ptracer: the notification is for ptracer
2253 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2254 *
2255 * Notify @tsk's parent that the stopped/continued state has changed. If
2256 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
2257 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2258 *
2259 * CONTEXT:
2260 * Must be called with tasklist_lock at least read locked.
2261 */
2262 static void do_notify_parent_cldstop(struct task_struct *tsk,
2263 bool for_ptracer, int why)
2264 {
2265 struct kernel_siginfo info;
2266 unsigned long flags;
2267 struct task_struct *parent;
2268 struct sighand_struct *sighand;
2269 u64 utime, stime;
2270
2271 if (for_ptracer) {
2272 parent = tsk->parent;
2273 } else {
2274 tsk = tsk->group_leader;
2275 parent = tsk->real_parent;
2276 }
2277
2278 clear_siginfo(&info);
2279 info.si_signo = SIGCHLD;
2280 info.si_errno = 0;
2281 /*
2282 * see comment in do_notify_parent() about the following 4 lines
2283 */
2284 rcu_read_lock();
2285 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2286 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2287 rcu_read_unlock();
2288
2289 task_cputime(tsk, &utime, &stime);
2290 info.si_utime = nsec_to_clock_t(utime);
2291 info.si_stime = nsec_to_clock_t(stime);
2292
2293 info.si_code = why;
2294 switch (why) {
2295 case CLD_CONTINUED:
2296 info.si_status = SIGCONT;
2297 break;
2298 case CLD_STOPPED:
2299 info.si_status = tsk->signal->group_exit_code & 0x7f;
2300 break;
2301 case CLD_TRAPPED:
2302 info.si_status = tsk->exit_code & 0x7f;
2303 break;
2304 default:
2305 BUG();
2306 }
2307
2308 sighand = parent->sighand;
2309 spin_lock_irqsave(&sighand->siglock, flags);
2310 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2311 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2312 send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
2313 /*
2314 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2315 */
2316 __wake_up_parent(tsk, parent);
2317 spin_unlock_irqrestore(&sighand->siglock, flags);
2318 }
2319
2320 /*
2321 * This must be called with current->sighand->siglock held.
2322 *
2323 * This should be the path for all ptrace stops.
2324 * We always set current->last_siginfo while stopped here.
2325 * That makes it a way to test a stopped process for
2326 * being ptrace-stopped vs being job-control-stopped.
2327 *
2328 * Returns the signal the ptracer requested the code resume
2329 * with. If the code did not stop because the tracer is gone,
2330 * the stop signal remains unchanged unless clear_code.
2331 */
2332 static int ptrace_stop(int exit_code, int why, unsigned long message,
2333 kernel_siginfo_t *info)
2334 __releases(&current->sighand->siglock)
2335 __acquires(&current->sighand->siglock)
2336 {
2337 bool gstop_done = false;
2338
2339 if (arch_ptrace_stop_needed()) {
2340 /*
2341 * The arch code has something special to do before a
2342 * ptrace stop. This is allowed to block, e.g. for faults
2343 * on user stack pages. We can't keep the siglock while
2344 * calling arch_ptrace_stop, so we must release it now.
2345 * To preserve proper semantics, we must do this before
2346 * any signal bookkeeping like checking group_stop_count.
2347 */
2348 spin_unlock_irq(&current->sighand->siglock);
2349 arch_ptrace_stop();
2350 spin_lock_irq(&current->sighand->siglock);
2351 }
2352
2353 /*
2354 * After this point ptrace_signal_wake_up or signal_wake_up
2355 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
2356 * signal comes in. Handle previous ptrace_unlinks and fatal
2357 * signals here to prevent ptrace_stop() from sleeping in schedule().
2358 */
2359 if (!current->ptrace || __fatal_signal_pending(current))
2360 return exit_code;
2361
2362 set_special_state(TASK_TRACED);
2363 current->jobctl |= JOBCTL_TRACED;
2364
2365 /*
2366 * We're committing to trapping. TRACED should be visible before
2367 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2368 * Also, transition to TRACED and updates to ->jobctl should be
2369 * atomic with respect to siglock and should be done after the arch
2370 * hook as siglock is released and regrabbed across it.
2371 *
2372 * TRACER TRACEE
2373 *
2374 * ptrace_attach()
2375 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2376 * do_wait()
2377 * set_current_state() smp_wmb();
2378 * ptrace_do_wait()
2379 * wait_task_stopped()
2380 * task_stopped_code()
2381 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
2382 */
2383 smp_wmb();
2384
2385 current->ptrace_message = message;
2386 current->last_siginfo = info;
2387 current->exit_code = exit_code;
2388
2389 /*
2390 * If @why is CLD_STOPPED, we're trapping to participate in a group
2391 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
2392 * across siglock relocks since INTERRUPT was scheduled, PENDING
2393 * could be clear now. We act as if SIGCONT is received after
2394 * TASK_TRACED is entered - ignore it.
2395 */
2396 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2397 gstop_done = task_participate_group_stop(current);
2398
2399 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2400 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2401 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2402 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2403
2404 /* entering a trap, clear TRAPPING */
2405 task_clear_jobctl_trapping(current);
2406
2407 spin_unlock_irq(&current->sighand->siglock);
2408 read_lock(&tasklist_lock);
2409 /*
2410 * Notify parents of the stop.
2411 *
2412 * While ptraced, there are two parents - the ptracer and
2413 * the real_parent of the group_leader. The ptracer should
2414 * know about every stop while the real parent is only
2415 * interested in the completion of group stop. The states
2416 * for the two don't interact with each other. Notify
2417 * separately unless they're gonna be duplicates.
2418 */
2419 if (current->ptrace)
2420 do_notify_parent_cldstop(current, true, why);
2421 if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
2422 do_notify_parent_cldstop(current, false, why);
2423
2424 /*
2425 * The previous do_notify_parent_cldstop() invocation woke ptracer.
2426 * On a PREEMPTION kernel this can result in a preemption requirement
2427 * which will be fulfilled after read_unlock() and the ptracer will be
2428 * put on the CPU.
2429 * The ptracer is in wait_task_inactive(, __TASK_TRACED) waiting for
2430 * this task to wait in schedule(). If this task gets preempted then it
2431 * remains enqueued on the runqueue. The ptracer will observe this and
2432 * then sleep for a delay of one HZ tick. In the meantime this task
2433 * gets scheduled, enters schedule() and will wait for the ptracer.
2434 *
2435 * This preemption point is not bad from a correctness point of
2436 * view but extends the runtime by one HZ tick due to the
2437 * ptracer's sleep. The preempt-disable section ensures that there
2438 * will be no preemption between unlock and schedule() and so
2439 * improving the performance since the ptracer will observe that
2440 * the tracee is scheduled out once it gets on the CPU.
2441 *
2442 * On PREEMPT_RT locking tasklist_lock does not disable preemption.
2443 * Therefore the task can be preempted after do_notify_parent_cldstop()
2444 * before unlocking tasklist_lock so there is no benefit in doing this.
2445 *
2446 * In fact disabling preemption is harmful on PREEMPT_RT because
2447 * the spinlock_t in cgroup_enter_frozen() must not be acquired
2448 * with preemption disabled due to the 'sleeping' spinlock
2449 * substitution of RT.
2450 */
2451 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2452 preempt_disable();
2453 read_unlock(&tasklist_lock);
2454 cgroup_enter_frozen();
2455 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2456 preempt_enable_no_resched();
2457 schedule();
2458 cgroup_leave_frozen(true);
2459
2460 /*
2461 * We are back. Now reacquire the siglock before touching
2462 * last_siginfo, so that we are sure to have synchronized with
2463 * any signal-sending on another CPU that wants to examine it.
2464 */
2465 spin_lock_irq(&current->sighand->siglock);
2466 exit_code = current->exit_code;
2467 current->last_siginfo = NULL;
2468 current->ptrace_message = 0;
2469 current->exit_code = 0;
2470
2471 /* LISTENING can be set only during STOP traps, clear it */
2472 current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);
2473
2474 /*
2475 * Queued signals ignored us while we were stopped for tracing.
2476 * So check for any that we should take before resuming user mode.
2477 * This sets TIF_SIGPENDING, but never clears it.
2478 */
2479 recalc_sigpending_tsk(current);
2480 return exit_code;
2481 }
2482
2483 static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
2484 {
2485 kernel_siginfo_t info;
2486
2487 clear_siginfo(&info);
2488 info.si_signo = signr;
2489 info.si_code = exit_code;
2490 info.si_pid = task_pid_vnr(current);
2491 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2492
2493 /* Let the debugger run. */
2494 return ptrace_stop(exit_code, why, message, &info);
2495 }
2496
2497 int ptrace_notify(int exit_code, unsigned long message)
2498 {
2499 int signr;
2500
2501 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2502 if (unlikely(task_work_pending(current)))
2503 task_work_run();
2504
2505 spin_lock_irq(&current->sighand->siglock);
2506 signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
2507 spin_unlock_irq(&current->sighand->siglock);
2508 return signr;
2509 }
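
/*
 * Illustrative sketch (editorial): ptrace_event()-style callers encode
 * the event number in the upper bits of exit_code, roughly (@old_vpid
 * being a hypothetical message payload):
 *
 *	ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP, old_vpid);
 *
 * which satisfies the BUG_ON() above: the low seven bits carry SIGTRAP
 * and the event lands in bits 8-15.
 */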
2510
2511 /**
2512 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2513 * @signr: signr causing group stop if initiating
2514 *
2515 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2516 * and participate in it. If already set, participate in the existing
2517 * group stop. If participated in a group stop (and thus slept), %true is
2518 * returned with siglock released.
2519 *
2520 * If ptraced, this function doesn't handle stop itself. Instead,
2521 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2522 * untouched. The caller must ensure that INTERRUPT trap handling takes
2523 * place afterwards.
2524 *
2525 * CONTEXT:
2526 * Must be called with @current->sighand->siglock held, which is released
2527 * on %true return.
2528 *
2529 * RETURNS:
2530 * %false if group stop is already cancelled or ptrace trap is scheduled.
2531 * %true if participated in group stop.
2532 */
2533 static bool do_signal_stop(int signr)
2534 __releases(&current->sighand->siglock)
2535 {
2536 struct signal_struct *sig = current->signal;
2537
2538 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2539 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2540 struct task_struct *t;
2541
2542 /* signr will be recorded in task->jobctl for retries */
2543 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2544
2545 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2546 unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
2547 unlikely(sig->group_exec_task))
2548 return false;
2549 /*
2550 * There is no group stop already in progress. We must
2551 * initiate one now.
2552 *
2553 * While ptraced, a task may be resumed while group stop is
2554 * still in effect and then receive a stop signal and
2555 * initiate another group stop. This deviates from the
2556 * usual behavior as two consecutive stop signals can't
2557 * cause two group stops when !ptraced. That is why we
2558 * also check !task_is_stopped(t) below.
2559 *
2560 * The condition can be distinguished by testing whether
2561 * SIGNAL_STOP_STOPPED is already set. Don't generate
2562 * group_exit_code in such case.
2563 *
2564 * This is not necessary for SIGNAL_STOP_CONTINUED because
2565 * an intervening stop signal is required to cause two
2566 * continued events regardless of ptrace.
2567 */
2568 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2569 sig->group_exit_code = signr;
2570
2571 sig->group_stop_count = 0;
2572 if (task_set_jobctl_pending(current, signr | gstop))
2573 sig->group_stop_count++;
2574
2575 for_other_threads(current, t) {
2576 /*
2577 * Setting state to TASK_STOPPED for a group
2578 * stop is always done with the siglock held,
2579 * so this check has no races.
2580 */
2581 if (!task_is_stopped(t) &&
2582 task_set_jobctl_pending(t, signr | gstop)) {
2583 sig->group_stop_count++;
2584 if (likely(!(t->ptrace & PT_SEIZED)))
2585 signal_wake_up(t, 0);
2586 else
2587 ptrace_trap_notify(t);
2588 }
2589 }
2590 }
2591
2592 if (likely(!current->ptrace)) {
2593 int notify = 0;
2594
2595 /*
2596 * If there are no other threads in the group, or if there
2597 * is a group stop in progress and we are the last to stop,
2598 * report to the parent.
2599 */
2600 if (task_participate_group_stop(current))
2601 notify = CLD_STOPPED;
2602
2603 current->jobctl |= JOBCTL_STOPPED;
2604 set_special_state(TASK_STOPPED);
2605 spin_unlock_irq(&current->sighand->siglock);
2606
2607 /*
2608 * Notify the parent of the group stop completion. Because
2609 * we're not holding either the siglock or tasklist_lock
2610 * here, ptracer may attach in between; however, this is for
2611 * group stop and should always be delivered to the real
2612 * parent of the group leader. The new ptracer will get
2613 * its notification when this task transitions into
2614 * TASK_TRACED.
2615 */
2616 if (notify) {
2617 read_lock(&tasklist_lock);
2618 do_notify_parent_cldstop(current, false, notify);
2619 read_unlock(&tasklist_lock);
2620 }
2621
2622 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2623 cgroup_enter_frozen();
2624 schedule();
2625 return true;
2626 } else {
2627 /*
2628 * While ptraced, group stop is handled by STOP trap.
2629 * Schedule it and let the caller deal with it.
2630 */
2631 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2632 return false;
2633 }
2634 }
2635
2636 /**
2637 * do_jobctl_trap - take care of ptrace jobctl traps
2638 *
2639 * When PT_SEIZED, it's used for both group stop and explicit
2640 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2641 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2642 * the stop signal; otherwise, %SIGTRAP.
2643 *
2644 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2645 * number as exit_code and no siginfo.
2646 *
2647 * CONTEXT:
2648 * Must be called with @current->sighand->siglock held, which may be
2649 * released and re-acquired before returning with intervening sleep.
2650 */
2651 static void do_jobctl_trap(void)
2652 {
2653 struct signal_struct *signal = current->signal;
2654 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2655
2656 if (current->ptrace & PT_SEIZED) {
2657 if (!signal->group_stop_count &&
2658 !(signal->flags & SIGNAL_STOP_STOPPED))
2659 signr = SIGTRAP;
2660 WARN_ON_ONCE(!signr);
2661 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2662 CLD_STOPPED, 0);
2663 } else {
2664 WARN_ON_ONCE(!signr);
2665 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2666 }
2667 }
2668
2669 /**
2670 * do_freezer_trap - handle the freezer jobctl trap
2671 *
2672 * Puts the task into the frozen state, unless the task is about to quit,
2673 * in which case it drops JOBCTL_TRAP_FREEZE.
2674 *
2675 * CONTEXT:
2676 * Must be called with @current->sighand->siglock held,
2677 * which is always released before returning.
2678 */
2679 static void do_freezer_trap(void)
2680 __releases(&current->sighand->siglock)
2681 {
2682 /*
2683 * If there are other trap bits pending besides JOBCTL_TRAP_FREEZE,
2684 * let's make another loop to give them a chance to be handled.
2685 * In any case, we'll return.
2686 */
2687 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2688 JOBCTL_TRAP_FREEZE) {
2689 spin_unlock_irq(&current->sighand->siglock);
2690 return;
2691 }
2692
2693 /*
2694 * Now we're sure that there is no pending fatal signal and no
2695 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2696 * immediately (if there is a non-fatal signal pending), and
2697 * put the task into sleep.
2698 */
2699 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
2700 clear_thread_flag(TIF_SIGPENDING);
2701 spin_unlock_irq(&current->sighand->siglock);
2702 cgroup_enter_frozen();
2703 schedule();
2704
2705 /*
2706 * We could've been woken by task_work, run it to clear
2707 * TIF_NOTIFY_SIGNAL. The caller will retry if necessary.
2708 */
2709 clear_notify_signal();
2710 if (unlikely(task_work_pending(current)))
2711 task_work_run();
2712 }
2713
2714 static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
2715 {
2716 /*
2717 * We do not check sig_kernel_stop(signr) but set this marker
2718 * unconditionally because we do not know whether debugger will
2719 * change signr. This flag has no meaning unless we are going
2720 * to stop after return from ptrace_stop(). In this case it will
2721 * be checked in do_signal_stop(), we should only stop if it was
2722 * not cleared by SIGCONT while we were sleeping. See also the
2723 * comment in dequeue_signal().
2724 */
2725 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2726 signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);
2727
2728 /* We're back. Did the debugger cancel the sig? */
2729 if (signr == 0)
2730 return signr;
2731
2732 /*
2733 * Update the siginfo structure if the signal has
2734 * changed. If the debugger wanted something
2735 * specific in the siginfo structure then it should
2736 * have updated *info via PTRACE_SETSIGINFO.
2737 */
2738 if (signr != info->si_signo) {
2739 clear_siginfo(info);
2740 info->si_signo = signr;
2741 info->si_errno = 0;
2742 info->si_code = SI_USER;
2743 rcu_read_lock();
2744 info->si_pid = task_pid_vnr(current->parent);
2745 info->si_uid = from_kuid_munged(current_user_ns(),
2746 task_uid(current->parent));
2747 rcu_read_unlock();
2748 }
2749
2750 /* If the (new) signal is now blocked, requeue it. */
2751 if (sigismember(&current->blocked, signr) ||
2752 fatal_signal_pending(current)) {
2753 send_signal_locked(signr, info, current, type);
2754 signr = 0;
2755 }
2756
2757 return signr;
2758 }
2759
2760 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2761 {
2762 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2763 case SIL_FAULT:
2764 case SIL_FAULT_TRAPNO:
2765 case SIL_FAULT_MCEERR:
2766 case SIL_FAULT_BNDERR:
2767 case SIL_FAULT_PKUERR:
2768 case SIL_FAULT_PERF_EVENT:
2769 ksig->info.si_addr = arch_untagged_si_addr(
2770 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2771 break;
2772 case SIL_KILL:
2773 case SIL_TIMER:
2774 case SIL_POLL:
2775 case SIL_CHLD:
2776 case SIL_RT:
2777 case SIL_SYS:
2778 break;
2779 }
2780 }
2781
2782 bool get_signal(struct ksignal *ksig)
2783 {
2784 struct sighand_struct *sighand = current->sighand;
2785 struct signal_struct *signal = current->signal;
2786 int signr;
2787
2788 clear_notify_signal();
2789 if (unlikely(task_work_pending(current)))
2790 task_work_run();
2791
2792 if (!task_sigpending(current))
2793 return false;
2794
2795 if (unlikely(uprobe_deny_signal()))
2796 return false;
2797
2798 /*
2799 * Do this once, we can't return to user-mode if freezing() == T.
2800 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2801 * thus do not need another check after return.
2802 */
2803 try_to_freeze();
2804
2805 relock:
2806 spin_lock_irq(&sighand->siglock);
2807
2808 /*
2809 * Every stopped thread goes here after wakeup. Check to see if
2810 * we should notify the parent; prepare_signal(SIGCONT) encodes
2811 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2812 */
2813 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2814 int why;
2815
2816 if (signal->flags & SIGNAL_CLD_CONTINUED)
2817 why = CLD_CONTINUED;
2818 else
2819 why = CLD_STOPPED;
2820
2821 signal->flags &= ~SIGNAL_CLD_MASK;
2822
2823 spin_unlock_irq(&sighand->siglock);
2824
2825 /*
2826 * Notify the parent that we're continuing. This event is
2827 * always per-process and doesn't make a whole lot of sense
2828 * for ptracers, who shouldn't consume the state via
2829 * wait(2) either, but, for backward compatibility, notify
2830 * the ptracer of the group leader too unless it's gonna be
2831 * a duplicate.
2832 */
2833 read_lock(&tasklist_lock);
2834 do_notify_parent_cldstop(current, false, why);
2835
2836 if (ptrace_reparented(current->group_leader))
2837 do_notify_parent_cldstop(current->group_leader,
2838 true, why);
2839 read_unlock(&tasklist_lock);
2840
2841 goto relock;
2842 }
2843
2844 for (;;) {
2845 struct k_sigaction *ka;
2846 enum pid_type type;
2847
2848 /* Has this task already been marked for death? */
2849 if ((signal->flags & SIGNAL_GROUP_EXIT) ||
2850 signal->group_exec_task) {
2851 signr = SIGKILL;
2852 sigdelset(&current->pending.signal, SIGKILL);
2853 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2854 &sighand->action[SIGKILL-1]);
2855 recalc_sigpending();
2856 /*
2857 * implies do_group_exit() or return to PF_USER_WORKER,
2858 * no need to initialize ksig->info/etc.
2859 */
2860 goto fatal;
2861 }
2862
2863 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2864 do_signal_stop(0))
2865 goto relock;
2866
2867 if (unlikely(current->jobctl &
2868 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2869 if (current->jobctl & JOBCTL_TRAP_MASK) {
2870 do_jobctl_trap();
2871 spin_unlock_irq(&sighand->siglock);
2872 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2873 do_freezer_trap();
2874
2875 goto relock;
2876 }
2877
2878 /*
2879 * If the task is leaving the frozen state, let's update
2880 * cgroup counters and reset the frozen bit.
2881 */
2882 if (unlikely(cgroup_task_frozen(current))) {
2883 spin_unlock_irq(&sighand->siglock);
2884 cgroup_leave_frozen(false);
2885 goto relock;
2886 }
2887
2888 /*
2889 * Signals generated by the execution of an instruction
2890 * need to be delivered before any other pending signals
2891 * so that the instruction pointer in the signal stack
2892 * frame points to the faulting instruction.
2893 */
2894 type = PIDTYPE_PID;
2895 signr = dequeue_synchronous_signal(&ksig->info);
2896 if (!signr)
2897 signr = dequeue_signal(&current->blocked, &ksig->info, &type);
2898
2899 if (!signr)
2900 break; /* will return 0 */
2901
2902 if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2903 !(sighand->action[signr - 1].sa.sa_flags & SA_IMMUTABLE)) {
2904 signr = ptrace_signal(signr, &ksig->info, type);
2905 if (!signr)
2906 continue;
2907 }
2908
2909 ka = &sighand->action[signr-1];
2910
2911 /* Trace actually delivered signals. */
2912 trace_signal_deliver(signr, &ksig->info, ka);
2913
2914 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2915 continue;
2916 if (ka->sa.sa_handler != SIG_DFL) {
2917 /* Run the handler. */
2918 ksig->ka = *ka;
2919
2920 if (ka->sa.sa_flags & SA_ONESHOT)
2921 ka->sa.sa_handler = SIG_DFL;
2922
2923 break; /* will return non-zero "signr" value */
2924 }
2925
2926 /*
2927 * Now we are doing the default action for this signal.
2928 */
2929 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2930 continue;
2931
2932 /*
2933 * Global init gets no signals it doesn't want.
2934 * Container-init gets no signals it doesn't want from the same
2935 * container.
2936 *
2937 * Note that if global/container-init sees a sig_kernel_only()
2938 * signal here, the signal must have been generated internally
2939 * or must have come from an ancestor namespace. In either
2940 * case, the signal cannot be dropped.
2941 */
2942 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2943 !sig_kernel_only(signr))
2944 continue;
2945
2946 if (sig_kernel_stop(signr)) {
2947 /*
2948 * The default action is to stop all threads in
2949 * the thread group. The job control signals
2950 * do nothing in an orphaned pgrp, but SIGSTOP
2951 * always works. Note that siglock needs to be
2952 * dropped during the call to is_orphaned_pgrp()
2953 * because of lock ordering with tasklist_lock.
2954 * This allows an intervening SIGCONT to be posted.
2955 * We need to check for that and bail out if necessary.
2956 */
2957 if (signr != SIGSTOP) {
2958 spin_unlock_irq(&sighand->siglock);
2959
2960 /* signals can be posted during this window */
2961
2962 if (is_current_pgrp_orphaned())
2963 goto relock;
2964
2965 spin_lock_irq(&sighand->siglock);
2966 }
2967
2968 if (likely(do_signal_stop(signr))) {
2969 /* It released the siglock. */
2970 goto relock;
2971 }
2972
2973 /*
2974 * We didn't actually stop, due to a race
2975 * with SIGCONT or something like that.
2976 */
2977 continue;
2978 }
2979
2980 fatal:
2981 spin_unlock_irq(&sighand->siglock);
2982 if (unlikely(cgroup_task_frozen(current)))
2983 cgroup_leave_frozen(true);
2984
2985 /*
2986 * Anything else is fatal, maybe with a core dump.
2987 */
2988 current->flags |= PF_SIGNALED;
2989
2990 if (sig_kernel_coredump(signr)) {
2991 if (print_fatal_signals)
2992 print_fatal_signal(signr);
2993 proc_coredump_connector(current);
2994 /*
2995 * If it was able to dump core, this kills all
2996 * other threads in the group and synchronizes with
2997 * their demise. If we lost the race with another
2998 * thread getting here, it set group_exit_code
2999 * first and our do_group_exit call below will use
3000 * that value and ignore the one we pass it.
3001 */
3002 do_coredump(&ksig->info);
3003 }
3004
3005 /*
3006 * PF_USER_WORKER threads will catch and exit on fatal signals
3007 * themselves. They have cleanup that must be performed, so we
3008 * cannot call do_exit() on their behalf. Note that ksig won't
3009 * be properly initialized, so PF_USER_WORKERs shouldn't use it.
3010 */
3011 if (current->flags & PF_USER_WORKER)
3012 goto out;
3013
3014 /*
3015 * Death signals, no core dump.
3016 */
3017 do_group_exit(signr);
3018 /* NOTREACHED */
3019 }
3020 spin_unlock_irq(&sighand->siglock);
3021
3022 ksig->sig = signr;
3023
3024 if (signr && !(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
3025 hide_si_addr_tag_bits(ksig);
3026 out:
3027 return signr > 0;
3028 }
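
/*
 * Illustrative sketch (editorial): architecture signal code drives
 * get_signal() from its return-to-user path, roughly like this, with
 * handle_signal() standing in for the arch-specific frame setup:
 *
 *	struct ksignal ksig;
 *
 *	if (get_signal(&ksig))
 *		handle_signal(&ksig, regs);	// set up the user frame
 *	else
 *		restore_saved_sigmask();	// no handler to run
 */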
3029
3030 /**
3031 * signal_delivered - called after signal delivery to update blocked signals
3032 * @ksig: kernel signal struct
3033 * @stepping: nonzero if debugger single-step or block-step in use
3034 *
3035 * This function should be called when a signal has successfully been
3036 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
3037 * is always blocked), and the signal itself is blocked unless %SA_NODEFER
3038 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
3039 */
3040 static void signal_delivered(struct ksignal *ksig, int stepping)
3041 {
3042 sigset_t blocked;
3043
3044 /* A signal was successfully delivered, and the
3045 saved sigmask was stored on the signal frame,
3046 and will be restored by sigreturn. So we can
3047 simply clear the restore sigmask flag. */
3048 clear_restore_sigmask();
3049
3050 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
3051 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
3052 sigaddset(&blocked, ksig->sig);
3053 set_current_blocked(&blocked);
3054 if (current->sas_ss_flags & SS_AUTODISARM)
3055 sas_ss_reset(current);
3056 if (stepping)
3057 ptrace_notify(SIGTRAP, 0);
3058 }
3059
3060 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
3061 {
3062 if (failed)
3063 force_sigsegv(ksig->sig);
3064 else
3065 signal_delivered(ksig, stepping);
3066 }
3067
3068 /*
3069 * It could be that complete_signal() picked us to notify about the
3070 * group-wide signal. Other threads should be notified now to take
3071 * the shared signals in @which since we will not.
3072 */
3073 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
3074 {
3075 sigset_t retarget;
3076 struct task_struct *t;
3077
3078 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
3079 if (sigisemptyset(&retarget))
3080 return;
3081
3082 for_other_threads(tsk, t) {
3083 if (t->flags & PF_EXITING)
3084 continue;
3085
3086 if (!has_pending_signals(&retarget, &t->blocked))
3087 continue;
3088 /* Remove the signals this thread can handle. */
3089 sigandsets(&retarget, &retarget, &t->blocked);
3090
3091 if (!task_sigpending(t))
3092 signal_wake_up(t, 0);
3093
3094 if (sigisemptyset(&retarget))
3095 break;
3096 }
3097 }
3098
3099 void exit_signals(struct task_struct *tsk)
3100 {
3101 int group_stop = 0;
3102 sigset_t unblocked;
3103
3104 /*
3105 * @tsk is about to have PF_EXITING set - lock out users which
3106 * expect stable threadgroup.
3107 */
3108 cgroup_threadgroup_change_begin(tsk);
3109
3110 if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
3111 sched_mm_cid_exit_signals(tsk);
3112 tsk->flags |= PF_EXITING;
3113 cgroup_threadgroup_change_end(tsk);
3114 return;
3115 }
3116
3117 spin_lock_irq(&tsk->sighand->siglock);
3118 /*
3119 * From now this task is not visible for group-wide signals,
3120 * see wants_signal(), do_signal_stop().
3121 */
3122 sched_mm_cid_exit_signals(tsk);
3123 tsk->flags |= PF_EXITING;
3124
3125 cgroup_threadgroup_change_end(tsk);
3126
3127 if (!task_sigpending(tsk))
3128 goto out;
3129
3130 unblocked = tsk->blocked;
3131 signotset(&unblocked);
3132 retarget_shared_pending(tsk, &unblocked);
3133
3134 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
3135 task_participate_group_stop(tsk))
3136 group_stop = CLD_STOPPED;
3137 out:
3138 spin_unlock_irq(&tsk->sighand->siglock);
3139
3140 /*
3141 * If group stop has completed, deliver the notification. This
3142 * should always go to the real parent of the group leader.
3143 */
3144 if (unlikely(group_stop)) {
3145 read_lock(&tasklist_lock);
3146 do_notify_parent_cldstop(tsk, false, group_stop);
3147 read_unlock(&tasklist_lock);
3148 }
3149 }
3150
3151 /*
3152 * System call entry points.
3153 */
3154
3155 /**
3156 * sys_restart_syscall - restart a system call
3157 */
3158 SYSCALL_DEFINE0(restart_syscall)
3159 {
3160 struct restart_block *restart = &current->restart_block;
3161 return restart->fn(restart);
3162 }
3163
3164 long do_no_restart_syscall(struct restart_block *param)
3165 {
3166 return -EINTR;
3167 }
3168
3169 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3170 {
3171 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3172 sigset_t newblocked;
3173 /* A set of now blocked but previously unblocked signals. */
3174 sigandnsets(&newblocked, newset, &current->blocked);
3175 retarget_shared_pending(tsk, &newblocked);
3176 }
3177 tsk->blocked = *newset;
3178 recalc_sigpending();
3179 }
3180
3181 /**
3182 * set_current_blocked - change current->blocked mask
3183 * @newset: new mask
3184 *
3185 * It is wrong to change ->blocked directly; this helper should be used
3186 * to ensure the process can't miss a shared signal we are going to block.
3187 */
3188 void set_current_blocked(sigset_t *newset)
3189 {
3190 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3191 __set_current_blocked(newset);
3192 }
3193
3194 void __set_current_blocked(const sigset_t *newset)
3195 {
3196 struct task_struct *tsk = current;
3197
3198 /*
3199 * In case the signal mask hasn't changed, there is nothing we need
3200 * to do. current->blocked shouldn't be modified by any other task.
3201 */
3202 if (sigequalsets(&tsk->blocked, newset))
3203 return;
3204
3205 spin_lock_irq(&tsk->sighand->siglock);
3206 __set_task_blocked(tsk, newset);
3207 spin_unlock_irq(&tsk->sighand->siglock);
3208 }
3209
3210 /*
3211 * This is also useful for kernel threads that want to temporarily
3212 * (or permanently) block certain signals.
3213 *
3214 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3215 * interface happily blocks "unblockable" signals like SIGKILL
3216 * and friends.
3217 */
3218 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3219 {
3220 struct task_struct *tsk = current;
3221 sigset_t newset;
3222
3223 /* Lockless, only current can change ->blocked, never from irq */
3224 if (oldset)
3225 *oldset = tsk->blocked;
3226
3227 switch (how) {
3228 case SIG_BLOCK:
3229 sigorsets(&newset, &tsk->blocked, set);
3230 break;
3231 case SIG_UNBLOCK:
3232 sigandnsets(&newset, &tsk->blocked, set);
3233 break;
3234 case SIG_SETMASK:
3235 newset = *set;
3236 break;
3237 default:
3238 return -EINVAL;
3239 }
3240
3241 __set_current_blocked(&newset);
3242 return 0;
3243 }
3244 EXPORT_SYMBOL(sigprocmask);
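
/*
 * Illustrative sketch (editorial): a kernel thread that wants to ignore
 * everything except SIGKILL while it runs could do roughly:
 *
 *	sigset_t blocked;
 *
 *	siginitsetinv(&blocked, sigmask(SIGKILL));
 *	sigprocmask(SIG_BLOCK, &blocked, NULL);
 *
 * As noted above, this interface happily blocks SIGKILL too if asked,
 * so callers must take care to leave the unblockable signals alone when
 * that matters.
 */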
3245
3246 /*
3247 * This API helps set app-provided sigmasks.
3248 *
3249 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3250 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3251 *
3252 * Note that it does set_restore_sigmask() in advance, so it must always be
3253 * paired with restore_saved_sigmask_unless() before return from syscall.
3254 */
3255 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3256 {
3257 sigset_t kmask;
3258
3259 if (!umask)
3260 return 0;
3261 if (sigsetsize != sizeof(sigset_t))
3262 return -EINVAL;
3263 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3264 return -EFAULT;
3265
3266 set_restore_sigmask();
3267 current->saved_sigmask = current->blocked;
3268 set_current_blocked(&kmask);
3269
3270 return 0;
3271 }
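
/*
 * Illustrative sketch (editorial) of the required pairing, in the style
 * of the epoll_pwait/ppoll syscalls, with do_work() standing in for the
 * actual syscall body:
 *
 *	ret = set_user_sigmask(umask, sigsetsize);
 *	if (ret)
 *		return ret;
 *	ret = do_work();
 *	restore_saved_sigmask_unless(ret == -EINTR);
 *
 * When the work was interrupted by a signal, the restore is deferred to
 * signal delivery, which saves the original mask on the signal frame.
 */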
3272
3273 #ifdef CONFIG_COMPAT
3274 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3275 size_t sigsetsize)
3276 {
3277 sigset_t kmask;
3278
3279 if (!umask)
3280 return 0;
3281 if (sigsetsize != sizeof(compat_sigset_t))
3282 return -EINVAL;
3283 if (get_compat_sigset(&kmask, umask))
3284 return -EFAULT;
3285
3286 set_restore_sigmask();
3287 current->saved_sigmask = current->blocked;
3288 set_current_blocked(&kmask);
3289
3290 return 0;
3291 }
3292 #endif
3293
3294 /**
3295 * sys_rt_sigprocmask - change the list of currently blocked signals
3296 * @how: whether to add, remove, or set signals
3297 * @nset: new signal mask to apply according to @how, or NULL
3298 * @oset: previous value of signal mask if non-null
3299 * @sigsetsize: size of sigset_t type
3300 */
3301 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3302 sigset_t __user *, oset, size_t, sigsetsize)
3303 {
3304 sigset_t old_set, new_set;
3305 int error;
3306
3307 /* XXX: Don't preclude handling different sized sigset_t's. */
3308 if (sigsetsize != sizeof(sigset_t))
3309 return -EINVAL;
3310
3311 old_set = current->blocked;
3312
3313 if (nset) {
3314 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3315 return -EFAULT;
3316 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3317
3318 error = sigprocmask(how, &new_set, NULL);
3319 if (error)
3320 return error;
3321 }
3322
3323 if (oset) {
3324 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3325 return -EFAULT;
3326 }
3327
3328 return 0;
3329 }
3330
3331 #ifdef CONFIG_COMPAT
3332 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3333 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3334 {
3335 sigset_t old_set = current->blocked;
3336
3337 /* XXX: Don't preclude handling different sized sigset_t's. */
3338 if (sigsetsize != sizeof(sigset_t))
3339 return -EINVAL;
3340
3341 if (nset) {
3342 sigset_t new_set;
3343 int error;
3344 if (get_compat_sigset(&new_set, nset))
3345 return -EFAULT;
3346 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3347
3348 error = sigprocmask(how, &new_set, NULL);
3349 if (error)
3350 return error;
3351 }
3352 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3353 }
3354 #endif
3355
3356 static void do_sigpending(sigset_t *set)
3357 {
3358 spin_lock_irq(&current->sighand->siglock);
3359 sigorsets(set, &current->pending.signal,
3360 &current->signal->shared_pending.signal);
3361 spin_unlock_irq(&current->sighand->siglock);
3362
3363 /* Outside the lock because only this thread touches it. */
3364 sigandsets(set, &current->blocked, set);
3365 }
3366
3367 /**
3368 * sys_rt_sigpending - examine a pending signal that has been raised
3369 * while blocked
3370 * @uset: stores pending signals
3371 * @sigsetsize: size of sigset_t type, which must not exceed sizeof(sigset_t)
3372 */
3373 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3374 {
3375 sigset_t set;
3376
3377 if (sigsetsize > sizeof(*uset))
3378 return -EINVAL;
3379
3380 do_sigpending(&set);
3381
3382 if (copy_to_user(uset, &set, sigsetsize))
3383 return -EFAULT;
3384
3385 return 0;
3386 }
3387
3388 #ifdef CONFIG_COMPAT
3389 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3390 compat_size_t, sigsetsize)
3391 {
3392 sigset_t set;
3393
3394 if (sigsetsize > sizeof(*uset))
3395 return -EINVAL;
3396
3397 do_sigpending(&set);
3398
3399 return put_compat_sigset(uset, &set, sigsetsize);
3400 }
3401 #endif
3402
3403 static const struct {
3404 unsigned char limit, layout;
3405 } sig_sicodes[] = {
3406 [SIGILL] = { NSIGILL, SIL_FAULT },
3407 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3408 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3409 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3410 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3411 #if defined(SIGEMT)
3412 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3413 #endif
3414 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3415 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3416 [SIGSYS] = { NSIGSYS, SIL_SYS },
3417 };
3418
3419 static bool known_siginfo_layout(unsigned sig, int si_code)
3420 {
3421 if (si_code == SI_KERNEL)
3422 return true;
3423 else if (si_code > SI_USER) {
3424 if (sig_specific_sicodes(sig)) {
3425 if (si_code <= sig_sicodes[sig].limit)
3426 return true;
3427 }
3428 else if (si_code <= NSIGPOLL)
3429 return true;
3430 }
3431 else if (si_code >= SI_DETHREAD)
3432 return true;
3433 else if (si_code == SI_ASYNCNL)
3434 return true;
3435 return false;
3436 }
3437
3438 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3439 {
3440 enum siginfo_layout layout = SIL_KILL;
3441 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3442 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3443 (si_code <= sig_sicodes[sig].limit)) {
3444 layout = sig_sicodes[sig].layout;
3445 /* Handle the exceptions */
3446 if ((sig == SIGBUS) &&
3447 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3448 layout = SIL_FAULT_MCEERR;
3449 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3450 layout = SIL_FAULT_BNDERR;
3451 #ifdef SEGV_PKUERR
3452 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3453 layout = SIL_FAULT_PKUERR;
3454 #endif
3455 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3456 layout = SIL_FAULT_PERF_EVENT;
3457 else if (IS_ENABLED(CONFIG_SPARC) &&
3458 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3459 layout = SIL_FAULT_TRAPNO;
3460 else if (IS_ENABLED(CONFIG_ALPHA) &&
3461 ((sig == SIGFPE) ||
3462 ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3463 layout = SIL_FAULT_TRAPNO;
3464 }
3465 else if (si_code <= NSIGPOLL)
3466 layout = SIL_POLL;
3467 } else {
3468 if (si_code == SI_TIMER)
3469 layout = SIL_TIMER;
3470 else if (si_code == SI_SIGIO)
3471 layout = SIL_POLL;
3472 else if (si_code < 0)
3473 layout = SIL_RT;
3474 }
3475 return layout;
3476 }
3477
3478 static inline char __user *si_expansion(const siginfo_t __user *info)
3479 {
3480 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3481 }
3482
3483 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3484 {
3485 char __user *expansion = si_expansion(to);
3486 if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3487 return -EFAULT;
3488 if (clear_user(expansion, SI_EXPANSION_SIZE))
3489 return -EFAULT;
3490 return 0;
3491 }
3492
3493 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3494 const siginfo_t __user *from)
3495 {
3496 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3497 char __user *expansion = si_expansion(from);
3498 char buf[SI_EXPANSION_SIZE];
3499 int i;
3500 /*
3501 * An unknown si_code might need more than
3502 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3503 * extra bytes are 0. This guarantees copy_siginfo_to_user
3504 * will return this data to userspace exactly.
3505 */
3506 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3507 return -EFAULT;
3508 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3509 if (buf[i] != 0)
3510 return -E2BIG;
3511 }
3512 }
3513 return 0;
3514 }
3515
3516 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3517 const siginfo_t __user *from)
3518 {
3519 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3520 return -EFAULT;
3521 to->si_signo = signo;
3522 return post_copy_siginfo_from_user(to, from);
3523 }
3524
3525 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3526 {
3527 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3528 return -EFAULT;
3529 return post_copy_siginfo_from_user(to, from);
3530 }
3531
3532 #ifdef CONFIG_COMPAT
3533 /**
3534 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3535 * @to: compat siginfo destination
3536 * @from: kernel siginfo source
3537 *
3538 * Note: This function does not work properly for SIGCHLD on x32, but
3539 * fortunately it doesn't have to. The only valid callers of this function are
3540 * copy_siginfo_to_user32, which is overridden for x32, and the coredump code.
3541 * The latter does not care because SIGCHLD will never cause a coredump.
3542 */
3543 void copy_siginfo_to_external32(struct compat_siginfo *to,
3544 const struct kernel_siginfo *from)
3545 {
3546 memset(to, 0, sizeof(*to));
3547
3548 to->si_signo = from->si_signo;
3549 to->si_errno = from->si_errno;
3550 to->si_code = from->si_code;
3551 switch (siginfo_layout(from->si_signo, from->si_code)) {
3552 case SIL_KILL:
3553 to->si_pid = from->si_pid;
3554 to->si_uid = from->si_uid;
3555 break;
3556 case SIL_TIMER:
3557 to->si_tid = from->si_tid;
3558 to->si_overrun = from->si_overrun;
3559 to->si_int = from->si_int;
3560 break;
3561 case SIL_POLL:
3562 to->si_band = from->si_band;
3563 to->si_fd = from->si_fd;
3564 break;
3565 case SIL_FAULT:
3566 to->si_addr = ptr_to_compat(from->si_addr);
3567 break;
3568 case SIL_FAULT_TRAPNO:
3569 to->si_addr = ptr_to_compat(from->si_addr);
3570 to->si_trapno = from->si_trapno;
3571 break;
3572 case SIL_FAULT_MCEERR:
3573 to->si_addr = ptr_to_compat(from->si_addr);
3574 to->si_addr_lsb = from->si_addr_lsb;
3575 break;
3576 case SIL_FAULT_BNDERR:
3577 to->si_addr = ptr_to_compat(from->si_addr);
3578 to->si_lower = ptr_to_compat(from->si_lower);
3579 to->si_upper = ptr_to_compat(from->si_upper);
3580 break;
3581 case SIL_FAULT_PKUERR:
3582 to->si_addr = ptr_to_compat(from->si_addr);
3583 to->si_pkey = from->si_pkey;
3584 break;
3585 case SIL_FAULT_PERF_EVENT:
3586 to->si_addr = ptr_to_compat(from->si_addr);
3587 to->si_perf_data = from->si_perf_data;
3588 to->si_perf_type = from->si_perf_type;
3589 to->si_perf_flags = from->si_perf_flags;
3590 break;
3591 case SIL_CHLD:
3592 to->si_pid = from->si_pid;
3593 to->si_uid = from->si_uid;
3594 to->si_status = from->si_status;
3595 to->si_utime = from->si_utime;
3596 to->si_stime = from->si_stime;
3597 break;
3598 case SIL_RT:
3599 to->si_pid = from->si_pid;
3600 to->si_uid = from->si_uid;
3601 to->si_int = from->si_int;
3602 break;
3603 case SIL_SYS:
3604 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3605 to->si_syscall = from->si_syscall;
3606 to->si_arch = from->si_arch;
3607 break;
3608 }
3609 }
3610
3611 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3612 const struct kernel_siginfo *from)
3613 {
3614 struct compat_siginfo new;
3615
3616 copy_siginfo_to_external32(&new, from);
3617 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3618 return -EFAULT;
3619 return 0;
3620 }
3621
3622 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3623 const struct compat_siginfo *from)
3624 {
3625 clear_siginfo(to);
3626 to->si_signo = from->si_signo;
3627 to->si_errno = from->si_errno;
3628 to->si_code = from->si_code;
3629 switch (siginfo_layout(from->si_signo, from->si_code)) {
3630 case SIL_KILL:
3631 to->si_pid = from->si_pid;
3632 to->si_uid = from->si_uid;
3633 break;
3634 case SIL_TIMER:
3635 to->si_tid = from->si_tid;
3636 to->si_overrun = from->si_overrun;
3637 to->si_int = from->si_int;
3638 break;
3639 case SIL_POLL:
3640 to->si_band = from->si_band;
3641 to->si_fd = from->si_fd;
3642 break;
3643 case SIL_FAULT:
3644 to->si_addr = compat_ptr(from->si_addr);
3645 break;
3646 case SIL_FAULT_TRAPNO:
3647 to->si_addr = compat_ptr(from->si_addr);
3648 to->si_trapno = from->si_trapno;
3649 break;
3650 case SIL_FAULT_MCEERR:
3651 to->si_addr = compat_ptr(from->si_addr);
3652 to->si_addr_lsb = from->si_addr_lsb;
3653 break;
3654 case SIL_FAULT_BNDERR:
3655 to->si_addr = compat_ptr(from->si_addr);
3656 to->si_lower = compat_ptr(from->si_lower);
3657 to->si_upper = compat_ptr(from->si_upper);
3658 break;
3659 case SIL_FAULT_PKUERR:
3660 to->si_addr = compat_ptr(from->si_addr);
3661 to->si_pkey = from->si_pkey;
3662 break;
3663 case SIL_FAULT_PERF_EVENT:
3664 to->si_addr = compat_ptr(from->si_addr);
3665 to->si_perf_data = from->si_perf_data;
3666 to->si_perf_type = from->si_perf_type;
3667 to->si_perf_flags = from->si_perf_flags;
3668 break;
3669 case SIL_CHLD:
3670 to->si_pid = from->si_pid;
3671 to->si_uid = from->si_uid;
3672 to->si_status = from->si_status;
3673 #ifdef CONFIG_X86_X32_ABI
3674 if (in_x32_syscall()) {
3675 to->si_utime = from->_sifields._sigchld_x32._utime;
3676 to->si_stime = from->_sifields._sigchld_x32._stime;
3677 } else
3678 #endif
3679 {
3680 to->si_utime = from->si_utime;
3681 to->si_stime = from->si_stime;
3682 }
3683 break;
3684 case SIL_RT:
3685 to->si_pid = from->si_pid;
3686 to->si_uid = from->si_uid;
3687 to->si_int = from->si_int;
3688 break;
3689 case SIL_SYS:
3690 to->si_call_addr = compat_ptr(from->si_call_addr);
3691 to->si_syscall = from->si_syscall;
3692 to->si_arch = from->si_arch;
3693 break;
3694 }
3695 return 0;
3696 }
3697
3698 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3699 const struct compat_siginfo __user *ufrom)
3700 {
3701 struct compat_siginfo from;
3702
3703 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3704 return -EFAULT;
3705
3706 from.si_signo = signo;
3707 return post_copy_siginfo_from_user32(to, &from);
3708 }
3709
3710 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3711 const struct compat_siginfo __user *ufrom)
3712 {
3713 struct compat_siginfo from;
3714
3715 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3716 return -EFAULT;
3717
3718 return post_copy_siginfo_from_user32(to, &from);
3719 }
3720 #endif /* CONFIG_COMPAT */
3721
3722 /**
3723 * do_sigtimedwait - wait for queued signals specified in @which
3724 * @which: queued signals to wait for
3725 * @info: if non-null, the signal's siginfo is returned here
3726 * @ts: upper bound on process time suspension
3727 */
3728 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3729 const struct timespec64 *ts)
3730 {
3731 ktime_t *to = NULL, timeout = KTIME_MAX;
3732 struct task_struct *tsk = current;
3733 sigset_t mask = *which;
3734 enum pid_type type;
3735 int sig, ret = 0;
3736
3737 if (ts) {
3738 if (!timespec64_valid(ts))
3739 return -EINVAL;
3740 timeout = timespec64_to_ktime(*ts);
3741 to = &timeout;
3742 }
3743
3744 /*
3745 * Invert the set of allowed signals to get those we want to block.
3746 */
3747 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3748 signotset(&mask);
3749
3750 spin_lock_irq(&tsk->sighand->siglock);
3751 sig = dequeue_signal(&mask, info, &type);
3752 if (!sig && timeout) {
3753 /*
3754 * None ready, temporarily unblock those we're interested in
3755 * while we are sleeping, so that we'll be awakened when
3756 * they arrive. Unblocking is always fine, we can avoid
3757 * set_current_blocked().
3758 */
3759 tsk->real_blocked = tsk->blocked;
3760 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3761 recalc_sigpending();
3762 spin_unlock_irq(&tsk->sighand->siglock);
3763
3764 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
3765 ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3766 HRTIMER_MODE_REL);
3767 spin_lock_irq(&tsk->sighand->siglock);
3768 __set_task_blocked(tsk, &tsk->real_blocked);
3769 sigemptyset(&tsk->real_blocked);
3770 sig = dequeue_signal(&mask, info, &type);
3771 }
3772 spin_unlock_irq(&tsk->sighand->siglock);
3773
3774 if (sig)
3775 return sig;
3776 return ret ? -EINTR : -EAGAIN;
3777 }
3778
3779 /**
3780 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3781 * in @uthese
3782 * @uthese: queued signals to wait for
3783 * @uinfo: if non-null, the signal's siginfo is returned here
3784 * @uts: upper bound on process time suspension
3785 * @sigsetsize: size of sigset_t type
3786 */
3787 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3788 siginfo_t __user *, uinfo,
3789 const struct __kernel_timespec __user *, uts,
3790 size_t, sigsetsize)
3791 {
3792 sigset_t these;
3793 struct timespec64 ts;
3794 kernel_siginfo_t info;
3795 int ret;
3796
3797 /* XXX: Don't preclude handling different sized sigset_t's. */
3798 if (sigsetsize != sizeof(sigset_t))
3799 return -EINVAL;
3800
3801 if (copy_from_user(&these, uthese, sizeof(these)))
3802 return -EFAULT;
3803
3804 if (uts) {
3805 if (get_timespec64(&ts, uts))
3806 return -EFAULT;
3807 }
3808
3809 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3810
3811 if (ret > 0 && uinfo) {
3812 if (copy_siginfo_to_user(uinfo, &info))
3813 ret = -EFAULT;
3814 }
3815
3816 return ret;
3817 }
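
/*
 * Illustrative userspace usage (a sketch, not kernel code): wait up
 * to five seconds for a SIGUSR1 that is kept blocked, via the glibc
 * wrapper for this syscall:
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { .tv_sec = 5 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &info, &ts) == SIGUSR1)
 *		printf("sender pid %d\n", (int)info.si_pid);
 *
 * Keeping the signal blocked is what makes the dequeue above race
 * free; an unblocked signal would be delivered to a handler instead.
 */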
3818
3819 #ifdef CONFIG_COMPAT_32BIT_TIME
3820 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3821 siginfo_t __user *, uinfo,
3822 const struct old_timespec32 __user *, uts,
3823 size_t, sigsetsize)
3824 {
3825 sigset_t these;
3826 struct timespec64 ts;
3827 kernel_siginfo_t info;
3828 int ret;
3829
3830 if (sigsetsize != sizeof(sigset_t))
3831 return -EINVAL;
3832
3833 if (copy_from_user(&these, uthese, sizeof(these)))
3834 return -EFAULT;
3835
3836 if (uts) {
3837 if (get_old_timespec32(&ts, uts))
3838 return -EFAULT;
3839 }
3840
3841 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3842
3843 if (ret > 0 && uinfo) {
3844 if (copy_siginfo_to_user(uinfo, &info))
3845 ret = -EFAULT;
3846 }
3847
3848 return ret;
3849 }
3850 #endif
3851
3852 #ifdef CONFIG_COMPAT
3853 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3854 struct compat_siginfo __user *, uinfo,
3855 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3856 {
3857 sigset_t s;
3858 struct timespec64 t;
3859 kernel_siginfo_t info;
3860 long ret;
3861
3862 if (sigsetsize != sizeof(sigset_t))
3863 return -EINVAL;
3864
3865 if (get_compat_sigset(&s, uthese))
3866 return -EFAULT;
3867
3868 if (uts) {
3869 if (get_timespec64(&t, uts))
3870 return -EFAULT;
3871 }
3872
3873 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3874
3875 if (ret > 0 && uinfo) {
3876 if (copy_siginfo_to_user32(uinfo, &info))
3877 ret = -EFAULT;
3878 }
3879
3880 return ret;
3881 }
3882
3883 #ifdef CONFIG_COMPAT_32BIT_TIME
3884 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3885 struct compat_siginfo __user *, uinfo,
3886 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3887 {
3888 sigset_t s;
3889 struct timespec64 t;
3890 kernel_siginfo_t info;
3891 long ret;
3892
3893 if (sigsetsize != sizeof(sigset_t))
3894 return -EINVAL;
3895
3896 if (get_compat_sigset(&s, uthese))
3897 return -EFAULT;
3898
3899 if (uts) {
3900 if (get_old_timespec32(&t, uts))
3901 return -EFAULT;
3902 }
3903
3904 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3905
3906 if (ret > 0 && uinfo) {
3907 if (copy_siginfo_to_user32(uinfo, &info))
3908 ret = -EFAULT;
3909 }
3910
3911 return ret;
3912 }
3913 #endif
3914 #endif
3915
3916 static void prepare_kill_siginfo(int sig, struct kernel_siginfo *info,
3917 enum pid_type type)
3918 {
3919 clear_siginfo(info);
3920 info->si_signo = sig;
3921 info->si_errno = 0;
3922 info->si_code = (type == PIDTYPE_PID) ? SI_TKILL : SI_USER;
3923 info->si_pid = task_tgid_vnr(current);
3924 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3925 }
3926
3927 /**
3928 * sys_kill - send a signal to a process
3929 * @pid: the PID of the process
3930 * @sig: signal to be sent
3931 */
3932 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3933 {
3934 struct kernel_siginfo info;
3935
3936 prepare_kill_siginfo(sig, &info, PIDTYPE_TGID);
3937
3938 return kill_something_info(sig, &info, pid);
3939 }
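
/*
 * The pid argument has the usual kill(2) semantics, implemented by
 * kill_something_info(): pid > 0 names one process, pid == 0 the
 * caller's process group, pid == -1 every process the caller may
 * signal, and pid < -1 the process group -pid. Signal 0 performs
 * only the permission and existence checks, so a userspace sketch
 * of a liveness probe is:
 *
 *	if (kill(pid, 0) == 0 || errno == EPERM)
 *		-- pid exists (EPERM means it exists but is off limits)
 */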
3940
3941 /*
3942 * Verify that the signaler and signalee either are in the same pid namespace
3943 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3944 * namespace.
3945 */
3946 static bool access_pidfd_pidns(struct pid *pid)
3947 {
3948 struct pid_namespace *active = task_active_pid_ns(current);
3949 struct pid_namespace *p = ns_of_pid(pid);
3950
3951 for (;;) {
3952 if (!p)
3953 return false;
3954 if (p == active)
3955 break;
3956 p = p->parent;
3957 }
3958
3959 return true;
3960 }
3961
3962 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3963 siginfo_t __user *info)
3964 {
3965 #ifdef CONFIG_COMPAT
3966 /*
3967 * Avoid hooking up compat syscalls and instead handle necessary
3968 * conversions here. Note, this is a stop-gap measure and should not be
3969 * considered a generic solution.
3970 */
3971 if (in_compat_syscall())
3972 return copy_siginfo_from_user32(
3973 kinfo, (struct compat_siginfo __user *)info);
3974 #endif
3975 return copy_siginfo_from_user(kinfo, info);
3976 }
3977
3978 static struct pid *pidfd_to_pid(const struct file *file)
3979 {
3980 struct pid *pid;
3981
3982 pid = pidfd_pid(file);
3983 if (!IS_ERR(pid))
3984 return pid;
3985
3986 return tgid_pidfd_to_pid(file);
3987 }
3988
3989 #define PIDFD_SEND_SIGNAL_FLAGS \
3990 (PIDFD_SIGNAL_THREAD | PIDFD_SIGNAL_THREAD_GROUP | \
3991 PIDFD_SIGNAL_PROCESS_GROUP)
3992
3993 /**
3994 * sys_pidfd_send_signal - Signal a process through a pidfd
3995 * @pidfd: file descriptor of the process
3996 * @sig: signal to send
3997 * @info: signal info
3998 * @flags: future flags
3999 *
4000 * Send the signal to the thread group or to the individual thread depending
4001 * on PIDFD_THREAD.
4002 * In the future, an extension to @flags may be used to override the default
4003 * scope of @pidfd.
4004 *
4005 * Return: 0 on success, negative errno on failure
4006 */
4007 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
4008 siginfo_t __user *, info, unsigned int, flags)
4009 {
4010 int ret;
4011 struct pid *pid;
4012 kernel_siginfo_t kinfo;
4013 enum pid_type type;
4014
4015 /* Reject any flag bits we do not know about. */
4016 if (flags & ~PIDFD_SEND_SIGNAL_FLAGS)
4017 return -EINVAL;
4018
4019 /* Ensure that at most one scope-determining flag is set. */
4020 if (hweight32(flags & PIDFD_SEND_SIGNAL_FLAGS) > 1)
4021 return -EINVAL;
4022
4023 CLASS(fd, f)(pidfd);
4024 if (fd_empty(f))
4025 return -EBADF;
4026
4027 /* Is this a pidfd? */
4028 pid = pidfd_to_pid(fd_file(f));
4029 if (IS_ERR(pid))
4030 return PTR_ERR(pid);
4031
4032 if (!access_pidfd_pidns(pid))
4033 return -EINVAL;
4034
4035 switch (flags) {
4036 case 0:
4037 /* Infer scope from the type of pidfd. */
4038 if (fd_file(f)->f_flags & PIDFD_THREAD)
4039 type = PIDTYPE_PID;
4040 else
4041 type = PIDTYPE_TGID;
4042 break;
4043 case PIDFD_SIGNAL_THREAD:
4044 type = PIDTYPE_PID;
4045 break;
4046 case PIDFD_SIGNAL_THREAD_GROUP:
4047 type = PIDTYPE_TGID;
4048 break;
4049 case PIDFD_SIGNAL_PROCESS_GROUP:
4050 type = PIDTYPE_PGID;
4051 break;
4052 }
4053
4054 if (info) {
4055 ret = copy_siginfo_from_user_any(&kinfo, info);
4056 if (unlikely(ret))
4057 return ret;
4058
4059 if (unlikely(sig != kinfo.si_signo))
4060 return -EINVAL;
4061
4062 /* Only allow sending arbitrary signals to yourself. */
4063 if ((task_pid(current) != pid || type > PIDTYPE_TGID) &&
4064 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
4065 return -EPERM;
4066 } else {
4067 prepare_kill_siginfo(sig, &kinfo, type);
4068 }
4069
4070 if (type == PIDTYPE_PGID)
4071 return kill_pgrp_info(sig, &kinfo, pid);
4072 else
4073 return kill_pid_info_type(sig, &kinfo, pid, type);
4074 }
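
/*
 * Illustrative userspace usage (a sketch): take a pidfd and signal
 * the thread group through it, which is immune to PID reuse:
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	if (pidfd >= 0) {
 *		syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *		close(pidfd);
 *	}
 *
 * A NULL @info makes the kernel synthesize a SI_USER siginfo via
 * prepare_kill_siginfo(), exactly as kill(2) would; one of the
 * PIDFD_SIGNAL_* flags can be passed to pick a different scope.
 */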
4075
4076 static int
4077 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
4078 {
4079 struct task_struct *p;
4080 int error = -ESRCH;
4081
4082 rcu_read_lock();
4083 p = find_task_by_vpid(pid);
4084 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
4085 error = check_kill_permission(sig, info, p);
4086 /*
4087 * The null signal is a permissions and process existence
4088 * probe. No signal is actually delivered.
4089 */
4090 if (!error && sig) {
4091 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
4092 /*
4093 * If lock_task_sighand() failed we pretend the task
4094 * dies after receiving the signal. The window is tiny,
4095 * and the signal is private anyway.
4096 */
4097 if (unlikely(error == -ESRCH))
4098 error = 0;
4099 }
4100 }
4101 rcu_read_unlock();
4102
4103 return error;
4104 }
4105
4106 static int do_tkill(pid_t tgid, pid_t pid, int sig)
4107 {
4108 struct kernel_siginfo info;
4109
4110 prepare_kill_siginfo(sig, &info, PIDTYPE_PID);
4111
4112 return do_send_specific(tgid, pid, sig, &info);
4113 }
4114
4115 /**
4116 * sys_tgkill - send signal to one specific thread
4117 * @tgid: the thread group ID of the thread
4118 * @pid: the PID of the thread
4119 * @sig: signal to be sent
4120 *
4121 * This syscall also checks @tgid and returns -ESRCH even if the PID
4122 * exists but no longer belongs to the target thread group. This
4123 * solves the problem of threads exiting and PIDs getting reused.
4124 */
4125 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
4126 {
4127 /* This is only valid for single tasks */
4128 if (pid <= 0 || tgid <= 0)
4129 return -EINVAL;
4130
4131 return do_tkill(tgid, pid, sig);
4132 }
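
/*
 * Illustrative userspace usage (a sketch): signal one thread of the
 * current process; older libcs need a raw syscall here:
 *
 *	pid_t tid = syscall(SYS_gettid);
 *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 */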
4133
4134 /**
4135 * sys_tkill - send signal to one specific task
4136 * @pid: the PID of the task
4137 * @sig: signal to be sent
4138 *
4139 * Send a signal to only one task, even if it's a CLONE_THREAD task.
4140 */
4141 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
4142 {
4143 /* This is only valid for single tasks */
4144 if (pid <= 0)
4145 return -EINVAL;
4146
4147 return do_tkill(0, pid, sig);
4148 }
4149
4150 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
4151 {
4152 /* Not even root can pretend to send signals from the kernel.
4153 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4154 */
4155 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4156 (task_pid_vnr(current) != pid))
4157 return -EPERM;
4158
4159 /* POSIX.1b doesn't mention process groups. */
4160 return kill_proc_info(sig, info, pid);
4161 }
4162
4163 /**
4164 * sys_rt_sigqueueinfo - send signal information to a process
4165 * @pid: the PID of the process
4166 * @sig: signal to be sent
4167 * @uinfo: signal info to be sent
4168 */
4169 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
4170 siginfo_t __user *, uinfo)
4171 {
4172 kernel_siginfo_t info;
4173 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4174 if (unlikely(ret))
4175 return ret;
4176 return do_rt_sigqueueinfo(pid, sig, &info);
4177 }
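
/*
 * Userspace normally reaches this through sigqueue(3), which sets
 * si_code to SI_QUEUE (negative, hence SIL_RT) so that the
 * impersonation check in do_rt_sigqueueinfo() passes. A sketch:
 *
 *	union sigval val = { .sival_int = 42 };
 *	sigqueue(target_pid, SIGRTMIN, val);
 */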
4178
4179 #ifdef CONFIG_COMPAT
4180 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4181 compat_pid_t, pid,
4182 int, sig,
4183 struct compat_siginfo __user *, uinfo)
4184 {
4185 kernel_siginfo_t info;
4186 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4187 if (unlikely(ret))
4188 return ret;
4189 return do_rt_sigqueueinfo(pid, sig, &info);
4190 }
4191 #endif
4192
4193 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4194 {
4195 /* This is only valid for single tasks */
4196 if (pid <= 0 || tgid <= 0)
4197 return -EINVAL;
4198
4199 /* Not even root can pretend to send signals from the kernel.
4200 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4201 */
4202 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4203 (task_pid_vnr(current) != pid))
4204 return -EPERM;
4205
4206 return do_send_specific(tgid, pid, sig, info);
4207 }
4208
4209 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4210 siginfo_t __user *, uinfo)
4211 {
4212 kernel_siginfo_t info;
4213 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4214 if (unlikely(ret))
4215 return ret;
4216 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4217 }
4218
4219 #ifdef CONFIG_COMPAT
4220 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4221 compat_pid_t, tgid,
4222 compat_pid_t, pid,
4223 int, sig,
4224 struct compat_siginfo __user *, uinfo)
4225 {
4226 kernel_siginfo_t info;
4227 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4228 if (unlikely(ret))
4229 return ret;
4230 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4231 }
4232 #endif
4233
4234 /*
4235 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4236 */
4237 void kernel_sigaction(int sig, __sighandler_t action)
4238 {
4239 spin_lock_irq(&current->sighand->siglock);
4240 current->sighand->action[sig - 1].sa.sa_handler = action;
4241 if (action == SIG_IGN) {
4242 sigset_t mask;
4243 
4244 sigemptyset(&mask);
4245 sigaddset(&mask, sig);
4246 
4247 flush_sigqueue_mask(current, &mask, &current->signal->shared_pending);
4248 flush_sigqueue_mask(current, &mask, &current->pending);
4249 recalc_sigpending();
4250 }
4251 spin_unlock_irq(&current->sighand->siglock);
4252 }
4253 EXPORT_SYMBOL(kernel_sigaction);
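
/*
 * Kthreads normally use the allow_signal()/disallow_signal() helpers,
 * which are thin wrappers around this function. A sketch of a kthread
 * main loop that wants to react to SIGTERM:
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		schedule_timeout_interruptible(HZ);
 *		if (signal_pending(current))
 *			break;
 *	}
 */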
4254
4255 void __weak sigaction_compat_abi(struct k_sigaction *act,
4256 struct k_sigaction *oact)
4257 {
4258 }
4259
4260 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4261 {
4262 struct task_struct *p = current, *t;
4263 struct k_sigaction *k;
4264 sigset_t mask;
4265
4266 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4267 return -EINVAL;
4268
4269 k = &p->sighand->action[sig-1];
4270
4271 spin_lock_irq(&p->sighand->siglock);
4272 if (k->sa.sa_flags & SA_IMMUTABLE) {
4273 spin_unlock_irq(&p->sighand->siglock);
4274 return -EINVAL;
4275 }
4276 if (oact)
4277 *oact = *k;
4278
4279 /*
4280 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4281 * e.g. by having an architecture use the bit in their uapi.
4282 */
4283 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4284
4285 /*
4286 * Clear unknown flag bits in order to allow userspace to detect missing
4287 * support for flag bits and to allow the kernel to use non-uapi bits
4288 * internally.
4289 */
4290 if (act)
4291 act->sa.sa_flags &= UAPI_SA_FLAGS;
4292 if (oact)
4293 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4294
4295 sigaction_compat_abi(act, oact);
4296
4297 if (act) {
4298 bool was_ignored = k->sa.sa_handler == SIG_IGN;
4299
4300 sigdelsetmask(&act->sa.sa_mask,
4301 sigmask(SIGKILL) | sigmask(SIGSTOP));
4302 *k = *act;
4303 /*
4304 * POSIX 3.3.1.3:
4305 * "Setting a signal action to SIG_IGN for a signal that is
4306 * pending shall cause the pending signal to be discarded,
4307 * whether or not it is blocked."
4308 *
4309 * "Setting a signal action to SIG_DFL for a signal that is
4310 * pending and whose default action is to ignore the signal
4311 * (for example, SIGCHLD), shall cause the pending signal to
4312 * be discarded, whether or not it is blocked"
4313 */
4314 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4315 sigemptyset(&mask);
4316 sigaddset(&mask, sig);
4317 flush_sigqueue_mask(p, &mask, &p->signal->shared_pending);
4318 for_each_thread(p, t)
4319 flush_sigqueue_mask(p, &mask, &t->pending);
4320 } else if (was_ignored) {
4321 posixtimer_sig_unignore(p, sig);
4322 }
4323 }
4324
4325 spin_unlock_irq(&p->sighand->siglock);
4326 return 0;
4327 }
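
/*
 * Illustrative userspace counterpart (a sketch): install a handler
 * with sigaction(2). Because the code above clears flag bits outside
 * UAPI_SA_FLAGS, a caller can detect unsupported flags by reading the
 * action back through oact:
 *
 *	struct sigaction sa = { .sa_handler = on_int,
 *				.sa_flags = SA_RESTART };
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGINT, &sa, NULL);
 */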
4328
4329 #ifdef CONFIG_DYNAMIC_SIGFRAME
4330 static inline void sigaltstack_lock(void)
4331 __acquires(&current->sighand->siglock)
4332 {
4333 spin_lock_irq(&current->sighand->siglock);
4334 }
4335
4336 static inline void sigaltstack_unlock(void)
4337 __releases(&current->sighand->siglock)
4338 {
4339 spin_unlock_irq(&current->sighand->siglock);
4340 }
4341 #else
4342 static inline void sigaltstack_lock(void) { }
4343 static inline void sigaltstack_unlock(void) { }
4344 #endif
4345
4346 static int
4347 do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
4348 size_t min_ss_size)
4349 {
4350 struct task_struct *t = current;
4351 int ret = 0;
4352
4353 if (oss) {
4354 memset(oss, 0, sizeof(stack_t));
4355 oss->ss_sp = (void __user *) t->sas_ss_sp;
4356 oss->ss_size = t->sas_ss_size;
4357 oss->ss_flags = sas_ss_flags(sp) |
4358 (current->sas_ss_flags & SS_FLAG_BITS);
4359 }
4360
4361 if (ss) {
4362 void __user *ss_sp = ss->ss_sp;
4363 size_t ss_size = ss->ss_size;
4364 unsigned ss_flags = ss->ss_flags;
4365 int ss_mode;
4366
4367 if (unlikely(on_sig_stack(sp)))
4368 return -EPERM;
4369
4370 ss_mode = ss_flags & ~SS_FLAG_BITS;
4371 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4372 ss_mode != 0))
4373 return -EINVAL;
4374
4375 /*
4376 * Return before taking any locks if no actual
4377 * sigaltstack changes were requested.
4378 */
4379 if (t->sas_ss_sp == (unsigned long)ss_sp &&
4380 t->sas_ss_size == ss_size &&
4381 t->sas_ss_flags == ss_flags)
4382 return 0;
4383
4384 sigaltstack_lock();
4385 if (ss_mode == SS_DISABLE) {
4386 ss_size = 0;
4387 ss_sp = NULL;
4388 } else {
4389 if (unlikely(ss_size < min_ss_size))
4390 ret = -ENOMEM;
4391 if (!sigaltstack_size_valid(ss_size))
4392 ret = -ENOMEM;
4393 }
4394 if (!ret) {
4395 t->sas_ss_sp = (unsigned long) ss_sp;
4396 t->sas_ss_size = ss_size;
4397 t->sas_ss_flags = ss_flags;
4398 }
4399 sigaltstack_unlock();
4400 }
4401 return ret;
4402 }
4403
4404 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
4405 {
4406 stack_t new, old;
4407 int err;
4408 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4409 return -EFAULT;
4410 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4411 current_user_stack_pointer(),
4412 MINSIGSTKSZ);
4413 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4414 err = -EFAULT;
4415 return err;
4416 }
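
/*
 * Illustrative userspace pairing (a sketch): install an alternate
 * stack and request it with SA_ONSTACK, so that even a stack-overflow
 * SIGSEGV can still run its handler:
 *
 *	stack_t ss = { .ss_sp = malloc(SIGSTKSZ), .ss_size = SIGSTKSZ };
 *	sigaltstack(&ss, NULL);
 *
 *	struct sigaction sa = { .sa_sigaction = segv_handler,
 *				.sa_flags = SA_SIGINFO | SA_ONSTACK };
 *	sigaction(SIGSEGV, &sa, NULL);
 */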
4417
4418 int restore_altstack(const stack_t __user *uss)
4419 {
4420 stack_t new;
4421 if (copy_from_user(&new, uss, sizeof(stack_t)))
4422 return -EFAULT;
4423 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4424 MINSIGSTKSZ);
4425 /* squash all but EFAULT for now */
4426 return 0;
4427 }
4428
4429 int __save_altstack(stack_t __user *uss, unsigned long sp)
4430 {
4431 struct task_struct *t = current;
4432 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4433 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4434 __put_user(t->sas_ss_size, &uss->ss_size);
4435 return err;
4436 }
4437
4438 #ifdef CONFIG_COMPAT
4439 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4440 compat_stack_t __user *uoss_ptr)
4441 {
4442 stack_t uss, uoss;
4443 int ret;
4444
4445 if (uss_ptr) {
4446 compat_stack_t uss32;
4447 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4448 return -EFAULT;
4449 uss.ss_sp = compat_ptr(uss32.ss_sp);
4450 uss.ss_flags = uss32.ss_flags;
4451 uss.ss_size = uss32.ss_size;
4452 }
4453 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4454 compat_user_stack_pointer(),
4455 COMPAT_MINSIGSTKSZ);
4456 if (ret >= 0 && uoss_ptr) {
4457 compat_stack_t old;
4458 memset(&old, 0, sizeof(old));
4459 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4460 old.ss_flags = uoss.ss_flags;
4461 old.ss_size = uoss.ss_size;
4462 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4463 ret = -EFAULT;
4464 }
4465 return ret;
4466 }
4467
4468 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4469 const compat_stack_t __user *, uss_ptr,
4470 compat_stack_t __user *, uoss_ptr)
4471 {
4472 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4473 }
4474
4475 int compat_restore_altstack(const compat_stack_t __user *uss)
4476 {
4477 int err = do_compat_sigaltstack(uss, NULL);
4478 /* squash all but -EFAULT for now */
4479 return err == -EFAULT ? err : 0;
4480 }
4481
4482 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4483 {
4484 int err;
4485 struct task_struct *t = current;
4486 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4487 &uss->ss_sp) |
4488 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4489 __put_user(t->sas_ss_size, &uss->ss_size);
4490 return err;
4491 }
4492 #endif
4493
4494 #ifdef __ARCH_WANT_SYS_SIGPENDING
4495
4496 /**
4497 * sys_sigpending - examine pending signals
4498 * @uset: where mask of pending signal is returned
4499 */
4500 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4501 {
4502 sigset_t set;
4503
4504 if (sizeof(old_sigset_t) > sizeof(*uset))
4505 return -EINVAL;
4506
4507 do_sigpending(&set);
4508
4509 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4510 return -EFAULT;
4511
4512 return 0;
4513 }
4514
4515 #ifdef CONFIG_COMPAT
4516 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4517 {
4518 sigset_t set;
4519
4520 do_sigpending(&set);
4521
4522 return put_user(set.sig[0], set32);
4523 }
4524 #endif
4525
4526 #endif
4527
4528 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4529 /**
4530 * sys_sigprocmask - examine and change blocked signals
4531 * @how: whether to add, remove, or set signals
4532 * @nset: signals to add or remove (if non-null)
4533 * @oset: previous value of signal mask if non-null
4534 *
4535 * Some platforms have their own version with special arguments;
4536 * others support only sys_rt_sigprocmask.
4537 */
4538
4539 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4540 old_sigset_t __user *, oset)
4541 {
4542 old_sigset_t old_set, new_set;
4543 sigset_t new_blocked;
4544
4545 old_set = current->blocked.sig[0];
4546
4547 if (nset) {
4548 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4549 return -EFAULT;
4550
4551 new_blocked = current->blocked;
4552
4553 switch (how) {
4554 case SIG_BLOCK:
4555 sigaddsetmask(&new_blocked, new_set);
4556 break;
4557 case SIG_UNBLOCK:
4558 sigdelsetmask(&new_blocked, new_set);
4559 break;
4560 case SIG_SETMASK:
4561 new_blocked.sig[0] = new_set;
4562 break;
4563 default:
4564 return -EINVAL;
4565 }
4566
4567 set_current_blocked(&new_blocked);
4568 }
4569
4570 if (oset) {
4571 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4572 return -EFAULT;
4573 }
4574
4575 return 0;
4576 }
4577 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4578
4579 #ifndef CONFIG_ODD_RT_SIGACTION
4580 /**
4581 * sys_rt_sigaction - alter an action taken by a process
4582 * @sig: signal whose action is to be changed
4583 * @act: new sigaction
4584 * @oact: used to save the previous sigaction
4585 * @sigsetsize: size of sigset_t type
4586 */
4587 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4588 const struct sigaction __user *, act,
4589 struct sigaction __user *, oact,
4590 size_t, sigsetsize)
4591 {
4592 struct k_sigaction new_sa, old_sa;
4593 int ret;
4594
4595 /* XXX: Don't preclude handling different sized sigset_t's. */
4596 if (sigsetsize != sizeof(sigset_t))
4597 return -EINVAL;
4598
4599 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4600 return -EFAULT;
4601
4602 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4603 if (ret)
4604 return ret;
4605
4606 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4607 return -EFAULT;
4608
4609 return 0;
4610 }
4611 #ifdef CONFIG_COMPAT
4612 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4613 const struct compat_sigaction __user *, act,
4614 struct compat_sigaction __user *, oact,
4615 compat_size_t, sigsetsize)
4616 {
4617 struct k_sigaction new_ka, old_ka;
4618 #ifdef __ARCH_HAS_SA_RESTORER
4619 compat_uptr_t restorer;
4620 #endif
4621 int ret;
4622
4623 /* XXX: Don't preclude handling different sized sigset_t's. */
4624 if (sigsetsize != sizeof(compat_sigset_t))
4625 return -EINVAL;
4626
4627 if (act) {
4628 compat_uptr_t handler;
4629 ret = get_user(handler, &act->sa_handler);
4630 new_ka.sa.sa_handler = compat_ptr(handler);
4631 #ifdef __ARCH_HAS_SA_RESTORER
4632 ret |= get_user(restorer, &act->sa_restorer);
4633 new_ka.sa.sa_restorer = compat_ptr(restorer);
4634 #endif
4635 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4636 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4637 if (ret)
4638 return -EFAULT;
4639 }
4640
4641 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4642 if (!ret && oact) {
4643 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4644 &oact->sa_handler);
4645 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4646 sizeof(oact->sa_mask));
4647 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4648 #ifdef __ARCH_HAS_SA_RESTORER
4649 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4650 &oact->sa_restorer);
4651 #endif
4652 }
4653 return ret;
4654 }
4655 #endif
4656 #endif /* !CONFIG_ODD_RT_SIGACTION */
4657
4658 #ifdef CONFIG_OLD_SIGACTION
4659 SYSCALL_DEFINE3(sigaction, int, sig,
4660 const struct old_sigaction __user *, act,
4661 struct old_sigaction __user *, oact)
4662 {
4663 struct k_sigaction new_ka, old_ka;
4664 int ret;
4665
4666 if (act) {
4667 old_sigset_t mask;
4668 if (!access_ok(act, sizeof(*act)) ||
4669 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4670 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4671 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4672 __get_user(mask, &act->sa_mask))
4673 return -EFAULT;
4674 #ifdef __ARCH_HAS_KA_RESTORER
4675 new_ka.ka_restorer = NULL;
4676 #endif
4677 siginitset(&new_ka.sa.sa_mask, mask);
4678 }
4679
4680 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4681
4682 if (!ret && oact) {
4683 if (!access_ok(oact, sizeof(*oact)) ||
4684 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4685 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4686 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4687 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4688 return -EFAULT;
4689 }
4690
4691 return ret;
4692 }
4693 #endif
4694 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4695 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4696 const struct compat_old_sigaction __user *, act,
4697 struct compat_old_sigaction __user *, oact)
4698 {
4699 struct k_sigaction new_ka, old_ka;
4700 int ret;
4701 compat_old_sigset_t mask;
4702 compat_uptr_t handler, restorer;
4703
4704 if (act) {
4705 if (!access_ok(act, sizeof(*act)) ||
4706 __get_user(handler, &act->sa_handler) ||
4707 __get_user(restorer, &act->sa_restorer) ||
4708 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4709 __get_user(mask, &act->sa_mask))
4710 return -EFAULT;
4711
4712 #ifdef __ARCH_HAS_KA_RESTORER
4713 new_ka.ka_restorer = NULL;
4714 #endif
4715 new_ka.sa.sa_handler = compat_ptr(handler);
4716 new_ka.sa.sa_restorer = compat_ptr(restorer);
4717 siginitset(&new_ka.sa.sa_mask, mask);
4718 }
4719
4720 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4721
4722 if (!ret && oact) {
4723 if (!access_ok(oact, sizeof(*oact)) ||
4724 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4725 &oact->sa_handler) ||
4726 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4727 &oact->sa_restorer) ||
4728 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4729 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4730 return -EFAULT;
4731 }
4732 return ret;
4733 }
4734 #endif
4735
4736 #ifdef CONFIG_SGETMASK_SYSCALL
4737
4738 /*
4739 * For backwards compatibility. Functionality superseded by sigprocmask.
4740 */
4741 SYSCALL_DEFINE0(sgetmask)
4742 {
4743 /* SMP safe */
4744 return current->blocked.sig[0];
4745 }
4746
4747 SYSCALL_DEFINE1(ssetmask, int, newmask)
4748 {
4749 int old = current->blocked.sig[0];
4750 sigset_t newset;
4751
4752 siginitset(&newset, newmask);
4753 set_current_blocked(&newset);
4754
4755 return old;
4756 }
4757 #endif /* CONFIG_SGETMASK_SYSCALL */
4758
4759 #ifdef __ARCH_WANT_SYS_SIGNAL
4760 /*
4761 * For backwards compatibility. Functionality superseded by sigaction.
4762 */
4763 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4764 {
4765 struct k_sigaction new_sa, old_sa;
4766 int ret;
4767
4768 new_sa.sa.sa_handler = handler;
4769 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4770 sigemptyset(&new_sa.sa.sa_mask);
4771
4772 ret = do_sigaction(sig, &new_sa, &old_sa);
4773
4774 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4775 }
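
/*
 * SA_ONESHOT gives signal(2) its historical SysV semantics: the
 * action is reset to SIG_DFL on delivery, so a portable userspace
 * handler re-arms itself first (a sketch):
 *
 *	void on_sig(int sig)
 *	{
 *		signal(sig, on_sig);
 *	}
 */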
4776 #endif /* __ARCH_WANT_SYS_SIGNAL */
4777
4778 #ifdef __ARCH_WANT_SYS_PAUSE
4779
4780 SYSCALL_DEFINE0(pause)
4781 {
4782 while (!signal_pending(current)) {
4783 __set_current_state(TASK_INTERRUPTIBLE);
4784 schedule();
4785 }
4786 return -ERESTARTNOHAND;
4787 }
4788
4789 #endif
4790
4791 static int sigsuspend(sigset_t *set)
4792 {
4793 current->saved_sigmask = current->blocked;
4794 set_current_blocked(set);
4795
4796 while (!signal_pending(current)) {
4797 __set_current_state(TASK_INTERRUPTIBLE);
4798 schedule();
4799 }
4800 set_restore_sigmask();
4801 return -ERESTARTNOHAND;
4802 }
4803
4804 /**
4805 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
4806 * until a signal is received
4807 * @unewset: new signal mask value
4808 * @sigsetsize: size of sigset_t type
4809 */
4810 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4811 {
4812 sigset_t newset;
4813
4814 /* XXX: Don't preclude handling different sized sigset_t's. */
4815 if (sigsetsize != sizeof(sigset_t))
4816 return -EINVAL;
4817
4818 if (copy_from_user(&newset, unewset, sizeof(newset)))
4819 return -EFAULT;
4820 return sigsuspend(&newset);
4821 }
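
/*
 * The canonical race-free wait (userspace sketch; "got_signal" is
 * assumed to be a volatile sig_atomic_t set by the handler): block
 * the signal, test the condition, then atomically swap in the old
 * mask and sleep in a single step:
 *
 *	sigset_t blk, old;
 *	sigemptyset(&blk);
 *	sigaddset(&blk, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &blk, &old);
 *	while (!got_signal)
 *		sigsuspend(&old);	-- returns -1 with errno == EINTR
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */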
4822
4823 #ifdef CONFIG_COMPAT
4824 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4825 {
4826 sigset_t newset;
4827
4828 /* XXX: Don't preclude handling different sized sigset_t's. */
4829 if (sigsetsize != sizeof(sigset_t))
4830 return -EINVAL;
4831
4832 if (get_compat_sigset(&newset, unewset))
4833 return -EFAULT;
4834 return sigsuspend(&newset);
4835 }
4836 #endif
4837
4838 #ifdef CONFIG_OLD_SIGSUSPEND
4839 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4840 {
4841 sigset_t blocked;
4842 siginitset(&blocked, mask);
4843 return sigsuspend(&blocked);
4844 }
4845 #endif
4846 #ifdef CONFIG_OLD_SIGSUSPEND3
4847 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4848 {
4849 sigset_t blocked;
4850 siginitset(&blocked, mask);
4851 return sigsuspend(&blocked);
4852 }
4853 #endif
4854
4855 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4856 {
4857 return NULL;
4858 }
4859
4860 static inline void siginfo_buildtime_checks(void)
4861 {
4862 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4863
4864 /* Verify the offsets in the two siginfos match */
4865 #define CHECK_OFFSET(field) \
4866 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4867
4868 /* kill */
4869 CHECK_OFFSET(si_pid);
4870 CHECK_OFFSET(si_uid);
4871
4872 /* timer */
4873 CHECK_OFFSET(si_tid);
4874 CHECK_OFFSET(si_overrun);
4875 CHECK_OFFSET(si_value);
4876
4877 /* rt */
4878 CHECK_OFFSET(si_pid);
4879 CHECK_OFFSET(si_uid);
4880 CHECK_OFFSET(si_value);
4881
4882 /* sigchld */
4883 CHECK_OFFSET(si_pid);
4884 CHECK_OFFSET(si_uid);
4885 CHECK_OFFSET(si_status);
4886 CHECK_OFFSET(si_utime);
4887 CHECK_OFFSET(si_stime);
4888
4889 /* sigfault */
4890 CHECK_OFFSET(si_addr);
4891 CHECK_OFFSET(si_trapno);
4892 CHECK_OFFSET(si_addr_lsb);
4893 CHECK_OFFSET(si_lower);
4894 CHECK_OFFSET(si_upper);
4895 CHECK_OFFSET(si_pkey);
4896 CHECK_OFFSET(si_perf_data);
4897 CHECK_OFFSET(si_perf_type);
4898 CHECK_OFFSET(si_perf_flags);
4899
4900 /* sigpoll */
4901 CHECK_OFFSET(si_band);
4902 CHECK_OFFSET(si_fd);
4903
4904 /* sigsys */
4905 CHECK_OFFSET(si_call_addr);
4906 CHECK_OFFSET(si_syscall);
4907 CHECK_OFFSET(si_arch);
4908 #undef CHECK_OFFSET
4909
4910 /* usb asyncio */
4911 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4912 offsetof(struct siginfo, si_addr));
4913 if (sizeof(int) == sizeof(void __user *)) {
4914 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4915 sizeof(void __user *));
4916 } else {
4917 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4918 sizeof_field(struct siginfo, si_uid)) !=
4919 sizeof(void __user *));
4920 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4921 offsetof(struct siginfo, si_uid));
4922 }
4923 #ifdef CONFIG_COMPAT
4924 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4925 offsetof(struct compat_siginfo, si_addr));
4926 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4927 sizeof(compat_uptr_t));
4928 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4929 sizeof_field(struct siginfo, si_pid));
4930 #endif
4931 }
4932
4933 #if defined(CONFIG_SYSCTL)
4934 static struct ctl_table signal_debug_table[] = {
4935 #ifdef CONFIG_SYSCTL_EXCEPTION_TRACE
4936 {
4937 .procname = "exception-trace",
4938 .data = &show_unhandled_signals,
4939 .maxlen = sizeof(int),
4940 .mode = 0644,
4941 .proc_handler = proc_dointvec
4942 },
4943 #endif
4944 };
4945
4946 static int __init init_signal_sysctls(void)
4947 {
4948 register_sysctl_init("debug", signal_debug_table);
4949 return 0;
4950 }
4951 early_initcall(init_signal_sysctls);
4952 #endif /* CONFIG_SYSCTL */
4953
4954 void __init signals_init(void)
4955 {
4956 siginfo_buildtime_checks();
4957
4958 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
4959 }
4960
4961 #ifdef CONFIG_KGDB_KDB
4962 #include <linux/kdb.h>
4963 /*
4964 * kdb_send_sig - Allows kdb to send signals without exposing
4965 * signal internals. This function checks if the required locks are
4966 * available before calling the main signal code, to avoid kdb
4967 * deadlocks.
4968 */
4969 void kdb_send_sig(struct task_struct *t, int sig)
4970 {
4971 static struct task_struct *kdb_prev_t;
4972 int new_t, ret;
4973 if (!spin_trylock(&t->sighand->siglock)) {
4974 kdb_printf("Can't do kill command now.\n"
4975 "The sigmask lock is held somewhere else in "
4976 "kernel, try again later\n");
4977 return;
4978 }
4979 new_t = kdb_prev_t != t;
4980 kdb_prev_t = t;
4981 if (!task_is_running(t) && new_t) {
4982 spin_unlock(&t->sighand->siglock);
4983 kdb_printf("Process is not RUNNING, sending a signal from "
4984 "kdb risks deadlock\n"
4985 "on the run queue locks. "
4986 "The signal has _not_ been sent.\n"
4987 "Reissue the kill command if you want to risk "
4988 "the deadlock.\n");
4989 return;
4990 }
4991 ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4992 spin_unlock(&t->sighand->siglock);
4993 if (ret)
4994 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4995 sig, t->pid);
4996 else
4997 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4998 }
4999 #endif /* CONFIG_KGDB_KDB */
5000