xref: /linux/arch/um/os-Linux/signal.c (revision 399ead3a6d76cbdd29a716660db5c84a314dab70)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2004 PathScale, Inc
 * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <stdlib.h>
#include <stdarg.h>
#include <stdbool.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <strings.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <sysdep/mcontext.h>
#include <um_malloc.h>
#include <sys/ucontext.h>
#include <timetravel.h>
#include "internal.h"

void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *, void *mc) = {
	[SIGTRAP]	= relay_signal,
	[SIGFPE]	= relay_signal,
	[SIGILL]	= relay_signal,
	[SIGWINCH]	= winch,
	[SIGBUS]	= relay_signal,
	[SIGSEGV]	= segv_handler,
	[SIGIO]		= sigio_handler,
	[SIGCHLD]	= sigchld_handler,
};

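/*
 * Dispatch a signal to the handler registered in sig_info[], preserving
 * errno across the call.  For SIGSEGV the register and fault information
 * is pulled out of the mcontext first; for everything except the IRQ
 * signals (SIGIO, SIGWINCH, SIGCHLD) signals are re-enabled before the
 * handler runs.
 */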
static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
{
	struct uml_pt_regs r;
	int save_errno = errno;

	r.is_user = 0;
	if (sig == SIGSEGV) {
		/* For segfaults, we want the data from the sigcontext. */
		get_regs_from_mc(&r, mc);
		GET_FAULTINFO_FROM_MC(r.faultinfo, mc);
	}

	/* enable signals if sig isn't an IRQ signal */
	if ((sig != SIGIO) && (sig != SIGWINCH) && (sig != SIGCHLD))
		unblock_signals_trace();

	(*sig_info[sig])(sig, si, &r, mc);

	errno = save_errno;
}

/*
 * These are the asynchronous signals.  SIGPROF is excluded because we want to
 * be able to profile all of UML, not just the non-critical sections.  If
 * profiling is not thread-safe, then that is not my problem.  We can disable
 * profiling when SMP is enabled in that case.
 */
#define SIGIO_BIT 0
#define SIGIO_MASK (1 << SIGIO_BIT)

#define SIGALRM_BIT 1
#define SIGALRM_MASK (1 << SIGALRM_BIT)

#define SIGCHLD_BIT 2
#define SIGCHLD_MASK (1 << SIGCHLD_BIT)

__thread int signals_enabled;
#if IS_ENABLED(CONFIG_UML_TIME_TRAVEL_SUPPORT)
static int signals_blocked, signals_blocked_pending;
#endif
static __thread unsigned int signals_pending;
static __thread unsigned int signals_active;

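/*
 * Outer handler for the non-timer signals.  If signals are currently
 * soft-disabled, SIGIO and SIGCHLD are only recorded in signals_pending
 * and replayed later by unblock_signals(); otherwise signals are blocked,
 * the signal is handled via sig_handler_common() and the previous enable
 * state is restored.
 */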
static void sig_handler(int sig, struct siginfo *si, mcontext_t *mc)
{
	int enabled = signals_enabled;

#if IS_ENABLED(CONFIG_UML_TIME_TRAVEL_SUPPORT)
	if ((signals_blocked ||
	     __atomic_load_n(&signals_blocked_pending, __ATOMIC_SEQ_CST)) &&
	    (sig == SIGIO)) {
		/* increment so unblock will do another round */
		__atomic_add_fetch(&signals_blocked_pending, 1,
				   __ATOMIC_SEQ_CST);
		return;
	}
#endif

	if (!enabled && (sig == SIGIO)) {
		/*
		 * In TT_MODE_EXTERNAL we still need to call the time-travel
		 * handlers; they mark signals_pending themselves (only if
		 * necessary).
		 * Note we won't get here if signals are hard-blocked
		 * (which is handled above); in that case the hard
		 * unblock will handle things.
		 */
		if (time_travel_mode == TT_MODE_EXTERNAL)
			sigio_run_timetravel_handlers();
		else
			signals_pending |= SIGIO_MASK;
		return;
	}

	if (!enabled && (sig == SIGCHLD)) {
		signals_pending |= SIGCHLD_MASK;
		return;
	}

	block_signals_trace();

	sig_handler_common(sig, si, mc);

	um_set_signals_trace(enabled);
}

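/*
 * Run timer_handler() with register state taken from the mcontext when
 * one is available (a real SIGALRM), or with zeroed registers when the
 * timer expiry is being replayed without a signal frame.
 */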
static void timer_real_alarm_handler(mcontext_t *mc)
{
	struct uml_pt_regs regs;

	if (mc != NULL)
		get_regs_from_mc(&regs, mc);
	else
		memset(&regs, 0, sizeof(regs));
	timer_handler(SIGALRM, NULL, &regs);
}

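/*
 * SIGALRM gets its own outer handler: if signals are soft-disabled the
 * expiry is only marked pending, otherwise the real handler runs with
 * signals blocked and SIGALRM_MASK set in signals_active so that
 * unblock_signals() will not re-enter it.
 */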
static void timer_alarm_handler(int sig, struct siginfo *unused_si, mcontext_t *mc)
{
	int enabled;

	enabled = signals_enabled;
	if (!signals_enabled) {
		signals_pending |= SIGALRM_MASK;
		return;
	}

	block_signals_trace();

	signals_active |= SIGALRM_MASK;

	timer_real_alarm_handler(mc);

	signals_active &= ~SIGALRM_MASK;

	um_set_signals_trace(enabled);
}

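/* Deliver a timer tick as if a SIGALRM had arrived, without a signal frame. */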
void deliver_alarm(void)
{
	timer_alarm_handler(SIGALRM, NULL, NULL);
}

void timer_set_signal_handler(void)
{
	set_handler(SIGALRM);
}

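/* Report whether a SIGALRM arrived while signals were soft-disabled. */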
int timer_alarm_pending(void)
{
	return !!(signals_pending & SIGALRM_MASK);
}

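/* Install an alternate signal stack; handlers are registered with SA_ONSTACK (see set_handler()). */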
void set_sigstack(void *sig_stack, int size)
{
	stack_t stack = {
		.ss_flags = 0,
		.ss_sp = sig_stack,
		.ss_size = size
	};

	if (sigaltstack(&stack, NULL) != 0)
		panic("enabling signal stack failed, errno = %d\n", errno);
}

static void sigusr1_handler(int sig, struct siginfo *unused_si, mcontext_t *mc)
{
	uml_pm_wake();
}

void register_pm_wake_signal(void)
{
	set_handler(SIGUSR1);
}

static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = {
	[SIGSEGV] = sig_handler,
	[SIGBUS] = sig_handler,
	[SIGILL] = sig_handler,
	[SIGFPE] = sig_handler,
	[SIGTRAP] = sig_handler,

	[SIGIO] = sig_handler,
	[SIGWINCH] = sig_handler,
	/* SIGCHLD is only actually registered in seccomp mode. */
	[SIGCHLD] = sig_handler,
	[SIGALRM] = timer_alarm_handler,

	[SIGUSR1] = sigusr1_handler,
};

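/*
 * Common entry point installed via sigaction(): pull the mcontext out of
 * the ucontext and hand off to the per-signal handler from handlers[].
 */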
static void hard_handler(int sig, siginfo_t *si, void *p)
{
	ucontext_t *uc = p;
	mcontext_t *mc = &uc->uc_mcontext;

	(*handlers[sig])(sig, (struct siginfo *)si, mc);
}

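/*
 * Register hard_handler() for a signal: SA_SIGINFO and SA_ONSTACK always,
 * SA_NODEFER for SIGSEGV, SA_RESTART for the IRQ signals, with the IRQ
 * signals masked while the handler runs.  The signal is then unblocked
 * in the process mask.
 */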
void set_handler(int sig)
{
	struct sigaction action;
	int flags = SA_SIGINFO | SA_ONSTACK;
	sigset_t sig_mask;

	action.sa_sigaction = hard_handler;

	/* block irq ones */
	sigemptyset(&action.sa_mask);
	sigaddset(&action.sa_mask, SIGIO);
	sigaddset(&action.sa_mask, SIGWINCH);
	sigaddset(&action.sa_mask, SIGALRM);

	if (sig == SIGSEGV)
		flags |= SA_NODEFER;

	if (sigismember(&action.sa_mask, sig))
		flags |= SA_RESTART; /* if it's an irq signal */

	action.sa_flags = flags;
	action.sa_restorer = NULL;
	if (sigaction(sig, &action, NULL) < 0)
		panic("sigaction failed - errno = %d\n", errno);

	sigemptyset(&sig_mask);
	sigaddset(&sig_mask, sig);
	if (sigprocmask(SIG_UNBLOCK, &sig_mask, NULL) < 0)
		panic("sigprocmask failed - errno = %d\n", errno);
}

void send_sigio_to_self(void)
{
	kill(os_getpid(), SIGIO);
}

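/* Block or unblock a single signal in the process mask; returns 0 or -errno. */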
int change_sig(int signal, int on)
{
	sigset_t sigset;

	sigemptyset(&sigset);
	sigaddset(&sigset, signal);
	if (sigprocmask(on ? SIG_UNBLOCK : SIG_BLOCK, &sigset, NULL) < 0)
		return -errno;

	return 0;
}

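/*
 * Flip the per-thread signals_enabled flag off.  While it is 0 the handlers
 * above defer SIGIO, SIGCHLD and SIGALRM by recording them in
 * signals_pending instead of running them.
 */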
static inline void __block_signals(void)
{
	if (!signals_enabled)
		return;

	os_local_ipi_disable();
	barrier();
	signals_enabled = 0;
}

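/* Counterpart of __block_signals(); deferred signals are replayed by unblock_signals(). */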
static inline void __unblock_signals(void)
{
	if (signals_enabled)
		return;

	signals_enabled = 1;
	barrier();
	os_local_ipi_enable();
}

void block_signals(void)
{
	__block_signals();
	/*
	 * This must return with signals disabled, so this barrier
	 * ensures that writes are flushed out before the return.
	 * This might matter if gcc figures out how to inline this and
	 * decides to shuffle this code into the caller.
	 */
	barrier();
}

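/*
 * Re-enable signal handling and replay anything that was deferred while
 * signals were soft-disabled: pending SIGIO, SIGCHLD and timer expiries
 * recorded in signals_pending by the handlers above.
 */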
void unblock_signals(void)
{
	int save_pending;

	if (signals_enabled == 1)
		return;

	__unblock_signals();

#if IS_ENABLED(CONFIG_UML_TIME_TRAVEL_SUPPORT)
	deliver_time_travel_irqs();
#endif

	/*
	 * We loop because the IRQ handler returns with interrupts off.  So,
	 * interrupts may have arrived and we need to re-enable them and
	 * recheck signals_pending.
	 */
	while (1) {
		/*
		 * Save and reset save_pending after enabling signals.  This
		 * way, signals_pending won't be changed while we're reading it.
		 *
		 * Setting signals_enabled and reading signals_pending must
		 * happen in this order, so have the barrier here.
		 */
		barrier();

		save_pending = signals_pending;
		if (save_pending == 0)
			return;

		signals_pending = 0;

		/*
		 * We have pending interrupts, so disable signals, as the
		 * handlers expect them off when they are called.  They will
		 * be enabled again above. We need to trace this, as we're
		 * expected to be enabling interrupts already, but any more
		 * tracing that happens inside the handlers we call for the
		 * pending signals will mess up the tracing state.
		 */
		__block_signals();
		um_trace_signals_off();

		/*
		 * Deal with SIGIO first because the alarm handler might
		 * schedule, leaving the pending SIGIO stranded until we come
		 * back here.
		 *
		 * SIGIO's handler doesn't use siginfo or mcontext,
		 * so they can be NULL.
		 */
		if (save_pending & SIGIO_MASK)
			sig_handler_common(SIGIO, NULL, NULL);

		if (save_pending & SIGCHLD_MASK) {
			struct uml_pt_regs regs = {};

			sigchld_handler(SIGCHLD, NULL, &regs, NULL);
		}

		/* Do not re-enter the timer handler if it is already running. */
		if ((save_pending & SIGALRM_MASK) && (!(signals_active & SIGALRM_MASK)))
			timer_real_alarm_handler(NULL);

		/*
		 * Stop looping only if no SIGIO is pending and we are nested
		 * inside the timer handler; otherwise go around again.
		 */
		if (!(signals_pending & SIGIO_MASK) && (signals_active & SIGALRM_MASK))
			return;

		/* Re-enable signals and trace that we're doing so. */
		um_trace_signals_on();
		__unblock_signals();
	}
}

int um_get_signals(void)
{
	return signals_enabled;
}

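/*
 * Set the soft-enable state and return the previous one, so callers can
 * save and restore it around a critical section, e.g. (hypothetical
 * caller):
 *
 *	int prev = um_set_signals(0);
 *	...critical section...
 *	um_set_signals(prev);
 */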
int um_set_signals(int enable)
{
	int ret;
	if (signals_enabled == enable)
		return enable;

	ret = signals_enabled;
	if (enable)
		unblock_signals();
	else
		block_signals();

	return ret;
}

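/*
 * Same as um_set_signals(), but uses the block_signals_trace()/
 * unblock_signals_trace() variants.
 */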
int um_set_signals_trace(int enable)
{
	int ret;
	if (signals_enabled == enable)
		return enable;

	ret = signals_enabled;
	if (enable)
		unblock_signals_trace();
	else
		block_signals_trace();

	return ret;
}

#if IS_ENABLED(CONFIG_UML_TIME_TRAVEL_SUPPORT)
void mark_sigio_pending(void)
{
	/*
	 * It would seem that this should be atomic so
	 * it isn't a read-modify-write with a signal
	 * that could happen in the middle, losing the
	 * value set by the signal.
	 *
	 * However, this function is only called when in
	 * time-travel=ext simulation mode, in which case
	 * the only signal ever pending is SIGIO, which
	 * is blocked while this can be called, and the
	 * timer signal (SIGALRM) cannot happen.
	 */
	signals_pending |= SIGIO_MASK;
}

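/*
 * "Hard" blocking for time-travel mode: while signals_blocked is non-zero,
 * an incoming SIGIO is only counted in signals_blocked_pending (see
 * sig_handler()) and replayed later by unblock_signals_hard().  Calls nest.
 */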
void block_signals_hard(void)
{
	signals_blocked++;
	barrier();
}

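/*
 * Drop one level of hard blocking; the outermost caller replays any SIGIO
 * that was counted in signals_blocked_pending while blocked.
 */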
void unblock_signals_hard(void)
{
	static bool unblocking;

	if (!signals_blocked)
		panic("unblocking signals while not blocked");

	if (--signals_blocked)
		return;
	/*
	 * Must be set to 0 before we check pending so the
	 * SIGIO handler will run as normal unless we're still
	 * going to process signals_blocked_pending.
	 */
	barrier();

	/*
	 * Note that block_signals_hard()/unblock_signals_hard() can be called
	 * within the unblock_signals()/sigio_run_timetravel_handlers() below.
	 * This would still be prone to race conditions since it's actually a
	 * call _within_ e.g. vu_req_read_message(), where we observed this
	 * issue, which loops. Thus, if the inner call handles the recorded
	 * pending signals, we can get out of the inner call with the real
	 * signal handler no longer blocked, and still have a race. Thus don't
	 * handle unblocking in the inner call, if it happens, but only in
	 * the outermost call - 'unblocking' serves as an ownership for the
	 * signals_blocked_pending decrement.
	 */
	if (unblocking)
		return;
	unblocking = true;

	while (__atomic_load_n(&signals_blocked_pending, __ATOMIC_SEQ_CST)) {
		if (signals_enabled) {
			/* signals are enabled so we can touch this */
			signals_pending |= SIGIO_MASK;
			/*
			 * this is a bit inefficient, but that's
			 * not really important
			 */
			block_signals();
			unblock_signals();
		} else {
			/*
			 * we need to run time-travel handlers even
			 * if not enabled
			 */
			sigio_run_timetravel_handlers();
		}

		/*
		 * The decrement of signals_blocked_pending must be atomic so
		 * that the signal handler will either happen before or after
		 * the decrement, not during a read-modify-write:
		 *  - If it happens before, it can increment it and we'll
		 *    decrement it and do another round in the loop.
		 *  - If it happens after it'll see 0 for both signals_blocked
		 *    and signals_blocked_pending and thus run the handler as
		 *    usual (subject to signals_enabled, but that's unrelated.)
		 *
		 * Note that a call to unblock_signals_hard() within the calls
		 * to unblock_signals() or sigio_run_timetravel_handlers() above
		 * will do nothing due to the 'unblocking' state, so this cannot
		 * underflow as the only one decrementing will be the outermost
		 * one.
		 */
		if (__atomic_sub_fetch(&signals_blocked_pending, 1,
				       __ATOMIC_SEQ_CST) < 0)
			panic("signals_blocked_pending underflow");
	}

	unblocking = false;
}
#endif