xref: /linux/arch/um/os-Linux/signal.c (revision 831c1926ee728c3e747255f7c0f434762e8e863d)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2004 PathScale, Inc
 * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <stdlib.h>
#include <stdarg.h>
#include <stdbool.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <strings.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <sysdep/mcontext.h>
#include <um_malloc.h>
#include <sys/ucontext.h>
#include <timetravel.h>

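/* Per-signal dispatch table used by sig_handler_common() below. */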
void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
	[SIGTRAP]	= relay_signal,
	[SIGFPE]	= relay_signal,
	[SIGILL]	= relay_signal,
	[SIGWINCH]	= winch,
	[SIGBUS]	= relay_signal,
	[SIGSEGV]	= segv_handler,
	[SIGIO]		= sigio_handler,
};

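/*
 * Common signal dispatch: build a struct uml_pt_regs (and fault info for
 * SIGSEGV) from the mcontext, re-enable signals for non-IRQ signals, call
 * the handler from sig_info[] and restore errno.
 */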
static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
{
	struct uml_pt_regs r;
	int save_errno = errno;

	r.is_user = 0;
	if (sig == SIGSEGV) {
		/* For segfaults, we want the data from the sigcontext. */
		get_regs_from_mc(&r, mc);
		GET_FAULTINFO_FROM_MC(r.faultinfo, mc);
	}

	/* enable signals if sig isn't an IRQ signal */
	if ((sig != SIGIO) && (sig != SIGWINCH))
		unblock_signals_trace();

	(*sig_info[sig])(sig, si, &r);

	errno = save_errno;
}

/*
 * These are the asynchronous signals.  SIGPROF is excluded because we want to
 * be able to profile all of UML, not just the non-critical sections.  If
 * profiling is not thread-safe, then that is not my problem.  We can disable
 * profiling when SMP is enabled in that case.
 */
#define SIGIO_BIT 0
#define SIGIO_MASK (1 << SIGIO_BIT)

#define SIGALRM_BIT 1
#define SIGALRM_MASK (1 << SIGALRM_BIT)

int signals_enabled;
#if IS_ENABLED(CONFIG_UML_TIME_TRAVEL_SUPPORT)
static int signals_blocked, signals_blocked_pending;
#endif
static unsigned int signals_pending;
static unsigned int signals_active = 0;

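/*
 * First-level handler for both faults and IRQ signals: while signals are
 * hard-blocked or soft-disabled a SIGIO is only recorded for later;
 * otherwise the signal is handled with signals blocked and the previous
 * enable state is restored afterwards.
 */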
static void sig_handler(int sig, struct siginfo *si, mcontext_t *mc)
{
	int enabled = signals_enabled;

#if IS_ENABLED(CONFIG_UML_TIME_TRAVEL_SUPPORT)
	if ((signals_blocked ||
	     __atomic_load_n(&signals_blocked_pending, __ATOMIC_SEQ_CST)) &&
	    (sig == SIGIO)) {
		/* increment so unblock will do another round */
		__atomic_add_fetch(&signals_blocked_pending, 1,
				   __ATOMIC_SEQ_CST);
		return;
	}
#endif

	if (!enabled && (sig == SIGIO)) {
		/*
		 * In TT_MODE_EXTERNAL, we still need to call the
		 * time-travel handlers. This will mark signals_pending
		 * by itself (only if necessary.)
		 * Note we won't get here if signals are hard-blocked
		 * (which is handled above); in that case the hard
		 * unblock will handle things.
		 */
		if (time_travel_mode == TT_MODE_EXTERNAL)
			sigio_run_timetravel_handlers();
		else
			signals_pending |= SIGIO_MASK;
		return;
	}

	block_signals_trace();

	sig_handler_common(sig, si, mc);

	um_set_signals_trace(enabled);
}

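/*
 * Run the in-kernel timer handler with register state taken from the
 * mcontext, or zeroed registers when no signal frame is available.
 */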
static void timer_real_alarm_handler(mcontext_t *mc)
{
	struct uml_pt_regs regs;

	if (mc != NULL)
		get_regs_from_mc(&regs, mc);
	else
		memset(&regs, 0, sizeof(regs));
	timer_handler(SIGALRM, NULL, &regs);
}

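/*
 * SIGALRM entry point: if signals are disabled the tick is only marked
 * pending; otherwise the timer handler runs with signals blocked, with
 * signals_active recording that we are inside it.
 */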
static void timer_alarm_handler(int sig, struct siginfo *unused_si, mcontext_t *mc)
{
	int enabled;

	enabled = signals_enabled;
	if (!signals_enabled) {
		signals_pending |= SIGALRM_MASK;
		return;
	}

	block_signals_trace();

	signals_active |= SIGALRM_MASK;

	timer_real_alarm_handler(mc);

	signals_active &= ~SIGALRM_MASK;

	um_set_signals_trace(enabled);
}

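/* Deliver a timer tick as if a SIGALRM had arrived from the host. */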
void deliver_alarm(void)
{
	timer_alarm_handler(SIGALRM, NULL, NULL);
}

void timer_set_signal_handler(void)
{
	set_handler(SIGALRM);
}

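/*
 * Install the given memory as the alternate stack that SA_ONSTACK handlers
 * run on; panic if the host rejects it.
 */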
void set_sigstack(void *sig_stack, int size)
{
	stack_t stack = {
		.ss_flags = 0,
		.ss_sp = sig_stack,
		.ss_size = size
	};

	if (sigaltstack(&stack, NULL) != 0)
		panic("enabling signal stack failed, errno = %d\n", errno);
}

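/* SIGUSR1 is used as a wakeup signal; its handler just calls uml_pm_wake(). */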
static void sigusr1_handler(int sig, struct siginfo *unused_si, mcontext_t *mc)
{
	uml_pm_wake();
}

void register_pm_wake_signal(void)
{
	set_handler(SIGUSR1);
}

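/*
 * Low-level handler table, indexed by signal number; everything except the
 * timer and SIGUSR1 funnels through sig_handler().
 */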
static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = {
	[SIGSEGV] = sig_handler,
	[SIGBUS] = sig_handler,
	[SIGILL] = sig_handler,
	[SIGFPE] = sig_handler,
	[SIGTRAP] = sig_handler,

	[SIGIO] = sig_handler,
	[SIGWINCH] = sig_handler,
	[SIGALRM] = timer_alarm_handler,

	[SIGUSR1] = sigusr1_handler,
};

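/*
 * The handler actually registered with the host: extract the mcontext from
 * the ucontext and dispatch through handlers[].
 */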
static void hard_handler(int sig, siginfo_t *si, void *p)
{
	ucontext_t *uc = p;
	mcontext_t *mc = &uc->uc_mcontext;

	(*handlers[sig])(sig, (struct siginfo *)si, mc);
}

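/*
 * Register hard_handler() for sig, with the IRQ signals masked during
 * delivery (and SA_NODEFER/SA_RESTART set as appropriate), then make sure
 * sig itself is not blocked in the process signal mask.
 */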
void set_handler(int sig)
{
	struct sigaction action;
	int flags = SA_SIGINFO | SA_ONSTACK;
	sigset_t sig_mask;

	action.sa_sigaction = hard_handler;

	/* block the IRQ signals while the handler runs */
	sigemptyset(&action.sa_mask);
	sigaddset(&action.sa_mask, SIGIO);
	sigaddset(&action.sa_mask, SIGWINCH);
	sigaddset(&action.sa_mask, SIGALRM);

	if (sig == SIGSEGV)
		flags |= SA_NODEFER;

	if (sigismember(&action.sa_mask, sig))
		flags |= SA_RESTART; /* if it's an irq signal */

	action.sa_flags = flags;
	action.sa_restorer = NULL;
	if (sigaction(sig, &action, NULL) < 0)
		panic("sigaction failed - errno = %d\n", errno);

	sigemptyset(&sig_mask);
	sigaddset(&sig_mask, sig);
	if (sigprocmask(SIG_UNBLOCK, &sig_mask, NULL) < 0)
		panic("sigprocmask failed - errno = %d\n", errno);
}

void send_sigio_to_self(void)
{
	kill(os_getpid(), SIGIO);
}

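/*
 * Block (on == 0) or unblock (on != 0) a single signal in the host signal
 * mask; returns 0 on success or -errno.
 */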
int change_sig(int signal, int on)
{
	sigset_t sigset;

	sigemptyset(&sigset);
	sigaddset(&sigset, signal);
	if (sigprocmask(on ? SIG_UNBLOCK : SIG_BLOCK, &sigset, NULL) < 0)
		return -errno;

	return 0;
}

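/*
 * Soft-disable interrupts: signal handlers that fire while disabled only
 * mark themselves pending in signals_pending.
 */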
void block_signals(void)
{
	signals_enabled = 0;
	/*
	 * This must return with signals disabled, so this barrier
	 * ensures that writes are flushed out before the return.
	 * This might matter if gcc figures out how to inline this and
	 * decides to shuffle this code into the caller.
	 */
	barrier();
}

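/*
 * Soft-enable interrupts and run any handlers that were marked pending
 * while they were disabled.
 */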
void unblock_signals(void)
{
	int save_pending;

	if (signals_enabled == 1)
		return;

	signals_enabled = 1;
#if IS_ENABLED(CONFIG_UML_TIME_TRAVEL_SUPPORT)
	deliver_time_travel_irqs();
#endif

	/*
	 * We loop because the IRQ handler returns with interrupts off.  So,
	 * interrupts may have arrived and we need to re-enable them and
	 * recheck signals_pending.
	 */
	while (1) {
		/*
		 * Save and reset save_pending after enabling signals.  This
		 * way, signals_pending won't be changed while we're reading it.
		 *
		 * Setting signals_enabled and reading signals_pending must
		 * happen in this order, so have the barrier here.
		 */
		barrier();

		save_pending = signals_pending;
		if (save_pending == 0)
			return;

		signals_pending = 0;

		/*
		 * We have pending interrupts, so disable signals, as the
		 * handlers expect them off when they are called.  They will
		 * be enabled again above. We need to trace this, as we're
		 * expected to be enabling interrupts already, but any more
		 * tracing that happens inside the handlers we call for the
		 * pending signals will mess up the tracing state.
		 */
		signals_enabled = 0;
		um_trace_signals_off();

		/*
		 * Deal with SIGIO first because the alarm handler might
		 * schedule, leaving the pending SIGIO stranded until we come
		 * back here.
		 *
		 * SIGIO's handler doesn't use siginfo or mcontext,
		 * so they can be NULL.
		 */
		if (save_pending & SIGIO_MASK)
			sig_handler_common(SIGIO, NULL, NULL);

		/* Do not reenter the timer handler */
		if ((save_pending & SIGALRM_MASK) && (!(signals_active & SIGALRM_MASK)))
			timer_real_alarm_handler(NULL);

		/*
		 * Rerun the loop only if there is still a pending SIGIO and
		 * we are not in the timer handler.
		 */
		if (!(signals_pending & SIGIO_MASK) && (signals_active & SIGALRM_MASK))
			return;

		/* Re-enable signals and trace that we're doing so. */
		um_trace_signals_on();
		signals_enabled = 1;
	}
}

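/* Set the interrupt-enable state and return the previous one. */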
int um_set_signals(int enable)
{
	int ret;

	if (signals_enabled == enable)
		return enable;

	ret = signals_enabled;
	if (enable)
		unblock_signals();
	else
		block_signals();

	return ret;
}

int um_set_signals_trace(int enable)
{
	int ret;

	if (signals_enabled == enable)
		return enable;

	ret = signals_enabled;
	if (enable)
		unblock_signals_trace();
	else
		block_signals_trace();

	return ret;
}

#if IS_ENABLED(CONFIG_UML_TIME_TRAVEL_SUPPORT)
void mark_sigio_pending(void)
{
	/*
	 * It would seem that this should be atomic so
	 * it isn't a read-modify-write with a signal
	 * that could happen in the middle, losing the
	 * value set by the signal.
	 *
	 * However, this function is only called when in
	 * time-travel=ext simulation mode, in which case
	 * the only signal ever pending is SIGIO, which
	 * is blocked while this can be called, and the
	 * timer signal (SIGALRM) cannot happen.
	 */
	signals_pending |= SIGIO_MASK;
}

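/*
 * Hard-block SIGIO delivery (nestable): while the count is non-zero,
 * sig_handler() only records the signal in signals_blocked_pending.
 */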
void block_signals_hard(void)
{
	signals_blocked++;
	barrier();
}

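/*
 * Drop one level of hard blocking; once the count reaches zero, replay any
 * SIGIO that was recorded in signals_blocked_pending in the meantime.
 */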
void unblock_signals_hard(void)
{
	static bool unblocking;

	if (!signals_blocked)
		panic("unblocking signals while not blocked");

	if (--signals_blocked)
		return;
	/*
	 * Must be set to 0 before we check pending so the
	 * SIGIO handler will run as normal unless we're still
	 * going to process signals_blocked_pending.
	 */
	barrier();

	/*
	 * Note that block_signals_hard()/unblock_signals_hard() can be called
	 * within the unblock_signals()/sigio_run_timetravel_handlers() below.
	 * This would still be prone to race conditions since it's actually a
	 * call _within_ e.g. vu_req_read_message(), where we observed this
	 * issue, which loops. Thus, if the inner call handles the recorded
	 * pending signals, we can get out of the inner call with the real
	 * signal handler no longer blocked, and still have a race. Thus don't
	 * handle unblocking in the inner call, if it happens, but only in
	 * the outermost call - 'unblocking' serves as an ownership marker for
	 * the signals_blocked_pending decrement.
	 */
	if (unblocking)
		return;
	unblocking = true;

	while (__atomic_load_n(&signals_blocked_pending, __ATOMIC_SEQ_CST)) {
		if (signals_enabled) {
			/* signals are enabled so we can touch this */
			signals_pending |= SIGIO_MASK;
			/*
			 * this is a bit inefficient, but that's
			 * not really important
			 */
			block_signals();
			unblock_signals();
		} else {
			/*
			 * we need to run time-travel handlers even
			 * if not enabled
			 */
			sigio_run_timetravel_handlers();
		}

		/*
		 * The decrement of signals_blocked_pending must be atomic so
		 * that the signal handler will either happen before or after
		 * the decrement, not during a read-modify-write:
		 *  - If it happens before, it can increment it and we'll
		 *    decrement it and do another round in the loop.
		 *  - If it happens after, it'll see 0 for both signals_blocked
		 *    and signals_blocked_pending and thus run the handler as
		 *    usual (subject to signals_enabled, but that's unrelated.)
		 *
		 * Note that a call to unblock_signals_hard() within the calls
		 * to unblock_signals() or sigio_run_timetravel_handlers() above
		 * will do nothing due to the 'unblocking' state, so this cannot
		 * underflow as the only one decrementing will be the outermost
		 * one.
		 */
		if (__atomic_sub_fetch(&signals_blocked_pending, 1,
				       __ATOMIC_SEQ_CST) < 0)
			panic("signals_blocked_pending underflow");
	}

	unblocking = false;
}
#endif