xref: /linux/arch/um/kernel/time.c (revision ae22a94997b8a03dcb3c922857c203246711f9d4)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
4  * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
5  * Copyright (C) 2012-2014 Cisco Systems
6  * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
7  * Copyright (C) 2019 Intel Corporation
8  */
9 
10 #include <linux/clockchips.h>
11 #include <linux/init.h>
12 #include <linux/interrupt.h>
13 #include <linux/jiffies.h>
14 #include <linux/mm.h>
15 #include <linux/sched.h>
16 #include <linux/spinlock.h>
17 #include <linux/threads.h>
18 #include <asm/irq.h>
19 #include <asm/param.h>
20 #include <kern_util.h>
21 #include <os.h>
22 #include <linux/time-internal.h>
23 #include <linux/um_timetravel.h>
24 #include <shared/init.h>
25 
26 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
27 enum time_travel_mode time_travel_mode;
28 EXPORT_SYMBOL_GPL(time_travel_mode);
29 
30 static bool time_travel_start_set;
31 static unsigned long long time_travel_start;
32 static unsigned long long time_travel_time;
33 static LIST_HEAD(time_travel_events);
34 static LIST_HEAD(time_travel_irqs);
35 static unsigned long long time_travel_timer_interval;
36 static unsigned long long time_travel_next_event;
37 static struct time_travel_event time_travel_timer_event;
38 static int time_travel_ext_fd = -1;
39 static unsigned int time_travel_ext_waiting;
40 static bool time_travel_ext_prev_request_valid;
41 static unsigned long long time_travel_ext_prev_request;
42 static bool time_travel_ext_free_until_valid;
43 static unsigned long long time_travel_ext_free_until;
44 
45 static void time_travel_set_time(unsigned long long ns)
46 {
47 	if (unlikely(ns < time_travel_time))
48 		panic("time-travel: time goes backwards %lld -> %lld\n",
49 		      time_travel_time, ns);
50 	else if (unlikely(ns >= S64_MAX))
51 		panic("The system was going to sleep forever, aborting");
52 
53 	time_travel_time = ns;
54 }
55 
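/*
 * How a message from the controller may be waited for/consumed, as used
 * by time_travel_handle_message() below:
 *  TTMH_IDLE - called from the idle loop, interrupts must be disabled;
 *              poll the socket before reading
 *  TTMH_POLL - poll the socket before reading (outside the idle loop)
 *  TTMH_READ - read the next message directly, without polling first
 *              (e.g. while spinning for an ACK)
 */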
56 enum time_travel_message_handling {
57 	TTMH_IDLE,
58 	TTMH_POLL,
59 	TTMH_READ,
60 };
61 
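/*
 * Read one message from the controller socket and process it: RUN
 * advances the local time, FREE_UNTIL records how far we may run
 * without asking again, and every message except an incoming ACK is
 * acknowledged back with its sequence number.
 */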
62 static void time_travel_handle_message(struct um_timetravel_msg *msg,
63 				       enum time_travel_message_handling mode)
64 {
65 	struct um_timetravel_msg resp = {
66 		.op = UM_TIMETRAVEL_ACK,
67 	};
68 	int ret;
69 
70 	/*
71 	 * We can't unlock here, but interrupt signals with a timetravel_handler
72 	 * (see um_request_irq_tt) get to the timetravel_handler anyway.
73 	 */
74 	if (mode != TTMH_READ) {
75 		BUG_ON(mode == TTMH_IDLE && !irqs_disabled());
76 
77 		while (os_poll(1, &time_travel_ext_fd) != 0) {
78 			/* nothing */
79 		}
80 	}
81 
82 	ret = os_read_file(time_travel_ext_fd, msg, sizeof(*msg));
83 
84 	if (ret == 0)
85 		panic("time-travel external link is broken\n");
86 	if (ret != sizeof(*msg))
87 		panic("invalid time-travel message - %d bytes\n", ret);
88 
89 	switch (msg->op) {
90 	default:
91 		WARN_ONCE(1, "time-travel: unexpected message %lld\n",
92 			  (unsigned long long)msg->op);
93 		break;
94 	case UM_TIMETRAVEL_ACK:
95 		return;
96 	case UM_TIMETRAVEL_RUN:
97 		time_travel_set_time(msg->time);
98 		break;
99 	case UM_TIMETRAVEL_FREE_UNTIL:
100 		time_travel_ext_free_until_valid = true;
101 		time_travel_ext_free_until = msg->time;
102 		break;
103 	}
104 
105 	resp.seq = msg->seq;
106 	os_write_file(time_travel_ext_fd, &resp, sizeof(resp));
107 }
108 
109 static u64 time_travel_ext_req(u32 op, u64 time)
110 {
111 	static int seq;
112 	int mseq = ++seq;
113 	struct um_timetravel_msg msg = {
114 		.op = op,
115 		.time = time,
116 		.seq = mseq,
117 	};
118 
119 	/*
120 	 * We need to block even the timetravel handlers of SIGIO here and
 121 	 * only restore their use once we have received the ACK - otherwise
 122 	 * we may (will) get interrupted by one, try to queue the IRQ for
 123 	 * future processing and thus send another request while we're still
 124 	 * waiting for an ACK; the peer doesn't know we got interrupted and
 125 	 * sends the ACKs in the same order as the messages, but we'd need to
126 	 * see them in the opposite order ...
127 	 *
128 	 * This wouldn't matter *too* much, but some ACKs carry the
129 	 * current time (for UM_TIMETRAVEL_GET) and getting another
130 	 * ACK without a time would confuse us a lot!
131 	 *
132 	 * The sequence number assignment that happens here lets us
133 	 * debug such message handling issues more easily.
134 	 */
135 	block_signals_hard();
136 	os_write_file(time_travel_ext_fd, &msg, sizeof(msg));
137 
138 	while (msg.op != UM_TIMETRAVEL_ACK)
139 		time_travel_handle_message(&msg, TTMH_READ);
140 
141 	if (msg.seq != mseq)
142 		panic("time-travel: ACK message has different seqno! op=%d, seq=%d != %d time=%lld\n",
143 		      msg.op, msg.seq, mseq, msg.time);
144 
145 	if (op == UM_TIMETRAVEL_GET)
146 		time_travel_set_time(msg.time);
147 	unblock_signals_hard();
148 
149 	return msg.time;
150 }
151 
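/*
 * Wait until @fd becomes readable, servicing any messages that arrive
 * on the time-travel socket in the meantime. Only relevant (and only
 * does anything) in TT_MODE_EXTERNAL.
 */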
152 void __time_travel_wait_readable(int fd)
153 {
154 	int fds[2] = { fd, time_travel_ext_fd };
155 	int ret;
156 
157 	if (time_travel_mode != TT_MODE_EXTERNAL)
158 		return;
159 
160 	while ((ret = os_poll(2, fds))) {
161 		struct um_timetravel_msg msg;
162 
163 		if (ret == 1)
164 			time_travel_handle_message(&msg, TTMH_READ);
165 	}
166 }
167 EXPORT_SYMBOL_GPL(__time_travel_wait_readable);
168 
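/*
 * Tell the controller the next time we need to run, unless we already
 * requested exactly that time, or we're currently running and may keep
 * running until then anyway ("free until").
 */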
169 static void time_travel_ext_update_request(unsigned long long time)
170 {
171 	if (time_travel_mode != TT_MODE_EXTERNAL)
172 		return;
173 
174 	/* asked for exactly this time previously */
175 	if (time_travel_ext_prev_request_valid &&
176 	    time == time_travel_ext_prev_request)
177 		return;
178 
179 	/*
180 	 * if we're running and are allowed to run past the request
181 	 * then we don't need to update it either
182 	 */
183 	if (!time_travel_ext_waiting && time_travel_ext_free_until_valid &&
184 	    time < time_travel_ext_free_until)
185 		return;
186 
187 	time_travel_ext_prev_request = time;
188 	time_travel_ext_prev_request_valid = true;
189 	time_travel_ext_req(UM_TIMETRAVEL_REQUEST, time);
190 }
191 
192 void __time_travel_propagate_time(void)
193 {
194 	static unsigned long long last_propagated;
195 
196 	if (last_propagated == time_travel_time)
197 		return;
198 
199 	time_travel_ext_req(UM_TIMETRAVEL_UPDATE, time_travel_time);
200 	last_propagated = time_travel_time;
201 }
202 EXPORT_SYMBOL_GPL(__time_travel_propagate_time);
203 
 204 /* returns true if we must wait for the simtime device */
205 static bool time_travel_ext_request(unsigned long long time)
206 {
207 	/*
208 	 * If we received an external sync point ("free until") then we
209 	 * don't have to request/wait for anything until then, unless
210 	 * we're already waiting.
211 	 */
212 	if (!time_travel_ext_waiting && time_travel_ext_free_until_valid &&
213 	    time < time_travel_ext_free_until)
214 		return false;
215 
216 	time_travel_ext_update_request(time);
217 	return true;
218 }
219 
220 static void time_travel_ext_wait(bool idle)
221 {
222 	struct um_timetravel_msg msg = {
223 		.op = UM_TIMETRAVEL_ACK,
224 	};
225 
226 	time_travel_ext_prev_request_valid = false;
227 	time_travel_ext_free_until_valid = false;
228 	time_travel_ext_waiting++;
229 
230 	time_travel_ext_req(UM_TIMETRAVEL_WAIT, -1);
231 
232 	/*
233 	 * Here we are deep in the idle loop, so we have to break out of the
 234 	 * kernel abstraction in a sense and implement this in terms of the
 235 	 * UML system waiting on the VQ interrupt while sleeping; when we get
 236 	 * the signal, it'll call time_travel_ext_vq_notify_done(), completing
 237 	 * the call.
238 	 */
239 	while (msg.op != UM_TIMETRAVEL_RUN)
240 		time_travel_handle_message(&msg, idle ? TTMH_IDLE : TTMH_POLL);
241 
242 	time_travel_ext_waiting--;
243 
244 	/* we might request more stuff while polling - reset when we run */
245 	time_travel_ext_prev_request_valid = false;
246 }
247 
248 static void time_travel_ext_get_time(void)
249 {
250 	time_travel_ext_req(UM_TIMETRAVEL_GET, -1);
251 }
252 
253 static void __time_travel_update_time(unsigned long long ns, bool idle)
254 {
255 	if (time_travel_mode == TT_MODE_EXTERNAL && time_travel_ext_request(ns))
256 		time_travel_ext_wait(idle);
257 	else
258 		time_travel_set_time(ns);
259 }
260 
261 static struct time_travel_event *time_travel_first_event(void)
262 {
263 	return list_first_entry_or_null(&time_travel_events,
264 					struct time_travel_event,
265 					list);
266 }
267 
268 static void __time_travel_add_event(struct time_travel_event *e,
269 				    unsigned long long time)
270 {
271 	struct time_travel_event *tmp;
272 	bool inserted = false;
273 	unsigned long flags;
274 
275 	if (e->pending)
276 		return;
277 
278 	e->pending = true;
279 	e->time = time;
280 
281 	local_irq_save(flags);
282 	list_for_each_entry(tmp, &time_travel_events, list) {
283 		/*
284 		 * Add the new entry before one with higher time,
285 		 * or if they're equal and both on stack, because
286 		 * in that case we need to unwind the stack in the
287 		 * right order, and the later event (timer sleep
288 		 * or such) must be dequeued first.
289 		 */
290 		if ((tmp->time > e->time) ||
291 		    (tmp->time == e->time && tmp->onstack && e->onstack)) {
292 			list_add_tail(&e->list, &tmp->list);
293 			inserted = true;
294 			break;
295 		}
296 	}
297 
298 	if (!inserted)
299 		list_add_tail(&e->list, &time_travel_events);
300 
301 	tmp = time_travel_first_event();
302 	time_travel_ext_update_request(tmp->time);
303 	time_travel_next_event = tmp->time;
304 	local_irq_restore(flags);
305 }
306 
307 static void time_travel_add_event(struct time_travel_event *e,
308 				  unsigned long long time)
309 {
310 	if (WARN_ON(!e->fn))
311 		return;
312 
313 	__time_travel_add_event(e, time);
314 }
315 
316 void time_travel_add_event_rel(struct time_travel_event *e,
317 			       unsigned long long delay_ns)
318 {
319 	time_travel_add_event(e, time_travel_time + delay_ns);
320 }
321 
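/* periodic timer event: re-arm one interval ahead, then deliver the alarm */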
322 void time_travel_periodic_timer(struct time_travel_event *e)
323 {
324 	time_travel_add_event(&time_travel_timer_event,
325 			      time_travel_time + time_travel_timer_interval);
326 	deliver_alarm();
327 }
328 
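/*
 * Deliver IRQ events that were queued on time_travel_irqs because they
 * became due while interrupts were disabled, see time_travel_deliver_event().
 */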
329 void deliver_time_travel_irqs(void)
330 {
331 	struct time_travel_event *e;
332 	unsigned long flags;
333 
334 	/*
 335 	 * Don't do anything for most cases. Note that because we disable IRQs
 336 	 * here (and re-enable them later) we'll actually recurse into this
 337 	 * function at the end, so the list_empty() check is strictly necessary.
338 	 */
339 	if (likely(list_empty(&time_travel_irqs)))
340 		return;
341 
342 	local_irq_save(flags);
343 	irq_enter();
344 	while ((e = list_first_entry_or_null(&time_travel_irqs,
345 					     struct time_travel_event,
346 					     list))) {
347 		list_del(&e->list);
348 		e->pending = false;
349 		e->fn(e);
350 	}
351 	irq_exit();
352 	local_irq_restore(flags);
353 }
354 
355 static void time_travel_deliver_event(struct time_travel_event *e)
356 {
357 	if (e == &time_travel_timer_event) {
358 		/*
359 		 * deliver_alarm() does the irq_enter/irq_exit
 360 		 * by itself, so we must handle it specially here
361 		 */
362 		e->fn(e);
363 	} else if (irqs_disabled()) {
364 		list_add_tail(&e->list, &time_travel_irqs);
365 		/*
366 		 * set pending again, it was set to false when the
367 		 * event was deleted from the original list, but
368 		 * now it's still pending until we deliver the IRQ.
369 		 */
370 		e->pending = true;
371 	} else {
372 		unsigned long flags;
373 
374 		local_irq_save(flags);
375 		irq_enter();
376 		e->fn(e);
377 		irq_exit();
378 		local_irq_restore(flags);
379 	}
380 }
381 
382 bool time_travel_del_event(struct time_travel_event *e)
383 {
384 	unsigned long flags;
385 
386 	if (!e->pending)
387 		return false;
388 	local_irq_save(flags);
389 	list_del(&e->list);
390 	e->pending = false;
391 	local_irq_restore(flags);
392 	return true;
393 }
394 
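/*
 * Advance time to @next: queue a temporary on-stack marker event at
 * @next and deliver, in order, every event that becomes due before it,
 * stopping once the marker itself is dequeued (or after a single wait
 * in the idle case).
 */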
395 static void time_travel_update_time(unsigned long long next, bool idle)
396 {
397 	struct time_travel_event ne = {
398 		.onstack = true,
399 	};
400 	struct time_travel_event *e;
401 	bool finished = idle;
402 
403 	/* add it without a handler - we deal with that specifically below */
404 	__time_travel_add_event(&ne, next);
405 
406 	do {
407 		e = time_travel_first_event();
408 
409 		BUG_ON(!e);
410 		__time_travel_update_time(e->time, idle);
411 
412 		/* new events may have been inserted while we were waiting */
413 		if (e == time_travel_first_event()) {
414 			BUG_ON(!time_travel_del_event(e));
415 			BUG_ON(time_travel_time != e->time);
416 
417 			if (e == &ne) {
418 				finished = true;
419 			} else {
420 				if (e->onstack)
421 					panic("On-stack event dequeued outside of the stack! time=%lld, event time=%lld, event=%pS\n",
422 					      time_travel_time, e->time, e);
423 				time_travel_deliver_event(e);
424 			}
425 		}
426 
427 		e = time_travel_first_event();
428 		if (e)
429 			time_travel_ext_update_request(e->time);
430 	} while (ne.pending && !finished);
431 
432 	time_travel_del_event(&ne);
433 }
434 
435 static void time_travel_update_time_rel(unsigned long long offs)
436 {
437 	unsigned long flags;
438 
439 	/*
440 	 * Disable interrupts before calculating the new time so
441 	 * that a real timer interrupt (signal) can't happen at
442 	 * a bad time e.g. after we read time_travel_time but
443 	 * before we've completed updating the time.
444 	 */
445 	local_irq_save(flags);
446 	time_travel_update_time(time_travel_time + offs, false);
447 	local_irq_restore(flags);
448 }
449 
450 void time_travel_ndelay(unsigned long nsec)
451 {
452 	/*
 453 	 * It's not strictly necessary to use the _rel() version since this
 454 	 * is only used in INFCPU/EXT modes, but it doesn't hurt and
455 	 * is more readable too.
456 	 */
457 	time_travel_update_time_rel(nsec);
458 }
459 EXPORT_SYMBOL(time_travel_ndelay);
460 
461 void time_travel_add_irq_event(struct time_travel_event *e)
462 {
463 	BUG_ON(time_travel_mode != TT_MODE_EXTERNAL);
464 
465 	time_travel_ext_get_time();
466 	/*
467 	 * We could model interrupt latency here, for now just
468 	 * don't have any latency at all and request the exact
469 	 * same time (again) to run the interrupt...
470 	 */
471 	time_travel_add_event(e, time_travel_time);
472 }
473 EXPORT_SYMBOL_GPL(time_travel_add_irq_event);
474 
475 static void time_travel_oneshot_timer(struct time_travel_event *e)
476 {
477 	deliver_alarm();
478 }
479 
480 void time_travel_sleep(void)
481 {
482 	/*
483 	 * Wait "forever" (using S64_MAX because there are some potential
484 	 * wrapping issues, especially with the current TT_MODE_EXTERNAL
 485 	 * controller application).
486 	 */
487 	unsigned long long next = S64_MAX;
488 
489 	if (time_travel_mode == TT_MODE_BASIC)
490 		os_timer_disable();
491 
492 	time_travel_update_time(next, true);
493 
494 	if (time_travel_mode == TT_MODE_BASIC &&
495 	    time_travel_timer_event.pending) {
496 		if (time_travel_timer_event.fn == time_travel_periodic_timer) {
497 			/*
498 			 * This is somewhat wrong - we should get the first
499 			 * one sooner like the os_timer_one_shot() below...
500 			 */
501 			os_timer_set_interval(time_travel_timer_interval);
502 		} else {
503 			os_timer_one_shot(time_travel_timer_event.time - next);
504 		}
505 	}
506 }
507 
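/*
 * A real SIGALRM fired (TT_MODE_BASIC): we don't read the host clock,
 * so jump the simulated time to the programmed expiry and re-arm the
 * timer event if it's periodic.
 */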
508 static void time_travel_handle_real_alarm(void)
509 {
510 	time_travel_set_time(time_travel_next_event);
511 
512 	time_travel_del_event(&time_travel_timer_event);
513 
514 	if (time_travel_timer_event.fn == time_travel_periodic_timer)
515 		time_travel_add_event(&time_travel_timer_event,
516 				      time_travel_time +
517 				      time_travel_timer_interval);
518 }
519 
520 static void time_travel_set_interval(unsigned long long interval)
521 {
522 	time_travel_timer_interval = interval;
523 }
524 
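/*
 * Parse the "[ID:]socket" argument of time-travel=ext:, connect to the
 * controller socket and send the initial START request, with the
 * optional 64-bit ID (or -1 if none was given).
 */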
525 static int time_travel_connect_external(const char *socket)
526 {
527 	const char *sep;
528 	unsigned long long id = (unsigned long long)-1;
529 	int rc;
530 
531 	if ((sep = strchr(socket, ':'))) {
532 		char buf[25] = {};
533 		if (sep - socket > sizeof(buf) - 1)
534 			goto invalid_number;
535 
536 		memcpy(buf, socket, sep - socket);
537 		if (kstrtoull(buf, 0, &id)) {
538 invalid_number:
539 			panic("time-travel: invalid external ID in string '%s'\n",
540 			      socket);
541 			return -EINVAL;
542 		}
543 
544 		socket = sep + 1;
545 	}
546 
547 	rc = os_connect_socket(socket);
548 	if (rc < 0) {
549 		panic("time-travel: failed to connect to external socket %s\n",
550 		      socket);
551 		return rc;
552 	}
553 
554 	time_travel_ext_fd = rc;
555 
556 	time_travel_ext_req(UM_TIMETRAVEL_START, id);
557 
558 	return 1;
559 }
560 
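/*
 * Determine the wall-clock time the simulation starts at, unless
 * time-travel-start= already set it: in external mode ask the
 * controller for the time of day and subtract the current simulated
 * time; in the other time-travel modes use the host clock (TT_MODE_OFF
 * reads the host clock directly in read_persistent_clock64()).
 */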
561 static void time_travel_set_start(void)
562 {
563 	if (time_travel_start_set)
564 		return;
565 
566 	switch (time_travel_mode) {
567 	case TT_MODE_EXTERNAL:
568 		time_travel_start = time_travel_ext_req(UM_TIMETRAVEL_GET_TOD, -1);
569 		/* controller gave us the *current* time, so adjust by that */
570 		time_travel_ext_get_time();
571 		time_travel_start -= time_travel_time;
572 		break;
573 	case TT_MODE_INFCPU:
574 	case TT_MODE_BASIC:
575 		if (!time_travel_start_set)
576 			time_travel_start = os_persistent_clock_emulation();
577 		break;
578 	case TT_MODE_OFF:
579 		/* we just read the host clock with os_persistent_clock_emulation() */
580 		break;
581 	}
582 
583 	time_travel_start_set = true;
584 }
585 #else /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
586 #define time_travel_start_set 0
587 #define time_travel_start 0
588 #define time_travel_time 0
589 #define time_travel_ext_waiting 0
590 
591 static inline void time_travel_update_time(unsigned long long ns, bool idle)
592 {
593 }
594 
595 static inline void time_travel_update_time_rel(unsigned long long offs)
596 {
597 }
598 
599 static inline void time_travel_handle_real_alarm(void)
600 {
601 }
602 
603 static void time_travel_set_interval(unsigned long long interval)
604 {
605 }
606 
607 static inline void time_travel_set_start(void)
608 {
609 }
610 
611 /* fail link if this actually gets used */
612 extern u64 time_travel_ext_req(u32 op, u64 time);
613 
614 /* these are empty macros so the struct/fn need not exist */
615 #define time_travel_add_event(e, time) do { } while (0)
616 /* externally not usable - redefine here so we can */
617 #undef time_travel_del_event
618 #define time_travel_del_event(e) do { } while (0)
619 #endif
620 
621 void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
622 {
623 	unsigned long flags;
624 
625 	/*
626 	 * In basic time-travel mode we still get real interrupts
627 	 * (signals) but since we don't read time from the OS, we
628 	 * must update the simulated time here to the expiry when
629 	 * we get a signal.
630 	 * This is not the case in inf-cpu mode, since there we
631 	 * never get any real signals from the OS.
632 	 */
633 	if (time_travel_mode == TT_MODE_BASIC)
634 		time_travel_handle_real_alarm();
635 
636 	local_irq_save(flags);
637 	do_IRQ(TIMER_IRQ, regs);
638 	local_irq_restore(flags);
639 }
640 
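/*
 * clock_event_device callbacks: in the time-travel modes the timer is
 * modelled by time_travel_timer_event on the event list; the host timer
 * is only touched when real time is in use (TT_MODE_OFF and TT_MODE_BASIC).
 */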
641 static int itimer_shutdown(struct clock_event_device *evt)
642 {
643 	if (time_travel_mode != TT_MODE_OFF)
644 		time_travel_del_event(&time_travel_timer_event);
645 
646 	if (time_travel_mode != TT_MODE_INFCPU &&
647 	    time_travel_mode != TT_MODE_EXTERNAL)
648 		os_timer_disable();
649 
650 	return 0;
651 }
652 
653 static int itimer_set_periodic(struct clock_event_device *evt)
654 {
655 	unsigned long long interval = NSEC_PER_SEC / HZ;
656 
657 	if (time_travel_mode != TT_MODE_OFF) {
658 		time_travel_del_event(&time_travel_timer_event);
659 		time_travel_set_event_fn(&time_travel_timer_event,
660 					 time_travel_periodic_timer);
661 		time_travel_set_interval(interval);
662 		time_travel_add_event(&time_travel_timer_event,
663 				      time_travel_time + interval);
664 	}
665 
666 	if (time_travel_mode != TT_MODE_INFCPU &&
667 	    time_travel_mode != TT_MODE_EXTERNAL)
668 		os_timer_set_interval(interval);
669 
670 	return 0;
671 }
672 
673 static int itimer_next_event(unsigned long delta,
674 			     struct clock_event_device *evt)
675 {
676 	delta += 1;
677 
678 	if (time_travel_mode != TT_MODE_OFF) {
679 		time_travel_del_event(&time_travel_timer_event);
680 		time_travel_set_event_fn(&time_travel_timer_event,
681 					 time_travel_oneshot_timer);
682 		time_travel_add_event(&time_travel_timer_event,
683 				      time_travel_time + delta);
684 	}
685 
686 	if (time_travel_mode != TT_MODE_INFCPU &&
687 	    time_travel_mode != TT_MODE_EXTERNAL)
688 		return os_timer_one_shot(delta);
689 
690 	return 0;
691 }
692 
693 static int itimer_one_shot(struct clock_event_device *evt)
694 {
695 	return itimer_next_event(0, evt);
696 }
697 
698 static struct clock_event_device timer_clockevent = {
699 	.name			= "posix-timer",
700 	.rating			= 250,
701 	.cpumask		= cpu_possible_mask,
702 	.features		= CLOCK_EVT_FEAT_PERIODIC |
703 				  CLOCK_EVT_FEAT_ONESHOT,
704 	.set_state_shutdown	= itimer_shutdown,
705 	.set_state_periodic	= itimer_set_periodic,
706 	.set_state_oneshot	= itimer_one_shot,
707 	.set_next_event		= itimer_next_event,
708 	.shift			= 0,
709 	.max_delta_ns		= 0xffffffff,
710 	.max_delta_ticks	= 0xffffffff,
711 	.min_delta_ns		= TIMER_MIN_DELTA,
 712 	.min_delta_ticks	= TIMER_MIN_DELTA, /* microsecond resolution should be enough for anyone, same as 640K RAM */
713 	.irq			= 0,
714 	.mult			= 1,
715 };
716 
717 static irqreturn_t um_timer(int irq, void *dev)
718 {
 719 	if (get_current()->mm != NULL) {
 720 		/* userspace - relay signal, results in correct userspace timers */
 721 		os_alarm_process(get_current()->mm->context.id.u.pid);
 722 	}
724 
725 	(*timer_clockevent.event_handler)(&timer_clockevent);
726 
727 	return IRQ_HANDLED;
728 }
729 
730 static u64 timer_read(struct clocksource *cs)
731 {
732 	if (time_travel_mode != TT_MODE_OFF) {
733 		/*
734 		 * We make reading the timer cost a bit so that we don't get
735 		 * stuck in loops that expect time to move more than the
736 		 * exact requested sleep amount, e.g. python's socket server,
737 		 * see https://bugs.python.org/issue37026.
738 		 *
739 		 * However, don't do that when we're in interrupt or such as
740 		 * then we might recurse into our own processing, and get to
741 		 * even more waiting, and that's not good - it messes up the
742 		 * "what do I do next" and onstack event we use to know when
743 		 * to return from time_travel_update_time().
744 		 */
745 		if (!irqs_disabled() && !in_interrupt() && !in_softirq() &&
746 		    !time_travel_ext_waiting)
747 			time_travel_update_time_rel(TIMER_MULTIPLIER);
748 		return time_travel_time / TIMER_MULTIPLIER;
749 	}
750 
751 	return os_nsecs() / TIMER_MULTIPLIER;
752 }
753 
754 static struct clocksource timer_clocksource = {
755 	.name		= "timer",
756 	.rating		= 300,
757 	.read		= timer_read,
758 	.mask		= CLOCKSOURCE_MASK(64),
759 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
760 };
761 
762 static void __init um_timer_setup(void)
763 {
764 	int err;
765 
766 	err = request_irq(TIMER_IRQ, um_timer, IRQF_TIMER, "hr timer", NULL);
767 	if (err != 0)
768 		printk(KERN_ERR "register_timer : request_irq failed - "
769 		       "errno = %d\n", -err);
770 
771 	err = os_timer_create();
772 	if (err != 0) {
773 		printk(KERN_ERR "creation of timer failed - errno = %d\n", -err);
774 		return;
775 	}
776 
777 	err = clocksource_register_hz(&timer_clocksource, NSEC_PER_SEC/TIMER_MULTIPLIER);
778 	if (err) {
779 		printk(KERN_ERR "clocksource_register_hz returned %d\n", err);
780 		return;
781 	}
782 	clockevents_register_device(&timer_clockevent);
783 }
784 
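/*
 * In the time-travel modes the persistent clock is the (configured or
 * negotiated) start time plus the current simulated time; otherwise ask
 * the host.
 */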
785 void read_persistent_clock64(struct timespec64 *ts)
786 {
787 	long long nsecs;
788 
789 	time_travel_set_start();
790 
791 	if (time_travel_mode != TT_MODE_OFF)
792 		nsecs = time_travel_start + time_travel_time;
793 	else
794 		nsecs = os_persistent_clock_emulation();
795 
796 	set_normalized_timespec64(ts, nsecs / NSEC_PER_SEC,
797 				  nsecs % NSEC_PER_SEC);
798 }
799 
800 void __init time_init(void)
801 {
802 	timer_set_signal_handler();
803 	late_time_init = um_timer_setup;
804 }
805 
806 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
807 unsigned long calibrate_delay_is_known(void)
808 {
809 	if (time_travel_mode == TT_MODE_INFCPU ||
810 	    time_travel_mode == TT_MODE_EXTERNAL)
811 		return 1;
812 	return 0;
813 }
814 
815 int setup_time_travel(char *str)
816 {
817 	if (strcmp(str, "=inf-cpu") == 0) {
818 		time_travel_mode = TT_MODE_INFCPU;
819 		timer_clockevent.name = "time-travel-timer-infcpu";
820 		timer_clocksource.name = "time-travel-clock";
821 		return 1;
822 	}
823 
824 	if (strncmp(str, "=ext:", 5) == 0) {
825 		time_travel_mode = TT_MODE_EXTERNAL;
826 		timer_clockevent.name = "time-travel-timer-external";
827 		timer_clocksource.name = "time-travel-clock-external";
828 		return time_travel_connect_external(str + 5);
829 	}
830 
831 	if (!*str) {
832 		time_travel_mode = TT_MODE_BASIC;
833 		timer_clockevent.name = "time-travel-timer";
834 		timer_clocksource.name = "time-travel-clock";
835 		return 1;
836 	}
837 
838 	return -EINVAL;
839 }
840 
841 __setup("time-travel", setup_time_travel);
842 __uml_help(setup_time_travel,
843 "time-travel\n"
844 "This option just enables basic time travel mode, in which the clock/timers\n"
845 "inside the UML instance skip forward when there's nothing to do, rather than\n"
846 "waiting for real time to elapse. However, instance CPU speed is limited by\n"
 847 "the real CPU speed, so e.g. a 10ms timer still takes about 10ms of wall\n"
 848 "clock time to fire if the CPU is busy (but fires sooner when idle).\n"
849 "\n"
850 "time-travel=inf-cpu\n"
851 "This enables time travel mode with infinite processing power, in which there\n"
852 "are no wall clock timers, and any CPU processing happens - as seen from the\n"
853 "guest - instantly. This can be useful for accurate simulation regardless of\n"
854 "debug overhead, physical CPU speed, etc. but is somewhat dangerous as it can\n"
855 "easily lead to getting stuck (e.g. if anything in the system busy loops).\n"
856 "\n"
857 "time-travel=ext:[ID:]/path/to/socket\n"
858 "This enables time travel mode similar to =inf-cpu, except the system will\n"
859 "use the given socket to coordinate with a central scheduler, in order to\n"
860 "have more than one system simultaneously be on simulated time. The virtio\n"
861 "driver code in UML knows about this so you can also simulate networks and\n"
862 "devices using it, assuming the device has the right capabilities.\n"
863 "The optional ID is a 64-bit integer that's sent to the central scheduler.\n");
864 
865 int setup_time_travel_start(char *str)
866 {
867 	int err;
868 
869 	err = kstrtoull(str, 0, &time_travel_start);
870 	if (err)
871 		return err;
872 
873 	time_travel_start_set = 1;
874 	return 1;
875 }
876 
877 __setup("time-travel-start", setup_time_travel_start);
878 __uml_help(setup_time_travel_start,
879 "time-travel-start=<seconds>\n"
880 "Configure the UML instance's wall clock to start at this value rather than\n"
881 "the host's wall clock at the time of UML boot.\n");
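
/*
 * Illustrative command lines only - the memory size, socket path, ID and
 * start time below are made-up values, not defaults:
 *
 *   ./linux mem=64M time-travel
 *   ./linux mem=64M time-travel=inf-cpu
 *   ./linux mem=64M time-travel=ext:1:/tmp/tt-scheduler.sock \
 *           time-travel-start=1600000000
 */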
882 #endif
883