xref: /linux/arch/um/kernel/time.c (revision 001821b0e79716c4e17c71d8e053a23599a7a508)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
4  * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
5  * Copyright (C) 2012-2014 Cisco Systems
6  * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
7  * Copyright (C) 2019 Intel Corporation
8  */
9 
10 #include <linux/clockchips.h>
11 #include <linux/init.h>
12 #include <linux/interrupt.h>
13 #include <linux/jiffies.h>
14 #include <linux/mm.h>
15 #include <linux/sched.h>
16 #include <linux/spinlock.h>
17 #include <linux/threads.h>
18 #include <asm/irq.h>
19 #include <asm/param.h>
20 #include <kern_util.h>
21 #include <os.h>
22 #include <linux/delay.h>
23 #include <linux/time-internal.h>
24 #include <linux/um_timetravel.h>
25 #include <shared/init.h>
26 
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
/* current time-travel mode, exported for use by drivers (e.g. virtio) */
enum time_travel_mode time_travel_mode;
EXPORT_SYMBOL_GPL(time_travel_mode);

/* wall-clock start offset, settable via "time-travel-start=" */
static bool time_travel_start_set;
static unsigned long long time_travel_start;
/* current simulated time in nanoseconds (monotonic, see time_travel_set_time()) */
static unsigned long long time_travel_time;
/* pending events sorted by expiry time (see __time_travel_add_event()) */
static LIST_HEAD(time_travel_events);
/* events deferred because they expired while IRQs were disabled */
static LIST_HEAD(time_travel_irqs);
/* period of the simulated periodic timer, in nanoseconds */
static unsigned long long time_travel_timer_interval;
/* expiry time of the earliest queued event */
static unsigned long long time_travel_next_event;
/* the single event backing the clockevent timer */
static struct time_travel_event time_travel_timer_event;
/* TT_MODE_EXTERNAL state: controller socket fd and protocol bookkeeping */
static int time_travel_ext_fd = -1;
static unsigned int time_travel_ext_waiting;
static bool time_travel_ext_prev_request_valid;
static unsigned long long time_travel_ext_prev_request;
static bool time_travel_ext_free_until_valid;
static unsigned long long time_travel_ext_free_until;
45 
46 static void time_travel_set_time(unsigned long long ns)
47 {
48 	if (unlikely(ns < time_travel_time))
49 		panic("time-travel: time goes backwards %lld -> %lld\n",
50 		      time_travel_time, ns);
51 	else if (unlikely(ns >= S64_MAX))
52 		panic("The system was going to sleep forever, aborting");
53 
54 	time_travel_time = ns;
55 }
56 
/* how time_travel_handle_message() waits for the next controller message */
enum time_travel_message_handling {
	TTMH_IDLE,	/* called from the idle loop; IRQs must be disabled */
	TTMH_POLL,	/* poll the socket until a message arrives */
	TTMH_READ,	/* read immediately, without polling first */
};
62 
/*
 * Read one message from the time-travel controller socket into @msg
 * and act on it.  An incoming ACK is returned to the caller (the
 * requester waiting in time_travel_ext_req()) without a response;
 * all other messages are answered with an UM_TIMETRAVEL_ACK.
 */
static void time_travel_handle_message(struct um_timetravel_msg *msg,
				       enum time_travel_message_handling mode)
{
	struct um_timetravel_msg resp = {
		.op = UM_TIMETRAVEL_ACK,
	};
	int ret;

	/*
	 * We can't unlock here, but interrupt signals with a timetravel_handler
	 * (see um_request_irq_tt) get to the timetravel_handler anyway.
	 */
	if (mode != TTMH_READ) {
		BUG_ON(mode == TTMH_IDLE && !irqs_disabled());

		/* busy-wait until the controller fd has data */
		while (os_poll(1, &time_travel_ext_fd) != 0) {
			/* nothing */
		}
	}

	ret = os_read_file(time_travel_ext_fd, msg, sizeof(*msg));

	if (ret == 0)
		panic("time-travel external link is broken\n");
	if (ret != sizeof(*msg))
		panic("invalid time-travel message - %d bytes\n", ret);

	switch (msg->op) {
	default:
		WARN_ONCE(1, "time-travel: unexpected message %lld\n",
			  (unsigned long long)msg->op);
		break;
	case UM_TIMETRAVEL_ACK:
		/* ACKs are consumed by the requester - never acked back */
		return;
	case UM_TIMETRAVEL_RUN:
		/* we're scheduled to run at the given simulated time */
		time_travel_set_time(msg->time);
		break;
	case UM_TIMETRAVEL_FREE_UNTIL:
		/* we may run freely (without requests) until this time */
		time_travel_ext_free_until_valid = true;
		time_travel_ext_free_until = msg->time;
		break;
	}

	resp.seq = msg->seq;
	os_write_file(time_travel_ext_fd, &resp, sizeof(resp));
}
109 
/*
 * Send request @op (with @time as argument) to the controller and wait
 * for its ACK; returns the time value carried in the ACK message.  For
 * UM_TIMETRAVEL_GET, the returned time also updates our simulated clock.
 */
static u64 time_travel_ext_req(u32 op, u64 time)
{
	static int seq;
	int mseq = ++seq;
	struct um_timetravel_msg msg = {
		.op = op,
		.time = time,
		.seq = mseq,
	};

	/*
	 * We need to block even the timetravel handlers of SIGIO here and
	 * only restore their use when we got the ACK - otherwise we may
	 * (will) get interrupted by that, try to queue the IRQ for future
	 * processing and thus send another request while we're still waiting
	 * for an ACK, but the peer doesn't know we got interrupted and will
	 * send the ACKs in the same order as the message, but we'd need to
	 * see them in the opposite order ...
	 *
	 * This wouldn't matter *too* much, but some ACKs carry the
	 * current time (for UM_TIMETRAVEL_GET) and getting another
	 * ACK without a time would confuse us a lot!
	 *
	 * The sequence number assignment that happens here lets us
	 * debug such message handling issues more easily.
	 */
	block_signals_hard();
	os_write_file(time_travel_ext_fd, &msg, sizeof(msg));

	/* non-ACK messages arriving meanwhile are handled in place */
	while (msg.op != UM_TIMETRAVEL_ACK)
		time_travel_handle_message(&msg, TTMH_READ);

	if (msg.seq != mseq)
		panic("time-travel: ACK message has different seqno! op=%d, seq=%d != %d time=%lld\n",
		      msg.op, msg.seq, mseq, msg.time);

	if (op == UM_TIMETRAVEL_GET)
		time_travel_set_time(msg.time);
	unblock_signals_hard();

	return msg.time;
}
152 
/*
 * In external mode, wait until @fd becomes readable, processing any
 * controller messages that arrive on the time-travel socket meanwhile.
 * NOTE(review): this relies on os_poll()'s return value identifying
 * which fd woke us - ret == 1 is taken to mean the controller socket;
 * verify against the os_poll() implementation.
 */
void __time_travel_wait_readable(int fd)
{
	int fds[2] = { fd, time_travel_ext_fd };
	int ret;

	if (time_travel_mode != TT_MODE_EXTERNAL)
		return;

	while ((ret = os_poll(2, fds))) {
		struct um_timetravel_msg msg;

		if (ret == 1)
			time_travel_handle_message(&msg, TTMH_READ);
	}
}
EXPORT_SYMBOL_GPL(__time_travel_wait_readable);
169 
170 static void time_travel_ext_update_request(unsigned long long time)
171 {
172 	if (time_travel_mode != TT_MODE_EXTERNAL)
173 		return;
174 
175 	/* asked for exactly this time previously */
176 	if (time_travel_ext_prev_request_valid &&
177 	    time == time_travel_ext_prev_request)
178 		return;
179 
180 	/*
181 	 * if we're running and are allowed to run past the request
182 	 * then we don't need to update it either
183 	 */
184 	if (!time_travel_ext_waiting && time_travel_ext_free_until_valid &&
185 	    time < time_travel_ext_free_until)
186 		return;
187 
188 	time_travel_ext_prev_request = time;
189 	time_travel_ext_prev_request_valid = true;
190 	time_travel_ext_req(UM_TIMETRAVEL_REQUEST, time);
191 }
192 
193 void __time_travel_propagate_time(void)
194 {
195 	static unsigned long long last_propagated;
196 
197 	if (last_propagated == time_travel_time)
198 		return;
199 
200 	time_travel_ext_req(UM_TIMETRAVEL_UPDATE, time_travel_time);
201 	last_propagated = time_travel_time;
202 }
203 EXPORT_SYMBOL_GPL(__time_travel_propagate_time);
204 
205 /* returns true if we must do a wait to the simtime device */
206 static bool time_travel_ext_request(unsigned long long time)
207 {
208 	/*
209 	 * If we received an external sync point ("free until") then we
210 	 * don't have to request/wait for anything until then, unless
211 	 * we're already waiting.
212 	 */
213 	if (!time_travel_ext_waiting && time_travel_ext_free_until_valid &&
214 	    time < time_travel_ext_free_until)
215 		return false;
216 
217 	time_travel_ext_update_request(time);
218 	return true;
219 }
220 
/*
 * Tell the controller we have nothing to do (UM_TIMETRAVEL_WAIT) and
 * then process incoming messages until it schedules us again with an
 * UM_TIMETRAVEL_RUN message.  @idle selects idle-style (TTMH_IDLE)
 * vs. polling (TTMH_POLL) message handling.
 */
static void time_travel_ext_wait(bool idle)
{
	struct um_timetravel_msg msg = {
		.op = UM_TIMETRAVEL_ACK,
	};

	time_travel_ext_prev_request_valid = false;
	time_travel_ext_free_until_valid = false;
	time_travel_ext_waiting++;

	time_travel_ext_req(UM_TIMETRAVEL_WAIT, -1);

	/*
	 * Here we are deep in the idle loop, so we have to break out of the
	 * kernel abstraction in a sense and implement this in terms of the
	 * UML system waiting on the VQ interrupt while sleeping, when we get
	 * the signal it'll call time_travel_ext_vq_notify_done() completing the
	 * call.
	 */
	while (msg.op != UM_TIMETRAVEL_RUN)
		time_travel_handle_message(&msg, idle ? TTMH_IDLE : TTMH_POLL);

	time_travel_ext_waiting--;

	/* we might request more stuff while polling - reset when we run */
	time_travel_ext_prev_request_valid = false;
}
248 
/*
 * Sync our simulated clock with the controller's current time
 * (the GET ACK's time updates it, see time_travel_ext_req()).
 */
static void time_travel_ext_get_time(void)
{
	time_travel_ext_req(UM_TIMETRAVEL_GET, -1);
}
253 
254 static void __time_travel_update_time(unsigned long long ns, bool idle)
255 {
256 	if (time_travel_mode == TT_MODE_EXTERNAL && time_travel_ext_request(ns))
257 		time_travel_ext_wait(idle);
258 	else
259 		time_travel_set_time(ns);
260 }
261 
/* earliest pending event, or NULL if none are queued */
static struct time_travel_event *time_travel_first_event(void)
{
	return list_first_entry_or_null(&time_travel_events,
					struct time_travel_event,
					list);
}
268 
/*
 * Insert @e into the time-sorted event list (no-op if it is already
 * pending), then propagate the new earliest expiry to the external
 * controller - all with interrupts disabled.
 */
static void __time_travel_add_event(struct time_travel_event *e,
				    unsigned long long time)
{
	struct time_travel_event *tmp;
	bool inserted = false;
	unsigned long flags;

	if (e->pending)
		return;

	e->pending = true;
	e->time = time;

	local_irq_save(flags);
	list_for_each_entry(tmp, &time_travel_events, list) {
		/*
		 * Add the new entry before one with higher time,
		 * or if they're equal and both on stack, because
		 * in that case we need to unwind the stack in the
		 * right order, and the later event (timer sleep
		 * or such) must be dequeued first.
		 */
		if ((tmp->time > e->time) ||
		    (tmp->time == e->time && tmp->onstack && e->onstack)) {
			list_add_tail(&e->list, &tmp->list);
			inserted = true;
			break;
		}
	}

	/* later than everything else - append at the end */
	if (!inserted)
		list_add_tail(&e->list, &time_travel_events);

	tmp = time_travel_first_event();
	time_travel_ext_update_request(tmp->time);
	time_travel_next_event = tmp->time;
	local_irq_restore(flags);
}
307 
308 static void time_travel_add_event(struct time_travel_event *e,
309 				  unsigned long long time)
310 {
311 	if (WARN_ON(!e->fn))
312 		return;
313 
314 	__time_travel_add_event(e, time);
315 }
316 
/* queue event @e at @delay_ns nanoseconds after the current simulated time */
void time_travel_add_event_rel(struct time_travel_event *e,
			       unsigned long long delay_ns)
{
	time_travel_add_event(e, time_travel_time + delay_ns);
}
322 
/*
 * Handler for the periodic timer event: re-queue it one interval in
 * the future first, then deliver the alarm.
 */
static void time_travel_periodic_timer(struct time_travel_event *e)
{
	time_travel_add_event(&time_travel_timer_event,
			      time_travel_time + time_travel_timer_interval);
	deliver_alarm();
}
329 
/*
 * Deliver IRQ events that were queued on time_travel_irqs because they
 * expired while interrupts were disabled
 * (see time_travel_deliver_event()).
 */
void deliver_time_travel_irqs(void)
{
	struct time_travel_event *e;
	unsigned long flags;

	/*
	 * Don't do anything for most cases. Note that because here we have
	 * to disable IRQs (and re-enable later) we'll actually recurse at
	 * the end of the function, so this is strictly necessary.
	 */
	if (likely(list_empty(&time_travel_irqs)))
		return;

	local_irq_save(flags);
	irq_enter();
	while ((e = list_first_entry_or_null(&time_travel_irqs,
					     struct time_travel_event,
					     list))) {
		list_del(&e->list);
		e->pending = false;
		e->fn(e);
	}
	irq_exit();
	local_irq_restore(flags);
}
355 
/*
 * Run an expired event's handler.  The timer event is special-cased
 * because deliver_alarm() does irq_enter()/irq_exit() itself; events
 * that expire while IRQs are disabled are deferred to time_travel_irqs
 * and delivered later by deliver_time_travel_irqs().
 */
static void time_travel_deliver_event(struct time_travel_event *e)
{
	if (e == &time_travel_timer_event) {
		/*
		 * deliver_alarm() does the irq_enter/irq_exit
		 * by itself, so must handle it specially here
		 */
		e->fn(e);
	} else if (irqs_disabled()) {
		list_add_tail(&e->list, &time_travel_irqs);
		/*
		 * set pending again, it was set to false when the
		 * event was deleted from the original list, but
		 * now it's still pending until we deliver the IRQ.
		 */
		e->pending = true;
	} else {
		unsigned long flags;

		local_irq_save(flags);
		irq_enter();
		e->fn(e);
		irq_exit();
		local_irq_restore(flags);
	}
}
382 
383 bool time_travel_del_event(struct time_travel_event *e)
384 {
385 	unsigned long flags;
386 
387 	if (!e->pending)
388 		return false;
389 	local_irq_save(flags);
390 	list_del(&e->list);
391 	e->pending = false;
392 	local_irq_restore(flags);
393 	return true;
394 }
395 
/*
 * Advance simulated time to @next, delivering every event that expires
 * on the way.  The on-stack marker event @ne is queued at @next so we
 * know when we've arrived; when @idle is set, a single wait/delivery
 * step suffices (finished starts out true) and idle-style message
 * handling is used in external mode.
 */
static void time_travel_update_time(unsigned long long next, bool idle)
{
	struct time_travel_event ne = {
		.onstack = true,
	};
	struct time_travel_event *e;
	bool finished = idle;

	/* add it without a handler - we deal with that specifically below */
	__time_travel_add_event(&ne, next);

	do {
		e = time_travel_first_event();

		BUG_ON(!e);
		__time_travel_update_time(e->time, idle);

		/* new events may have been inserted while we were waiting */
		if (e == time_travel_first_event()) {
			BUG_ON(!time_travel_del_event(e));
			BUG_ON(time_travel_time != e->time);

			if (e == &ne) {
				finished = true;
			} else {
				if (e->onstack)
					panic("On-stack event dequeued outside of the stack! time=%lld, event time=%lld, event=%pS\n",
					      time_travel_time, e->time, e);
				time_travel_deliver_event(e);
			}
		}

		e = time_travel_first_event();
		if (e)
			time_travel_ext_update_request(e->time);
	} while (ne.pending && !finished);

	time_travel_del_event(&ne);
}
435 
/* advance simulated time by @offs nanoseconds, delivering due events */
static void time_travel_update_time_rel(unsigned long long offs)
{
	unsigned long flags;

	/*
	 * Disable interrupts before calculating the new time so
	 * that a real timer interrupt (signal) can't happen at
	 * a bad time e.g. after we read time_travel_time but
	 * before we've completed updating the time.
	 */
	local_irq_save(flags);
	time_travel_update_time(time_travel_time + offs, false);
	local_irq_restore(flags);
}
450 
/* "delay" for @nsec ns by simply advancing simulated time accordingly */
void time_travel_ndelay(unsigned long nsec)
{
	/*
	 * Not strictly needed to use _rel() version since this is
	 * only used in INFCPU/EXT modes, but it doesn't hurt and
	 * is more readable too.
	 */
	time_travel_update_time_rel(nsec);
}
EXPORT_SYMBOL(time_travel_ndelay);
461 
/*
 * Queue an interrupt event at the controller's *current* time, i.e.
 * with no modelled interrupt latency.  Only valid in external mode.
 */
void time_travel_add_irq_event(struct time_travel_event *e)
{
	BUG_ON(time_travel_mode != TT_MODE_EXTERNAL);

	time_travel_ext_get_time();
	/*
	 * We could model interrupt latency here, for now just
	 * don't have any latency at all and request the exact
	 * same time (again) to run the interrupt...
	 */
	time_travel_add_event(e, time_travel_time);
}
EXPORT_SYMBOL_GPL(time_travel_add_irq_event);
475 
/* handler for the one-shot timer event: just deliver the alarm */
static void time_travel_oneshot_timer(struct time_travel_event *e)
{
	deliver_alarm();
}
480 
/*
 * Idle sleep: advance simulated time until the next event becomes due.
 * In basic mode the real (host) timer is disabled across the sleep and
 * re-armed afterwards if the timer event is still pending.
 */
void time_travel_sleep(void)
{
	/*
	 * Wait "forever" (using S64_MAX because there are some potential
	 * wrapping issues, especially with the current TT_MODE_EXTERNAL
	 * controller application).
	 */
	unsigned long long next = S64_MAX;

	if (time_travel_mode == TT_MODE_BASIC)
		os_timer_disable();

	time_travel_update_time(next, true);

	if (time_travel_mode == TT_MODE_BASIC &&
	    time_travel_timer_event.pending) {
		if (time_travel_timer_event.fn == time_travel_periodic_timer) {
			/*
			 * This is somewhat wrong - we should get the first
			 * one sooner like the os_timer_one_shot() below...
			 */
			os_timer_set_interval(time_travel_timer_interval);
		} else {
			/*
			 * NOTE(review): next == S64_MAX here, so
			 * ".time - next" wraps; should this be
			 * ".time - time_travel_time"?  Verify intent.
			 */
			os_timer_one_shot(time_travel_timer_event.time - next);
		}
	}
}
508 
/*
 * A real timer signal fired in basic mode: jump the simulated clock to
 * the expected expiry, dequeue the timer event and, for the periodic
 * timer, queue the next period.
 */
static void time_travel_handle_real_alarm(void)
{
	time_travel_set_time(time_travel_next_event);

	time_travel_del_event(&time_travel_timer_event);

	if (time_travel_timer_event.fn == time_travel_periodic_timer)
		time_travel_add_event(&time_travel_timer_event,
				      time_travel_time +
				      time_travel_timer_interval);
}
520 
/* record the periodic timer interval, used when (re-)arming the event */
static void time_travel_set_interval(unsigned long long interval)
{
	time_travel_timer_interval = interval;
}
525 
/*
 * Parse a "[ID:]/path/to/socket" string, connect to the controller
 * socket and send the initial UM_TIMETRAVEL_START handshake carrying
 * the (optional) ID.  Returns 1 on success per __setup() convention;
 * the error returns are unreachable since panic() does not return.
 */
static int time_travel_connect_external(const char *socket)
{
	const char *sep;
	/* -1 == "no ID given" */
	unsigned long long id = (unsigned long long)-1;
	int rc;

	if ((sep = strchr(socket, ':'))) {
		char buf[25] = {};
		if (sep - socket > sizeof(buf) - 1)
			goto invalid_number;

		memcpy(buf, socket, sep - socket);
		if (kstrtoull(buf, 0, &id)) {
invalid_number:
			panic("time-travel: invalid external ID in string '%s'\n",
			      socket);
			return -EINVAL;
		}

		socket = sep + 1;
	}

	rc = os_connect_socket(socket);
	if (rc < 0) {
		panic("time-travel: failed to connect to external socket %s\n",
		      socket);
		return rc;
	}

	time_travel_ext_fd = rc;

	time_travel_ext_req(UM_TIMETRAVEL_START, id);

	return 1;
}
561 
562 static void time_travel_set_start(void)
563 {
564 	if (time_travel_start_set)
565 		return;
566 
567 	switch (time_travel_mode) {
568 	case TT_MODE_EXTERNAL:
569 		time_travel_start = time_travel_ext_req(UM_TIMETRAVEL_GET_TOD, -1);
570 		/* controller gave us the *current* time, so adjust by that */
571 		time_travel_ext_get_time();
572 		time_travel_start -= time_travel_time;
573 		break;
574 	case TT_MODE_INFCPU:
575 	case TT_MODE_BASIC:
576 		if (!time_travel_start_set)
577 			time_travel_start = os_persistent_clock_emulation();
578 		break;
579 	case TT_MODE_OFF:
580 		/* we just read the host clock with os_persistent_clock_emulation() */
581 		break;
582 	}
583 
584 	time_travel_start_set = true;
585 }
#else /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
/* time travel compiled out: constants fold away and stubs do nothing */
#define time_travel_start_set 0
#define time_travel_start 0
#define time_travel_time 0
#define time_travel_ext_waiting 0

static inline void time_travel_update_time(unsigned long long ns, bool idle)
{
}

static inline void time_travel_update_time_rel(unsigned long long offs)
{
}

static inline void time_travel_handle_real_alarm(void)
{
}

static void time_travel_set_interval(unsigned long long interval)
{
}

static inline void time_travel_set_start(void)
{
}

/* fail link if this actually gets used */
extern u64 time_travel_ext_req(u32 op, u64 time);

/* these are empty macros so the struct/fn need not exist */
#define time_travel_add_event(e, time) do { } while (0)
/* externally not usable - redefine here so we can */
#undef time_travel_del_event
#define time_travel_del_event(e) do { } while (0)
#endif
621 
/*
 * Timer signal entry point: account simulated time in basic mode,
 * then dispatch TIMER_IRQ with interrupts disabled.
 */
void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	unsigned long flags;

	/*
	 * In basic time-travel mode we still get real interrupts
	 * (signals) but since we don't read time from the OS, we
	 * must update the simulated time here to the expiry when
	 * we get a signal.
	 * This is not the case in inf-cpu mode, since there we
	 * never get any real signals from the OS.
	 */
	if (time_travel_mode == TT_MODE_BASIC)
		time_travel_handle_real_alarm();

	local_irq_save(flags);
	do_IRQ(TIMER_IRQ, regs);
	local_irq_restore(flags);
}
641 
642 static int itimer_shutdown(struct clock_event_device *evt)
643 {
644 	if (time_travel_mode != TT_MODE_OFF)
645 		time_travel_del_event(&time_travel_timer_event);
646 
647 	if (time_travel_mode != TT_MODE_INFCPU &&
648 	    time_travel_mode != TT_MODE_EXTERNAL)
649 		os_timer_disable();
650 
651 	return 0;
652 }
653 
654 static int itimer_set_periodic(struct clock_event_device *evt)
655 {
656 	unsigned long long interval = NSEC_PER_SEC / HZ;
657 
658 	if (time_travel_mode != TT_MODE_OFF) {
659 		time_travel_del_event(&time_travel_timer_event);
660 		time_travel_set_event_fn(&time_travel_timer_event,
661 					 time_travel_periodic_timer);
662 		time_travel_set_interval(interval);
663 		time_travel_add_event(&time_travel_timer_event,
664 				      time_travel_time + interval);
665 	}
666 
667 	if (time_travel_mode != TT_MODE_INFCPU &&
668 	    time_travel_mode != TT_MODE_EXTERNAL)
669 		os_timer_set_interval(interval);
670 
671 	return 0;
672 }
673 
674 static int itimer_next_event(unsigned long delta,
675 			     struct clock_event_device *evt)
676 {
677 	delta += 1;
678 
679 	if (time_travel_mode != TT_MODE_OFF) {
680 		time_travel_del_event(&time_travel_timer_event);
681 		time_travel_set_event_fn(&time_travel_timer_event,
682 					 time_travel_oneshot_timer);
683 		time_travel_add_event(&time_travel_timer_event,
684 				      time_travel_time + delta);
685 	}
686 
687 	if (time_travel_mode != TT_MODE_INFCPU &&
688 	    time_travel_mode != TT_MODE_EXTERNAL)
689 		return os_timer_one_shot(delta);
690 
691 	return 0;
692 }
693 
/* one-shot state entry: program an (almost) immediate expiry */
static int itimer_one_shot(struct clock_event_device *evt)
{
	return itimer_next_event(0, evt);
}
698 
/* clockevent device backed by the host timer (os_timer_*) */
static struct clock_event_device timer_clockevent = {
	.name			= "posix-timer",
	.rating			= 250,
	.cpumask		= cpu_possible_mask,
	.features		= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT,
	.set_state_shutdown	= itimer_shutdown,
	.set_state_periodic	= itimer_set_periodic,
	.set_state_oneshot	= itimer_one_shot,
	.set_next_event		= itimer_next_event,
	.shift			= 0,
	.max_delta_ns		= 0xffffffff,
	.max_delta_ticks	= 0xffffffff,
	.min_delta_ns		= TIMER_MIN_DELTA,
	.min_delta_ticks	= TIMER_MIN_DELTA, /* TIMER_MIN_DELTA resolution deemed sufficient */
	.irq			= 0,
	.mult			= 1,	/* with .shift == 0: deltas are used as-is */
};
717 
718 static irqreturn_t um_timer(int irq, void *dev)
719 {
720 	if (get_current()->mm != NULL)
721 	{
722         /* userspace - relay signal, results in correct userspace timers */
723 		os_alarm_process(get_current()->mm->context.id.u.pid);
724 	}
725 
726 	(*timer_clockevent.event_handler)(&timer_clockevent);
727 
728 	return IRQ_HANDLED;
729 }
730 
/*
 * Clocksource read: in time-travel modes return (and slightly advance)
 * the simulated clock, otherwise the host clock - both scaled down by
 * TIMER_MULTIPLIER.
 */
static u64 timer_read(struct clocksource *cs)
{
	if (time_travel_mode != TT_MODE_OFF) {
		/*
		 * We make reading the timer cost a bit so that we don't get
		 * stuck in loops that expect time to move more than the
		 * exact requested sleep amount, e.g. python's socket server,
		 * see https://bugs.python.org/issue37026.
		 *
		 * However, don't do that when we're in interrupt or such as
		 * then we might recurse into our own processing, and get to
		 * even more waiting, and that's not good - it messes up the
		 * "what do I do next" and onstack event we use to know when
		 * to return from time_travel_update_time().
		 */
		if (!irqs_disabled() && !in_interrupt() && !in_softirq() &&
		    !time_travel_ext_waiting)
			time_travel_update_time_rel(TIMER_MULTIPLIER);
		return time_travel_time / TIMER_MULTIPLIER;
	}

	return os_nsecs() / TIMER_MULTIPLIER;
}
754 
/* clocksource backed by timer_read(): simulated or host nanoseconds */
static struct clocksource timer_clocksource = {
	.name		= "timer",
	.rating		= 300,
	.read		= timer_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
762 
763 static void __init um_timer_setup(void)
764 {
765 	int err;
766 
767 	err = request_irq(TIMER_IRQ, um_timer, IRQF_TIMER, "hr timer", NULL);
768 	if (err != 0)
769 		printk(KERN_ERR "register_timer : request_irq failed - "
770 		       "errno = %d\n", -err);
771 
772 	err = os_timer_create();
773 	if (err != 0) {
774 		printk(KERN_ERR "creation of timer failed - errno = %d\n", -err);
775 		return;
776 	}
777 
778 	err = clocksource_register_hz(&timer_clocksource, NSEC_PER_SEC/TIMER_MULTIPLIER);
779 	if (err) {
780 		printk(KERN_ERR "clocksource_register_hz returned %d\n", err);
781 		return;
782 	}
783 	clockevents_register_device(&timer_clockevent);
784 }
785 
786 void read_persistent_clock64(struct timespec64 *ts)
787 {
788 	long long nsecs;
789 
790 	time_travel_set_start();
791 
792 	if (time_travel_mode != TT_MODE_OFF)
793 		nsecs = time_travel_start + time_travel_time;
794 	else
795 		nsecs = os_persistent_clock_emulation();
796 
797 	set_normalized_timespec64(ts, nsecs / NSEC_PER_SEC,
798 				  nsecs % NSEC_PER_SEC);
799 }
800 
/* early time init: install the timer signal handler, defer device setup */
void __init time_init(void)
{
	timer_set_signal_handler();
	late_time_init = um_timer_setup;
}
806 
807 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
808 unsigned long calibrate_delay_is_known(void)
809 {
810 	if (time_travel_mode == TT_MODE_INFCPU ||
811 	    time_travel_mode == TT_MODE_EXTERNAL)
812 		return 1;
813 	return 0;
814 }
815 
816 static int setup_time_travel(char *str)
817 {
818 	if (strcmp(str, "=inf-cpu") == 0) {
819 		time_travel_mode = TT_MODE_INFCPU;
820 		timer_clockevent.name = "time-travel-timer-infcpu";
821 		timer_clocksource.name = "time-travel-clock";
822 		return 1;
823 	}
824 
825 	if (strncmp(str, "=ext:", 5) == 0) {
826 		time_travel_mode = TT_MODE_EXTERNAL;
827 		timer_clockevent.name = "time-travel-timer-external";
828 		timer_clocksource.name = "time-travel-clock-external";
829 		return time_travel_connect_external(str + 5);
830 	}
831 
832 	if (!*str) {
833 		time_travel_mode = TT_MODE_BASIC;
834 		timer_clockevent.name = "time-travel-timer";
835 		timer_clocksource.name = "time-travel-clock";
836 		return 1;
837 	}
838 
839 	return -EINVAL;
840 }
841 
/* register the "time-travel[=...]" command line option and its help text */
__setup("time-travel", setup_time_travel);
__uml_help(setup_time_travel,
"time-travel\n"
"This option just enables basic time travel mode, in which the clock/timers\n"
"inside the UML instance skip forward when there's nothing to do, rather than\n"
"waiting for real time to elapse. However, instance CPU speed is limited by\n"
"the real CPU speed, so e.g. a 10ms timer will always fire after ~10ms wall\n"
"clock (but quicker when there's nothing to do).\n"
"\n"
"time-travel=inf-cpu\n"
"This enables time travel mode with infinite processing power, in which there\n"
"are no wall clock timers, and any CPU processing happens - as seen from the\n"
"guest - instantly. This can be useful for accurate simulation regardless of\n"
"debug overhead, physical CPU speed, etc. but is somewhat dangerous as it can\n"
"easily lead to getting stuck (e.g. if anything in the system busy loops).\n"
"\n"
"time-travel=ext:[ID:]/path/to/socket\n"
"This enables time travel mode similar to =inf-cpu, except the system will\n"
"use the given socket to coordinate with a central scheduler, in order to\n"
"have more than one system simultaneously be on simulated time. The virtio\n"
"driver code in UML knows about this so you can also simulate networks and\n"
"devices using it, assuming the device has the right capabilities.\n"
"The optional ID is a 64-bit integer that's sent to the central scheduler.\n");
865 
866 static int setup_time_travel_start(char *str)
867 {
868 	int err;
869 
870 	err = kstrtoull(str, 0, &time_travel_start);
871 	if (err)
872 		return err;
873 
874 	time_travel_start_set = 1;
875 	return 1;
876 }
877 
/* register the "time-travel-start=<seconds>" command line option */
__setup("time-travel-start", setup_time_travel_start);
__uml_help(setup_time_travel_start,
"time-travel-start=<seconds>\n"
"Configure the UML instance's wall clock to start at this value rather than\n"
"the host's wall clock at the time of UML boot.\n");
#endif
884