xref: /linux/arch/x86/xen/time.c (revision fd639726bf15fca8ee1a00dce8e0096d0ad9bd18)
// SPDX-License-Identifier: GPL-2.0
/*
 * Xen time implementation.
 *
 * This is implemented in terms of a clocksource driver which uses
 * the hypervisor clock as a nanosecond timebase, and a clockevent
 * driver which uses the hypervisor's timer mechanism.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/pvclock_gtod.h>
#include <linux/timekeeper_internal.h>

#include <asm/pvclock.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/events.h>
#include <xen/features.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include "xen-ops.h"

/* Xen may fire a timer up to this many ns early */
#define TIMER_SLOP	100000

/* Get the TSC speed from Xen */
static unsigned long xen_tsc_khz(void)
{
	struct pvclock_vcpu_time_info *info =
		&HYPERVISOR_shared_info->vcpu_info[0].time;

	return pvclock_tsc_khz(info);
}

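/*
 * Read the current system time from the hypervisor's pvclock.  This
 * also backs sched_clock (see xen_time_ops below), hence the notrace
 * preempt disable: we must read the time info of the vcpu we are
 * currently running on, without tracing ourselves in the process.
 */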
u64 xen_clocksource_read(void)
{
	struct pvclock_vcpu_time_info *src;
	u64 ret;

	preempt_disable_notrace();
	src = &__this_cpu_read(xen_vcpu)->time;
	ret = pvclock_clocksource_read(src);
	preempt_enable_notrace();
	return ret;
}

static u64 xen_clocksource_get_cycles(struct clocksource *cs)
{
	return xen_clocksource_read();
}

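/*
 * Read the wallclock (time of day) that Xen maintains in the shared
 * info page, using this CPU's pvclock time info to extrapolate from
 * the last wallclock update to the current moment.
 */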
static void xen_read_wallclock(struct timespec *ts)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	struct pvclock_wall_clock *wall_clock = &(s->wc);
	struct pvclock_vcpu_time_info *vcpu_time;

	vcpu_time = &get_cpu_var(xen_vcpu)->time;
	pvclock_read_wallclock(wall_clock, vcpu_time, ts);
	put_cpu_var(xen_vcpu);
}

static void xen_get_wallclock(struct timespec *now)
{
	xen_read_wallclock(now);
}

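/*
 * A guest cannot set the hardware RTC; only dom0 can, and it uses the
 * native path for that (see xen_init_time_ops() below).
 */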
static int xen_set_wallclock(const struct timespec *now)
{
	return -ENODEV;
}

static int xen_pvclock_gtod_notify(struct notifier_block *nb,
				   unsigned long was_set, void *priv)
{
	/* Protected by the calling core code serialization */
	static struct timespec64 next_sync;

	struct xen_platform_op op;
	struct timespec64 now;
	struct timekeeper *tk = priv;
	static bool settime64_supported = true;
	int ret;

	now.tv_sec = tk->xtime_sec;
	now.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);

	/*
	 * Only take the expensive hypercall when the clock was set
	 * or when 11 minutes have passed since the last RTC
	 * synchronization.
	 */
	if (!was_set && timespec64_compare(&now, &next_sync) < 0)
		return NOTIFY_OK;

again:
	if (settime64_supported) {
		op.cmd = XENPF_settime64;
		op.u.settime64.mbz = 0;
		op.u.settime64.secs = now.tv_sec;
		op.u.settime64.nsecs = now.tv_nsec;
		op.u.settime64.system_time = xen_clocksource_read();
	} else {
		op.cmd = XENPF_settime32;
		op.u.settime32.secs = now.tv_sec;
		op.u.settime32.nsecs = now.tv_nsec;
		op.u.settime32.system_time = xen_clocksource_read();
	}

	ret = HYPERVISOR_platform_op(&op);

	if (ret == -ENOSYS && settime64_supported) {
		settime64_supported = false;
		goto again;
	}
	if (ret < 0)
		return NOTIFY_BAD;

	/*
	 * Move the next drift compensation time 11 minutes ahead.
	 * This emulates the sync_cmos_clock() update for the hardware
	 * RTC.
	 */
	next_sync = now;
	next_sync.tv_sec += 11 * 60;

	return NOTIFY_OK;
}

static struct notifier_block xen_pvclock_gtod_notifier = {
	.notifier_call = xen_pvclock_gtod_notify,
};

static struct clocksource xen_clocksource __read_mostly = {
	.name = "xen",
	.rating = 400,
	.read = xen_clocksource_get_cycles,
	.mask = ~0,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
 * Xen clockevent implementation
 *
 * Xen has two clockevent implementations:
 *
 * The old timer_op one works with all released versions of Xen prior
 * to version 3.0.4.  This version of the hypervisor provides a
 * single-shot timer with nanosecond resolution.  However, a 100Hz
 * tick, delivered while the vcpu is running, shares the same event
 * channel.  We don't care about or use this tick, but it will cause
 * the core time code to think the timer fired too soon, and it will
 * end up resetting the timer each time.  The tick could be filtered
 * out, but doing so has complications when the ktime clocksource is
 * not yet the xen clocksource (ie, at boot time).
 *
 * The new vcpu_op-based timer interface allows the tick timer period
 * to be changed or turned off.  The tick timer is not useful as a
 * periodic timer because events are only delivered to running vcpus.
 * The one-shot timer can report when a timeout is in the past, so
 * set_next_event is capable of returning -ETIME when appropriate.
 * This interface is used when available.
 */
/*
 * Get a hypervisor absolute time.  In theory we could maintain an
 * offset between the kernel's time and the hypervisor's time, and
 * apply that to a kernel's absolute timeout.  Unfortunately the
 * hypervisor and kernel times can drift even if the kernel is using
 * the Xen clocksource, because ntp can warp the kernel's clocksource.
 */
static s64 get_abs_timeout(unsigned long delta)
{
	return xen_clocksource_read() + delta;
}

static int xen_timerop_shutdown(struct clock_event_device *evt)
{
	/* cancel timeout */
	HYPERVISOR_set_timer_op(0);

	return 0;
}

static int xen_timerop_set_next_event(unsigned long delta,
				      struct clock_event_device *evt)
{
	WARN_ON(!clockevent_state_oneshot(evt));

	if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0)
		BUG();

	/*
	 * We may have missed the deadline, but there's no real way of
	 * knowing for sure.  If the event was in the past, then we'll
	 * get an immediate interrupt.
	 */

	return 0;
}

static const struct clock_event_device xen_timerop_clockevent = {
	.name			= "xen",
	.features		= CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns		= 0xffffffff,
	.max_delta_ticks	= 0xffffffff,
	.min_delta_ns		= TIMER_SLOP,
	.min_delta_ticks	= TIMER_SLOP,

	.mult			= 1,
	.shift			= 0,
	.rating			= 500,

	.set_state_shutdown	= xen_timerop_shutdown,
	.set_next_event		= xen_timerop_set_next_event,
};

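/* Stop both the one-shot and the (legacy 100Hz) periodic timer. */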
static int xen_vcpuop_shutdown(struct clock_event_device *evt)
{
	int cpu = smp_processor_id();

	if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, xen_vcpu_nr(cpu),
			       NULL) ||
	    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
			       NULL))
		BUG();

	return 0;
}

static int xen_vcpuop_set_oneshot(struct clock_event_device *evt)
{
	int cpu = smp_processor_id();

	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
			       NULL))
		BUG();

	return 0;
}

static int xen_vcpuop_set_next_event(unsigned long delta,
				     struct clock_event_device *evt)
{
	int cpu = smp_processor_id();
	struct vcpu_set_singleshot_timer single;
	int ret;

	WARN_ON(!clockevent_state_oneshot(evt));

	single.timeout_abs_ns = get_abs_timeout(delta);
	/* Get an event anyway, even if the timeout is already expired */
	single.flags = 0;

	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, xen_vcpu_nr(cpu),
				 &single);
	BUG_ON(ret != 0);

	return ret;
}

static const struct clock_event_device xen_vcpuop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.max_delta_ticks = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,
	.min_delta_ticks = TIMER_SLOP,

	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_state_shutdown = xen_vcpuop_shutdown,
	.set_state_oneshot = xen_vcpuop_set_oneshot,
	.set_next_event = xen_vcpuop_set_next_event,
};

static const struct clock_event_device *xen_clockevent =
	&xen_timerop_clockevent;

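/*
 * Per-cpu clockevent state.  The irq name is embedded in the per-cpu
 * data so that the string handed to bind_virq_to_irqhandler() stays
 * valid for as long as the irq is bound.
 */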
struct xen_clock_event_device {
	struct clock_event_device evt;
	char name[16];
};
static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };

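/*
 * Timer VIRQ handler: hand the event to whatever handler the
 * clockevents core has installed, if any.
 */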
static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = this_cpu_ptr(&xen_clock_events.evt);
	irqreturn_t ret;

	ret = IRQ_NONE;
	if (evt->event_handler) {
		evt->event_handler(evt);
		ret = IRQ_HANDLED;
	}

	return ret;
}

void xen_teardown_timer(int cpu)
{
	struct clock_event_device *evt;

	evt = &per_cpu(xen_clock_events, cpu).evt;

	if (evt->irq >= 0) {
		unbind_from_irqhandler(evt->irq, NULL);
		evt->irq = -1;
	}
}

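/*
 * Bind this CPU's timer VIRQ and initialize its clockevent device from
 * the template chosen at boot (timer_op- or vcpu_op-based).
 */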
void xen_setup_timer(int cpu)
{
	struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu);
	struct clock_event_device *evt = &xevt->evt;
	int irq;

	WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
	if (evt->irq >= 0)
		xen_teardown_timer(cpu);

	printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);

	snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu);

	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
				      IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
				      IRQF_FORCE_RESUME|IRQF_EARLY_RESUME,
				      xevt->name, NULL);
	(void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);

	memcpy(evt, xen_clockevent, sizeof(*evt));

	evt->cpumask = cpumask_of(cpu);
	evt->irq = irq;
}

void xen_setup_cpu_clockevents(void)
{
	clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt));
}

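/*
 * Resume path: let pvclock pick up the new time parameters and, when
 * the vcpu_op interface is in use, make sure the legacy periodic timer
 * stays off on every online CPU.
 */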
void xen_timer_resume(void)
{
	int cpu;

	pvclock_resume();

	if (xen_clockevent != &xen_vcpuop_clockevent)
		return;

	for_each_online_cpu(cpu) {
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer,
				       xen_vcpu_nr(cpu), NULL))
			BUG();
	}
}

static const struct pv_time_ops xen_time_ops __initconst = {
	.sched_clock = xen_clocksource_read,
	.steal_clock = xen_steal_clock,
};

static struct pvclock_vsyscall_time_info *xen_clock __read_mostly;

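/*
 * Suspend path: unregister the secondary time info area by registering
 * a NULL address, and zero the page so no stale data survives into the
 * resumed guest.
 */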
void xen_save_time_memory_area(void)
{
	struct vcpu_register_time_memory_area t;
	int ret;

	if (!xen_clock)
		return;

	t.addr.v = NULL;

	ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t);
	if (ret != 0)
		pr_notice("Cannot save secondary vcpu_time_info (err %d)",
			  ret);
	else
		clear_page(xen_clock);
}

void xen_restore_time_memory_area(void)
{
	struct vcpu_register_time_memory_area t;
	int ret;

	if (!xen_clock)
		return;

	t.addr.v = &xen_clock->pvti;

	ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t);

	/*
	 * We don't disable VCLOCK_PVCLOCK entirely if registering the
	 * secondary time info with Xen fails, or if we migrated to a
	 * host without the necessary flags. In both of these cases a
	 * process either sees a zeroed-out pvti or sees the
	 * PVCLOCK_TSC_STABLE_BIT cleared. Userspace checks that bit
	 * and, if it is 0, discards the data in pvti and falls back to
	 * a system call for a reliable timestamp.
	 */
	if (ret != 0)
		pr_notice("Cannot restore secondary vcpu_time_info (err %d)",
			  ret);
}

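/*
 * Register a "secondary" vcpu time info area for the vsyscall/vDSO
 * path, enabling VCLOCK_PVCLOCK for fast userspace clock reads.  This
 * is only usable when the hypervisor supports the call and the TSC is
 * stable; otherwise we quietly stay on the syscall path.
 */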
static void xen_setup_vsyscall_time_info(void)
{
	struct vcpu_register_time_memory_area t;
	struct pvclock_vsyscall_time_info *ti;
	int ret;

	ti = (struct pvclock_vsyscall_time_info *)get_zeroed_page(GFP_KERNEL);
	if (!ti)
		return;

	t.addr.v = &ti->pvti;

	ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t);
	if (ret) {
		pr_notice("xen: VCLOCK_PVCLOCK not supported (err %d)\n", ret);
		free_page((unsigned long)ti);
		return;
	}

	/*
	 * If the primary time info had this bit set, the secondary one
	 * should have it too, since it's the same data in both, just in
	 * different memory regions. But we still check it in case the
	 * hypervisor is buggy.
	 */
	if (!(ti->pvti.flags & PVCLOCK_TSC_STABLE_BIT)) {
		t.addr.v = NULL;
		ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area,
					 0, &t);
		if (!ret)
			free_page((unsigned long)ti);

		pr_notice("xen: VCLOCK_PVCLOCK not supported (tsc unstable)\n");
		return;
	}

	xen_clock = ti;
	pvclock_set_pvti_cpu0_va(xen_clock);

	xen_clocksource.archdata.vclock_mode = VCLOCK_PVCLOCK;
}

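/*
 * Boot-time setup: register the clocksource, pick the clockevent
 * implementation, and set the initial system time from Xen's
 * wallclock.
 */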
static void __init xen_time_init(void)
{
	struct pvclock_vcpu_time_info *pvti;
	int cpu = smp_processor_id();
	struct timespec tp;

	/* As Dom0 is never migrated, there is no penalty to using the TSC there */
	if (xen_initial_domain())
		xen_clocksource.rating = 275;

	clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC);

	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
			       NULL) == 0) {
		/*
		 * Successfully turned off the 100Hz tick, so we have
		 * the vcpuop-based timer interface.
		 */
		printk(KERN_DEBUG "Xen: using vcpuop timer interface\n");
		xen_clockevent = &xen_vcpuop_clockevent;
	}

	/* Set initial system time with full resolution */
	xen_read_wallclock(&tp);
	do_settimeofday(&tp);

	setup_force_cpu_cap(X86_FEATURE_TSC);

	/*
	 * Check the primary time info up front for the TSC-stable bit;
	 * if it is supported, the Xen clocksource can take its fast
	 * paths and the vsyscall time info can be set up.
	 */
	pvti = &__this_cpu_read(xen_vcpu)->time;
	if (pvti->flags & PVCLOCK_TSC_STABLE_BIT) {
		pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
		xen_setup_vsyscall_time_info();
	}

	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_setup_cpu_clockevents();

	xen_time_setup_guest();

	if (xen_initial_domain())
		pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
}

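/*
 * Install the Xen time infrastructure for PV guests: paravirt sched
 * and steal clocks, the timer init hooks, and the platform wallclock
 * operations.
 */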
void __ref xen_init_time_ops(void)
{
	pv_time_ops = xen_time_ops;

	x86_init.timers.timer_init = xen_time_init;
	x86_init.timers.setup_percpu_clockev = x86_init_noop;
	x86_cpuinit.setup_percpu_clockev = x86_init_noop;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	/* Dom0 uses the native method to set the hardware RTC. */
	if (!xen_initial_domain())
		x86_platform.set_wallclock = xen_set_wallclock;
}

#ifdef CONFIG_XEN_PVHVM
static void xen_hvm_setup_cpu_clockevents(void)
{
	int cpu = smp_processor_id();

	xen_setup_runstate_info(cpu);
	/*
	 * xen_setup_timer(cpu) is not called here: snprintf is not safe
	 * in atomic context. It is done instead from xen_hvm_cpu_notify
	 * (which gets called by smp_init during early bootup and also
	 * during CPU hotplug events).
	 */
	xen_setup_cpu_clockevents();
}

void __init xen_hvm_init_time_ops(void)
{
	/*
	 * The vector callback is needed: without it we cannot receive
	 * interrupts on CPUs other than 0, and at this point we don't
	 * know how many CPUs are available.
	 */
	if (!xen_have_vector_callback)
		return;

	if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
		printk(KERN_INFO "Xen doesn't support pvclock on HVM, "
				 "disabling pv timer\n");
		return;
	}

	pv_time_ops = xen_time_ops;
	x86_init.timers.setup_percpu_clockev = xen_time_init;
	x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	x86_platform.set_wallclock = xen_set_wallclock;
}
#endif