// SPDX-License-Identifier: GPL-2.0
/*
 * Xen time implementation.
 *
 * This is implemented in terms of a clocksource driver which uses
 * the hypervisor clock as a nanosecond timebase, and a clockevent
 * driver which uses the hypervisor's timer mechanism.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/pvclock_gtod.h>
#include <linux/timekeeper_internal.h>

#include <asm/pvclock.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/events.h>
#include <xen/features.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include "xen-ops.h"

/* Xen may fire a timer up to this many ns early */
#define TIMER_SLOP	100000
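/* It also serves as min_delta_ns/min_delta_ticks for the clockevents below. */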

/* Get the TSC speed from Xen */
static unsigned long xen_tsc_khz(void)
{
	struct pvclock_vcpu_time_info *info =
		&HYPERVISOR_shared_info->vcpu_info[0].time;

	return pvclock_tsc_khz(info);
}

u64 xen_clocksource_read(void)
{
	struct pvclock_vcpu_time_info *src;
	u64 ret;

	preempt_disable_notrace();
	src = &__this_cpu_read(xen_vcpu)->time;
	ret = pvclock_clocksource_read(src);
	preempt_enable_notrace();
	return ret;
}
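
/*
 * Illustrative sketch, not part of the driver: xen_clocksource_read()
 * returns nanoseconds of Xen system time, which increases
 * monotonically while the guest runs, so measuring an interval is
 * plain subtraction. The helper name is hypothetical.
 */
static inline u64 xen_example_elapsed_ns(u64 start_ns)
{
	/* Nanoseconds elapsed since start_ns was sampled. */
	return xen_clocksource_read() - start_ns;
}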

static u64 xen_clocksource_get_cycles(struct clocksource *cs)
{
	return xen_clocksource_read();
}

static void xen_read_wallclock(struct timespec *ts)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	struct pvclock_wall_clock *wall_clock = &(s->wc);
	struct pvclock_vcpu_time_info *vcpu_time;

	vcpu_time = &get_cpu_var(xen_vcpu)->time;
	pvclock_read_wallclock(wall_clock, vcpu_time, ts);
	put_cpu_var(xen_vcpu);
}

static void xen_get_wallclock(struct timespec *now)
{
	xen_read_wallclock(now);
}

static int xen_set_wallclock(const struct timespec *now)
{
	return -1;
}

static int xen_pvclock_gtod_notify(struct notifier_block *nb,
				   unsigned long was_set, void *priv)
{
	/* Protected by the calling core code serialization */
	static struct timespec64 next_sync;

	struct xen_platform_op op;
	struct timespec64 now;
	struct timekeeper *tk = priv;
	static bool settime64_supported = true;
	int ret;

	now.tv_sec = tk->xtime_sec;
	now.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);

	/*
	 * We only make the expensive hypercall when the clock was set
	 * or when the 11-minute RTC synchronization period has elapsed.
	 */
	if (!was_set && timespec64_compare(&now, &next_sync) < 0)
		return NOTIFY_OK;

again:
	if (settime64_supported) {
		op.cmd = XENPF_settime64;
		op.u.settime64.mbz = 0;
		op.u.settime64.secs = now.tv_sec;
		op.u.settime64.nsecs = now.tv_nsec;
		op.u.settime64.system_time = xen_clocksource_read();
	} else {
		op.cmd = XENPF_settime32;
		op.u.settime32.secs = now.tv_sec;
		op.u.settime32.nsecs = now.tv_nsec;
		op.u.settime32.system_time = xen_clocksource_read();
	}

	ret = HYPERVISOR_platform_op(&op);

	if (ret == -ENOSYS && settime64_supported) {
		settime64_supported = false;
		goto again;
	}
	if (ret < 0)
		return NOTIFY_BAD;

	/*
	 * Move the next drift compensation time 11 minutes
	 * ahead. That's emulating the sync_cmos_clock() update for
	 * the hardware RTC.
	 */
	next_sync = now;
	next_sync.tv_sec += 11 * 60;

	return NOTIFY_OK;
}

static struct notifier_block xen_pvclock_gtod_notifier = {
	.notifier_call = xen_pvclock_gtod_notify,
};

static struct clocksource xen_clocksource __read_mostly = {
	.name = "xen",
	.rating = 400,
	.read = xen_clocksource_get_cycles,
	.mask = ~0,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
   Xen clockevent implementation

   Xen has two clockevent implementations:

   The old timer_op one works with all released versions of Xen prior
   to version 3.0.4.  This version of the hypervisor provides a
   single-shot timer with nanosecond resolution.  However, a 100Hz
   tick, sharing the same event channel, is delivered while the vcpu
   is running.  We don't care about or use this tick, but it will
   cause the core time code to think the timer fired too soon, and
   will end up resetting it each time.  It could be filtered, but
   doing so has complications when the ktime clocksource is not yet
   the xen clocksource (i.e., at boot time).

   The new vcpu_op-based timer interface allows the tick timer period
   to be changed or turned off.  The tick timer is not useful as a
   periodic timer because events are only delivered to running vcpus.
   The one-shot timer can report when a timeout is in the past, so
   set_next_event is capable of returning -ETIME when appropriate.
   This interface is used when available.
*/
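
/*
 * A minimal sketch of that availability probe, assuming nothing
 * beyond what this file already includes: stopping the periodic tick
 * only succeeds on vcpu_op-capable Xen. xen_time_init() below
 * performs this exact check before switching xen_clockevent; the
 * helper name here is hypothetical and illustrative only.
 */
static inline bool xen_example_have_vcpuop_timer(int cpu)
{
	return HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer,
				  xen_vcpu_nr(cpu), NULL) == 0;
}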


/*
  Get a hypervisor absolute time.  In theory we could maintain an
  offset between the kernel's time and the hypervisor's time, and
  apply that to the kernel's absolute timeout.  Unfortunately the
  hypervisor and kernel times can drift even if the kernel is using
  the Xen clocksource, because ntp can warp the kernel's clocksource.
*/
static s64 get_abs_timeout(unsigned long delta)
{
	return xen_clocksource_read() + delta;
}
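
/*
 * Both clockevent back-ends below pass get_abs_timeout() the relative
 * delta supplied by the clockevents core, since the hypervisor only
 * accepts absolute deadlines expressed in its own system time.
 */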

static int xen_timerop_shutdown(struct clock_event_device *evt)
{
	/* cancel timeout */
	HYPERVISOR_set_timer_op(0);

	return 0;
}

static int xen_timerop_set_next_event(unsigned long delta,
				      struct clock_event_device *evt)
{
	WARN_ON(!clockevent_state_oneshot(evt));

	if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0)
		BUG();

	/* We may have missed the deadline, but there's no real way of
	   knowing for sure.  If the event was in the past, then we'll
	   get an immediate interrupt. */

	return 0;
}

static const struct clock_event_device xen_timerop_clockevent = {
	.name			= "xen",
	.features		= CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns		= 0xffffffff,
	.max_delta_ticks	= 0xffffffff,
	.min_delta_ns		= TIMER_SLOP,
	.min_delta_ticks	= TIMER_SLOP,

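	/*
	 * With mult = 1 and shift = 0, the core's ns-to-ticks conversion
	 * ((delta * mult) >> shift) is the identity: clockevent "ticks"
	 * are simply nanoseconds here.
	 */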
	.mult			= 1,
	.shift			= 0,
	.rating			= 500,

	.set_state_shutdown	= xen_timerop_shutdown,
	.set_next_event		= xen_timerop_set_next_event,
};

static int xen_vcpuop_shutdown(struct clock_event_device *evt)
{
	int cpu = smp_processor_id();

	if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, xen_vcpu_nr(cpu),
			       NULL) ||
	    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
			       NULL))
		BUG();

	return 0;
}

static int xen_vcpuop_set_oneshot(struct clock_event_device *evt)
{
	int cpu = smp_processor_id();

	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
			       NULL))
		BUG();

	return 0;
}

static int xen_vcpuop_set_next_event(unsigned long delta,
				     struct clock_event_device *evt)
{
	int cpu = smp_processor_id();
	struct vcpu_set_singleshot_timer single;
	int ret;

	WARN_ON(!clockevent_state_oneshot(evt));

	single.timeout_abs_ns = get_abs_timeout(delta);
	/* Get an event anyway, even if the timeout has already expired */
	single.flags = 0;

	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, xen_vcpu_nr(cpu),
				 &single);
	BUG_ON(ret != 0);

	return ret;
}

static const struct clock_event_device xen_vcpuop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.max_delta_ticks = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,
	.min_delta_ticks = TIMER_SLOP,

	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_state_shutdown = xen_vcpuop_shutdown,
	.set_state_oneshot = xen_vcpuop_set_oneshot,
	.set_next_event = xen_vcpuop_set_next_event,
};

static const struct clock_event_device *xen_clockevent =
	&xen_timerop_clockevent;

struct xen_clock_event_device {
	struct clock_event_device evt;
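	/* Backing storage for the irqaction name; must outlive the bound IRQ. */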
	char name[16];
};
static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };

static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = this_cpu_ptr(&xen_clock_events.evt);
	irqreturn_t ret;

	ret = IRQ_NONE;
	if (evt->event_handler) {
		evt->event_handler(evt);
		ret = IRQ_HANDLED;
	}

	return ret;
}

void xen_teardown_timer(int cpu)
{
	struct clock_event_device *evt;

	evt = &per_cpu(xen_clock_events, cpu).evt;

	if (evt->irq >= 0) {
		unbind_from_irqhandler(evt->irq, NULL);
		evt->irq = -1;
	}
}

void xen_setup_timer(int cpu)
{
	struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu);
	struct clock_event_device *evt = &xevt->evt;
	int irq;

	WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
	if (evt->irq >= 0)
		xen_teardown_timer(cpu);

	printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);

	snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu);

	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
				      IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
				      IRQF_FORCE_RESUME|IRQF_EARLY_RESUME,
				      xevt->name, NULL);
	(void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);

	memcpy(evt, xen_clockevent, sizeof(*evt));

	evt->cpumask = cpumask_of(cpu);
	evt->irq = irq;
}


void xen_setup_cpu_clockevents(void)
{
	clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt));
}

void xen_timer_resume(void)
{
	int cpu;

	pvclock_resume();

	if (xen_clockevent != &xen_vcpuop_clockevent)
		return;

	for_each_online_cpu(cpu) {
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer,
				       xen_vcpu_nr(cpu), NULL))
			BUG();
	}
}

static const struct pv_time_ops xen_time_ops __initconst = {
	.sched_clock = xen_clocksource_read,
	.steal_clock = xen_steal_clock,
};

static void __init xen_time_init(void)
{
	int cpu = smp_processor_id();
	struct timespec tp;

	/* As Dom0 is never migrated, using the TSC there incurs no penalty */
	if (xen_initial_domain())
		xen_clocksource.rating = 275;

	clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC);

	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
			       NULL) == 0) {
		/* Successfully turned off 100Hz tick, so we have the
		   vcpuop-based timer interface */
		printk(KERN_DEBUG "Xen: using vcpuop timer interface\n");
		xen_clockevent = &xen_vcpuop_clockevent;
	}

	/* Set initial system time with full resolution */
	xen_read_wallclock(&tp);
	do_settimeofday(&tp);

	setup_force_cpu_cap(X86_FEATURE_TSC);

	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_setup_cpu_clockevents();

	xen_time_setup_guest();

	if (xen_initial_domain())
		pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
}

void __ref xen_init_time_ops(void)
{
	pv_time_ops = xen_time_ops;

	x86_init.timers.timer_init = xen_time_init;
	x86_init.timers.setup_percpu_clockev = x86_init_noop;
	x86_cpuinit.setup_percpu_clockev = x86_init_noop;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	/* Dom0 uses the native method to set the hardware RTC. */
	if (!xen_initial_domain())
		x86_platform.set_wallclock = xen_set_wallclock;
}

#ifdef CONFIG_XEN_PVHVM
static void xen_hvm_setup_cpu_clockevents(void)
{
	int cpu = smp_processor_id();

	xen_setup_runstate_info(cpu);
	/*
	 * xen_setup_timer(cpu) is not called here because snprintf is bad
	 * in atomic context. Hence it is done in xen_hvm_cpu_notify
	 * (which gets called by smp_init during early bootup and also
	 * during CPU hotplug events).
	 */
	xen_setup_cpu_clockevents();
}

void __init xen_hvm_init_time_ops(void)
{
	/*
	 * The vector callback is needed, otherwise we cannot receive
	 * interrupts on cpus > 0, and at this point we don't know how
	 * many cpus are available.
	 */
	if (!xen_have_vector_callback)
		return;

	if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
		printk(KERN_INFO
		       "Xen doesn't support pvclock on HVM, disabling pv timer\n");
		return;
	}

	pv_time_ops = xen_time_ops;
	x86_init.timers.setup_percpu_clockev = xen_time_init;
	x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	x86_platform.set_wallclock = xen_set_wallclock;
}
#endif