xref: /linux/arch/x86/kernel/kvmclock.c (revision f7511d5f66f01fc451747b24e79f3ada7a3af9af)
/*  KVM paravirtual clock driver. A clocksource implementation
    Copyright (C) 2008 Glauber de Oliveira Costa, Red Hat Inc.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
*/

#include <linux/clocksource.h>
#include <linux/kvm_para.h>
#include <linux/percpu.h>
#include <asm/arch_hooks.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include <asm/reboot.h>

#define KVM_SCALE 22

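/*
 * A worked example of the scaling (editor's sketch; the helper below is
 * illustrative only and not part of this file): the host publishes a
 * per-cpu multiplier, tsc_to_system_mul, chosen so that
 *
 *	ns = (tsc_delta * tsc_to_system_mul) >> KVM_SCALE
 *
 * i.e. the multiplier is the number of nanoseconds per TSC cycle in
 * fixed point, scaled by 2^KVM_SCALE. For a 2 GHz TSC (tsc_khz =
 * 2000000) that is (10^6 << 22) / 2000000 = 1 << 21, so a delta of
 * 2000 cycles converts to (2000 << 21) >> 22 = 1000 ns, as expected.
 */
#if 0	/* sketch only; something equivalent runs on the host side */
static u32 example_tsc_khz_to_mul(u32 tsc_khz)
{
	u64 mul = 1000000ULL << KVM_SCALE;	/* ns per kcycle, scaled */

	do_div(mul, tsc_khz);	/* do_div() divides in place */
	return (u32)mul;
}
#endif
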
static int kvmclock = 1;

static int parse_no_kvmclock(char *arg)
{
	kvmclock = 0;
	return 0;
}
early_param("no-kvmclock", parse_no_kvmclock);
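
/*
 * Usage note: booting the guest with "no-kvmclock" on the kernel
 * command line clears the flag above, so kvmclock_init() leaves the
 * paravirt hooks alone and the guest falls back to whatever other
 * clocksources it has (e.g. TSC or acpi_pm).
 */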

/* The hypervisor will periodically update the time information here */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct kvm_vcpu_time_info, hv_clock);
#define get_clock(cpu, field) per_cpu(hv_clock, cpu).field

static inline u64 kvm_get_delta(u64 last_tsc)
{
	int cpu = smp_processor_id();
	u64 delta = native_read_tsc() - last_tsc;
	return (delta * get_clock(cpu, tsc_to_system_mul)) >> KVM_SCALE;
}
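
/*
 * Note that the multiplication above is carried out in 64 bits, so a
 * vcpu that ran long enough without the hypervisor refreshing
 * tsc_timestamp (see the comment above hv_clock) would eventually see
 * the product overflow; the periodic updates keep the delta small.
 */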

static struct kvm_wall_clock wall_clock;
static cycle_t kvm_clock_read(void);

/*
 * The wallclock is the time of day when we booted. Some time may have
 * elapsed since the hypervisor wrote the data, so we try to account
 * for that with system time.
 */
unsigned long kvm_get_wallclock(void)
{
	u32 wc_sec, wc_nsec;
	u64 delta;
	struct timespec ts;
	int version, nsec;
	int low, high;

	low = (int)__pa(&wall_clock);
	high = ((u64)__pa(&wall_clock) >> 32);

	delta = kvm_clock_read();

	native_write_msr(MSR_KVM_WALL_CLOCK, low, high);
	do {
		version = wall_clock.wc_version;
		rmb();
		wc_sec = wall_clock.wc_sec;
		wc_nsec = wall_clock.wc_nsec;
		rmb();
	} while ((wall_clock.wc_version != version) || (version & 1));

	delta = kvm_clock_read() - delta;
	delta += wc_nsec;
	nsec = do_div(delta, NSEC_PER_SEC);
	set_normalized_timespec(&ts, wc_sec + delta, nsec);
	/*
	 * Of all mechanisms of time adjustment I've tested, this one
	 * was the champion!
	 */
	return ts.tv_sec + 1;
}
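
/*
 * The retry loop above is a lockless, seqlock-style read of the record
 * the host fills in: the host is expected to bump wc_version to an odd
 * value before touching wc_sec/wc_nsec and to an even value when done,
 * so a torn update shows up either as an odd version or as a version
 * change across the two reads, and we simply try again.
 */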

int kvm_set_wallclock(unsigned long now)
{
	return 0;
}

/*
 * This is our read_clock function. The host writes a TSC timestamp each
 * time it updates the system time. Without the TSC adjustment, we can
 * have a situation in which a vcpu starts to run earlier (smaller
 * system_time), but probes time later (compared to another vcpu),
 * leading to backwards time.
 */
static cycle_t kvm_clock_read(void)
{
	u64 last_tsc, now;
	int cpu;

	preempt_disable();
	cpu = smp_processor_id();

	last_tsc = get_clock(cpu, tsc_timestamp);
	now = get_clock(cpu, system_time);

	now += kvm_get_delta(last_tsc);
	preempt_enable();

	return now;
}
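
/*
 * Time is tracked per vcpu: preemption is disabled above so that the
 * TSC read in kvm_get_delta() and the hv_clock record it is scaled
 * against belong to the same cpu, which is what makes the adjustment
 * described in the comment above safe.
 */
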
static struct clocksource kvm_clock = {
	.name = "kvm-clock",
	.read = kvm_clock_read,
	.rating = 400,
	.mask = CLOCKSOURCE_MASK(64),
	.mult = 1 << KVM_SCALE,
	.shift = KVM_SCALE,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
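
/*
 * With .mult = 1 << KVM_SCALE and .shift = KVM_SCALE, the generic
 * timekeeping conversion ns = (cycles * mult) >> shift is an identity:
 * kvm_clock_read() already returns nanoseconds, so no further scaling
 * is applied. The rating of 400 ranks this above the TSC clocksource
 * (rating 300), so it is preferred whenever it registers successfully.
 */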

static int kvm_register_clock(void)
{
	int cpu = smp_processor_id();
	int low, high;

	low = (int)__pa(&per_cpu(hv_clock, cpu)) | 1;
	high = ((u64)__pa(&per_cpu(hv_clock, cpu)) >> 32);

	return native_write_msr_safe(MSR_KVM_SYSTEM_TIME, low, high);
}
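
/*
 * The value written to MSR_KVM_SYSTEM_TIME is the guest physical
 * address of this cpu's hv_clock, with bit 0 doing double duty as the
 * enable flag (hence the "| 1"). Using the _safe variant means a host
 * without this MSR raises a #GP that we catch and report as a non-zero
 * return instead of killing the guest.
 */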

static void kvm_setup_secondary_clock(void)
{
	/*
	 * Now that the first cpu has already had this clocksource
	 * initialized, we shouldn't fail.
	 */
	WARN_ON(kvm_register_clock());
	/* ok, done with our trickery, call native */
	setup_secondary_APIC_clock();
}
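
/*
 * Registration is per cpu: the boot cpu registers its hv_clock in
 * kvmclock_init() below, and every secondary cpu comes through here, so
 * each vcpu ends up with the host updating its own per-cpu record.
 */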

/*
 * After the clock is registered, the host will keep writing to the
 * registered memory location. If the guest happens to shut down, this
 * memory won't be valid. In cases like kexec, in which you install a
 * new kernel, this means a random memory location will keep being
 * written to. So before any kind of shutdown from our side, we
 * unregister the clock by writing anything that does not have the
 * 'enable' bit set in the MSR.
 */
#ifdef CONFIG_KEXEC
static void kvm_crash_shutdown(struct pt_regs *regs)
{
	native_write_msr_safe(MSR_KVM_SYSTEM_TIME, 0, 0);
	native_machine_crash_shutdown(regs);
}
#endif

static void kvm_shutdown(void)
{
	native_write_msr_safe(MSR_KVM_SYSTEM_TIME, 0, 0);
	native_machine_shutdown();
}
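
/*
 * Writing 0/0 above is the simplest value with the enable bit clear, so
 * both shutdown paths stop the host from dirtying guest memory before
 * handing control to the native machine_ops.
 */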

void __init kvmclock_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
		if (kvm_register_clock())
			return;
		pv_time_ops.get_wallclock = kvm_get_wallclock;
		pv_time_ops.set_wallclock = kvm_set_wallclock;
		pv_time_ops.sched_clock = kvm_clock_read;
		pv_apic_ops.setup_secondary_clock = kvm_setup_secondary_clock;
		machine_ops.shutdown = kvm_shutdown;
#ifdef CONFIG_KEXEC
		machine_ops.crash_shutdown = kvm_crash_shutdown;
#endif
		clocksource_register(&kvm_clock);
	}
}
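
/*
 * A quick way to verify the result on a running guest (assuming sysfs
 * is mounted):
 *
 *	$ cat /sys/devices/system/clocksource/clocksource0/current_clocksource
 *	kvm-clock
 *
 * which should appear once the rating-400 source wins clocksource
 * selection.
 */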