xref: /linux/arch/riscv/kvm/vcpu_timer.c (revision 5ea5880764cbb164afb17a62e76ca75dc371409d)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <clocksource/timer-riscv.h>
#include <asm/delay.h>
#include <asm/kvm_nacl.h>
#include <asm/kvm_vcpu_timer.h>

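/*
 * Guest time is host time (get_cycles64()) plus the per-VM time_delta,
 * so every VCPU of a VM sees the same view of time.
 */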
static u64 kvm_riscv_current_cycles(struct kvm_guest_timer *gt)
{
	return get_cycles64() + gt->time_delta;
}

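/*
 * Convert the distance to a future guest cycle count into nanoseconds,
 * using the mult/shift pair cached from the RISC-V clocksource in
 * kvm_riscv_guest_timer_init(). Returns 0 if @cycles is already in
 * the past.
 */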
static u64 kvm_riscv_delta_cycles2ns(u64 cycles,
				     struct kvm_guest_timer *gt,
				     struct kvm_vcpu_timer *t)
{
	unsigned long flags;
	u64 cycles_now, cycles_delta, delta_ns;

	local_irq_save(flags);
	cycles_now = kvm_riscv_current_cycles(gt);
	if (cycles_now < cycles)
		cycles_delta = cycles - cycles_now;
	else
		cycles_delta = 0;
	delta_ns = (cycles_delta * gt->nsec_mult) >> gt->nsec_shift;
	local_irq_restore(flags);

	return delta_ns;
}

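/*
 * hrtimer callback for the emulated (non-Sstc) timer: re-arm the
 * hrtimer if the comparand is still in the future, otherwise inject
 * the VS-level timer interrupt.
 */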
static enum hrtimer_restart kvm_riscv_vcpu_hrtimer_expired(struct hrtimer *h)
{
	u64 delta_ns;
	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

	if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
		return HRTIMER_RESTART;
	}

	t->next_set = false;
	kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_TIMER);

	return HRTIMER_NORESTART;
}

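/* Cancel the armed timer event, if any. */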
static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t)
{
	if (!t->init_done || !t->next_set)
		return -EINVAL;

	hrtimer_cancel(&t->hrt);
	t->next_set = false;

	return 0;
}

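/*
 * Program the next event directly into the vstimecmp CSR (Sstc). On
 * 32-bit hosts the 64-bit comparand is written in two halves; writing
 * all-ones to the low half first keeps the intermediate comparand from
 * dropping below the new value, which could raise a spurious timer
 * interrupt.
 */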
static int kvm_riscv_vcpu_update_vstimecmp(struct kvm_vcpu *vcpu, u64 ncycles)
{
#if defined(CONFIG_32BIT)
	ncsr_write(CSR_VSTIMECMP, ULONG_MAX);
	ncsr_write(CSR_VSTIMECMPH, ncycles >> 32);
	ncsr_write(CSR_VSTIMECMP, (u32)ncycles);
#else
	ncsr_write(CSR_VSTIMECMP, ncycles);
#endif
	return 0;
}

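/*
 * Program the next event using a host hrtimer when Sstc is not
 * available: clear any previously injected timer interrupt, then arm
 * the hrtimer for the remaining delay.
 */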
static int kvm_riscv_vcpu_update_hrtimer(struct kvm_vcpu *vcpu, u64 ncycles)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 delta_ns;

	if (!t->init_done)
		return -EINVAL;

	kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_TIMER);

	delta_ns = kvm_riscv_delta_cycles2ns(ncycles, gt, t);
	t->next_cycles = ncycles;
	hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
	t->next_set = true;

	return 0;
}

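/* Arm the next guest timer event, e.g. on behalf of an SBI set_timer call. */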
int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	return t->timer_next_event(vcpu, ncycles);
}

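/*
 * hrtimer callback used on Sstc hosts while the VCPU is blocked: the
 * hardware delivers the actual interrupt through vstimecmp once the
 * VCPU runs again, so only a kick out of the blocked state is needed.
 */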
static enum hrtimer_restart kvm_riscv_vcpu_vstimer_expired(struct hrtimer *h)
{
	u64 delta_ns;
	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

	if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
		return HRTIMER_RESTART;
	}

	t->next_set = false;
	kvm_vcpu_kick(vcpu);

	return HRTIMER_NORESTART;
}

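/*
 * A timer event is pending if the comparand has already been reached
 * (the delta is zero) or a VS-level timer interrupt is already raised.
 */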
bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

	if (!kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t) ||
	    kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER))
		return true;
	else
		return false;
}

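/*
 * vstimecmp is parked while the VCPU is scheduled out (see
 * kvm_riscv_vcpu_timer_save()), so an hrtimer takes over waking the
 * VCPU once the comparand is reached.
 */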
static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 delta_ns;

	if (!t->init_done)
		return;

	delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
	hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
	t->next_set = true;
}

static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu)
{
	kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
}

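/* KVM_GET_ONE_REG handler for the timer register group. */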
int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
				 const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_TIMER);
	u64 reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(u64))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
		return -ENOENT;

	switch (reg_num) {
	case KVM_REG_RISCV_TIMER_REG(frequency):
		reg_val = riscv_timebase;
		break;
	case KVM_REG_RISCV_TIMER_REG(time):
		reg_val = kvm_riscv_current_cycles(gt);
		break;
	case KVM_REG_RISCV_TIMER_REG(compare):
		reg_val = t->next_cycles;
		break;
	case KVM_REG_RISCV_TIMER_REG(state):
		reg_val = (t->next_set) ? KVM_RISCV_TIMER_STATE_ON :
					  KVM_RISCV_TIMER_STATE_OFF;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

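/*
 * KVM_SET_ONE_REG handler for the timer register group. The frequency
 * is fixed by the host timebase and can only be confirmed, not
 * changed; writing the time register adjusts the VM-wide time_delta
 * rather than the host counter.
 */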
int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu,
				 const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_TIMER);
	u64 reg_val;
	int ret = 0;

	if (KVM_REG_SIZE(reg->id) != sizeof(u64))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
		return -ENOENT;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_TIMER_REG(frequency):
		if (reg_val != riscv_timebase)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_TIMER_REG(time):
		gt->time_delta = reg_val - get_cycles64();
		break;
	case KVM_REG_RISCV_TIMER_REG(compare):
		t->next_cycles = reg_val;
		break;
	case KVM_REG_RISCV_TIMER_REG(state):
		if (reg_val == KVM_RISCV_TIMER_STATE_ON)
			ret = kvm_riscv_vcpu_timer_next_event(vcpu, reg_val);
		else
			ret = kvm_riscv_vcpu_timer_cancel(t);
		break;
	default:
		ret = -ENOENT;
		break;
	}

	return ret;
}

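/*
 * Pick the timer implementation for this VCPU: vstimecmp-based when
 * the host implements the Sstc extension, hrtimer-based emulation
 * otherwise.
 */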
int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (t->init_done)
		return -EINVAL;

	t->init_done = true;
	t->next_set = false;

	/* Enable sstc for every vcpu if available in hardware */
	if (riscv_isa_extension_available(NULL, SSTC)) {
		t->sstc_enabled = true;
		hrtimer_setup(&t->hrt, kvm_riscv_vcpu_vstimer_expired, CLOCK_MONOTONIC,
			      HRTIMER_MODE_REL);
		t->timer_next_event = kvm_riscv_vcpu_update_vstimecmp;
	} else {
		t->sstc_enabled = false;
		hrtimer_setup(&t->hrt, kvm_riscv_vcpu_hrtimer_expired, CLOCK_MONOTONIC,
			      HRTIMER_MODE_REL);
		t->timer_next_event = kvm_riscv_vcpu_update_hrtimer;
	}

	return 0;
}

int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu)
{
	int ret;

	ret = kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
	vcpu->arch.timer.init_done = false;

	return ret;
}

int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	t->next_cycles = -1ULL;
	return kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
}

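/*
 * Expose the VM-wide time delta to the guest through the htimedelta
 * CSR, which hardware adds to the time counter for VS/VU-mode reads.
 */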
static void kvm_riscv_vcpu_update_timedelta(struct kvm_vcpu *vcpu)
{
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

#if defined(CONFIG_32BIT)
	ncsr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta));
	ncsr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32));
#else
	ncsr_write(CSR_HTIMEDELTA, gt->time_delta);
#endif
}

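/*
 * Called when the VCPU is loaded on a host CPU: restore the guest time
 * delta and, for Sstc, the saved vstimecmp comparand, then cancel any
 * blocking-wakeup hrtimer.
 */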
void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	kvm_riscv_vcpu_update_timedelta(vcpu);

	if (!t->sstc_enabled)
		return;

#if defined(CONFIG_32BIT)
	ncsr_write(CSR_VSTIMECMP, ULONG_MAX);
	ncsr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32));
	ncsr_write(CSR_VSTIMECMP, (u32)(t->next_cycles));
#else
	ncsr_write(CSR_VSTIMECMP, t->next_cycles);
#endif

	/* timer should be enabled for the remaining operations */
	if (unlikely(!t->init_done))
		return;

	kvm_riscv_vcpu_timer_unblocking(vcpu);
}

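/*
 * Snapshot vstimecmp on VM exit; a guest using Sstc may have written
 * the comparand directly.
 */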
void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (!t->sstc_enabled)
		return;

#if defined(CONFIG_32BIT)
	t->next_cycles = ncsr_read(CSR_VSTIMECMP);
	t->next_cycles |= (u64)ncsr_read(CSR_VSTIMECMPH) << 32;
#else
	t->next_cycles = ncsr_read(CSR_VSTIMECMP);
#endif
}

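/*
 * Called when the VCPU is put or starts blocking: park vstimecmp and,
 * if the VCPU is blocking, hand wakeup duty over to an hrtimer.
 */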
void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (!t->sstc_enabled)
		return;

	/*
	 * The vstimecmp CSRs are saved by kvm_riscv_vcpu_timer_sync()
	 * upon every VM exit, so there is no need to save them here.
	 *
	 * If the VS-timer expires while no VCPU is running on a host CPU,
	 * a WFI executed by that host CPU will be an effective NOP,
	 * resulting in no power savings. This is because, as per the
	 * RISC-V Privileged specification: "WFI is also required to
	 * resume execution for locally enabled interrupts pending at any
	 * privilege level, regardless of the global interrupt enable at
	 * each privilege level."
	 *
	 * To address this issue, the vstimecmp CSR must be set to -1UL
	 * here whenever the VCPU is scheduled out or exits to user space.
	 */

	csr_write(CSR_VSTIMECMP, -1UL);
#if defined(CONFIG_32BIT)
	csr_write(CSR_VSTIMECMPH, -1UL);
#endif

	/* timer should be enabled for the remaining operations */
	if (unlikely(!t->init_done))
		return;

	if (kvm_vcpu_is_blocking(vcpu))
		kvm_riscv_vcpu_timer_blocking(vcpu);
}

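/*
 * Per-VM timer setup: cache the clocksource mult/shift pair for the
 * cycles-to-nanoseconds conversion and make guest time start at zero
 * (time_delta is the negated current host cycle count).
 */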
void kvm_riscv_guest_timer_init(struct kvm *kvm)
{
	struct kvm_guest_timer *gt = &kvm->arch.timer;

	riscv_cs_get_mult_shift(&gt->nsec_mult, &gt->nsec_shift);
	gt->time_delta = -get_cycles64();
}