xref: /linux/arch/riscv/kvm/vcpu.c (revision 221533629550e920580ab428f13ffebf54063b95)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nacl.h>
#include <asm/kvm_vcpu_vector.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
	STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
	STATS_DESC_COUNTER(VCPU, wrs_exit_stat),
	STATS_DESC_COUNTER(VCPU, mmio_exit_user),
	STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
	STATS_DESC_COUNTER(VCPU, csr_exit_user),
	STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, exits),
	STATS_DESC_COUNTER(VCPU, instr_illegal_exits),
	STATS_DESC_COUNTER(VCPU, load_misaligned_exits),
	STATS_DESC_COUNTER(VCPU, store_misaligned_exits),
	STATS_DESC_COUNTER(VCPU, load_access_exits),
	STATS_DESC_COUNTER(VCPU, store_access_exits),
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};
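
/*
 * Layout sketch (illustrative, not part of the original file): the header
 * above describes the flat binary blob that userspace reads from the fd
 * returned by the KVM_GET_STATS_FD vCPU ioctl,
 *
 *	[kvm_stats_header][id string][kvm_vcpu_stats_desc[]][u64 data...]
 *
 * with each *_offset field giving the byte offset of its region from the
 * start of the file.
 */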

static void kvm_riscv_vcpu_context_reset(struct kvm_vcpu *vcpu,
					 bool kvm_sbi_reset)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	void *vector_datap = cntx->vector.datap;

	memset(cntx, 0, sizeof(*cntx));
	memset(csr, 0, sizeof(*csr));
	memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr));

	/* Restore datap as it's not a part of the guest context. */
	cntx->vector.datap = vector_datap;

	if (kvm_sbi_reset)
		kvm_riscv_vcpu_sbi_load_reset_state(vcpu);

	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
	cntx->sstatus = SR_SPP | SR_SPIE;

	cntx->hstatus |= HSTATUS_VTW;
	cntx->hstatus |= HSTATUS_SPVP;
	cntx->hstatus |= HSTATUS_SPV;
}

static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu, bool kvm_sbi_reset)
{
	bool loaded;

	/*
	 * Preemption must be disabled here because resetting races with
	 * kvm_sched_out()/kvm_sched_in() (called from preempt notifiers),
	 * which also call vcpu_put()/vcpu_load().
	 */
	get_cpu();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	vcpu->arch.last_exit_cpu = -1;

	kvm_riscv_vcpu_context_reset(vcpu, kvm_sbi_reset);

	kvm_riscv_vcpu_fp_reset(vcpu);

	kvm_riscv_vcpu_vector_reset(vcpu);

	kvm_riscv_vcpu_timer_reset(vcpu);

	kvm_riscv_vcpu_aia_reset(vcpu);

	bitmap_zero(vcpu->arch.irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
	bitmap_zero(vcpu->arch.irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);

	kvm_riscv_vcpu_pmu_reset(vcpu);

	vcpu->arch.hfence_head = 0;
	vcpu->arch.hfence_tail = 0;
	memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));

	kvm_riscv_vcpu_sbi_reset(vcpu);

	/* Reset the guest CSRs for the hotplug usecase */
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	put_cpu();
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int rc;

	spin_lock_init(&vcpu->arch.mp_state_lock);

	/* Mark this VCPU never ran */
	vcpu->arch.ran_atleast_once = false;

	vcpu->arch.cfg.hedeleg = KVM_HEDELEG_DEFAULT;
	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
	bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);

	/* Setup ISA features available to VCPU */
	kvm_riscv_vcpu_setup_isa(vcpu);

	/* Setup vendor, arch, and implementation details */
	vcpu->arch.mvendorid = sbi_get_mvendorid();
	vcpu->arch.marchid = sbi_get_marchid();
	vcpu->arch.mimpid = sbi_get_mimpid();

	/* Setup VCPU hfence queue */
	spin_lock_init(&vcpu->arch.hfence_lock);

	spin_lock_init(&vcpu->arch.reset_state.lock);

	rc = kvm_riscv_vcpu_alloc_vector_context(vcpu);
	if (rc)
		return rc;

	/* Setup VCPU timer */
	kvm_riscv_vcpu_timer_init(vcpu);

	/* Setup performance monitoring */
	kvm_riscv_vcpu_pmu_init(vcpu);

	/* Setup VCPU AIA */
	kvm_riscv_vcpu_aia_init(vcpu);

	/*
	 * Setup SBI extensions
	 * NOTE: This must be the last thing to be initialized.
	 */
	kvm_riscv_vcpu_sbi_init(vcpu);

	/* Reset VCPU */
	kvm_riscv_reset_vcpu(vcpu, false);

	return 0;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	/*
	 * The vCPU with id 0 is the designated boot CPU.
	 * Keep all vCPUs with a non-zero id in the powered-off state so
	 * that they can be brought up using the SBI HSM extension.
	 */
	if (vcpu->vcpu_idx != 0)
		kvm_riscv_vcpu_power_off(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_riscv_vcpu_sbi_deinit(vcpu);

	/* Cleanup VCPU AIA context */
	kvm_riscv_vcpu_aia_deinit(vcpu);

	/* Cleanup VCPU timer */
	kvm_riscv_vcpu_timer_deinit(vcpu);

	kvm_riscv_vcpu_pmu_deinit(vcpu);

	/* Free unused pages pre-allocated for G-stage page table mappings */
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);

	/* Free vector context space for host and guest kernel */
	kvm_riscv_vcpu_free_vector_context(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_riscv_vcpu_timer_pending(vcpu);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
		!kvm_riscv_vcpu_stopped(vcpu) && !vcpu->arch.pause);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
}

#ifdef CONFIG_GUEST_PERF_EVENTS
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_context.sepc;
}
#endif

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;

		if (irq.irq == KVM_INTERRUPT_SET)
			return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
		else
			return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
	}

	return -ENOIOCTLCMD;
}
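
/*
 * Userspace usage sketch (illustrative, not part of the original file):
 * the KVM_INTERRUPT ioctl above is the asynchronous path by which a VMM
 * asserts or de-asserts the VS-level external interrupt line. Assuming
 * vcpu_fd is an open vCPU file descriptor, a minimal caller looks like:
 *
 *	struct kvm_interrupt irq = { .irq = KVM_INTERRUPT_SET };
 *
 *	if (ioctl(vcpu_fd, KVM_INTERRUPT, &irq) < 0)
 *		err(1, "KVM_INTERRUPT");
 *
 * Passing KVM_INTERRUPT_UNSET instead lowers the line again.
 */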

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_riscv_vcpu_set_reg(vcpu, &reg);
		else
			r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
		break;
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned int n;

		r = -EFAULT;
		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			break;
		n = reg_list.n;
		reg_list.n = kvm_riscv_vcpu_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			break;
		r = -E2BIG;
		if (n < reg_list.n)
			break;
		r = kvm_riscv_vcpu_copy_reg_indices(vcpu, user_list->reg);
		break;
	}
	default:
		break;
	}

	return r;
}
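
/*
 * Usage note (illustrative, not part of the original file): KVM_GET_REG_LIST
 * is normally a two-call dance. The first call probes the register count
 * (it fails with E2BIG but writes the real count back); the second call
 * fetches the register ids:
 *
 *	struct kvm_reg_list probe = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);	// expect E2BIG
 *	list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
 *	list->n = probe.n;
 *	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0)
 *		err(1, "KVM_GET_REG_LIST");
 */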

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

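/*
 * Interrupt bookkeeping sketch (descriptive): pending interrupts are
 * tracked in two bitmaps. Writers (kvm_riscv_vcpu_set_interrupt() and
 * kvm_riscv_vcpu_unset_interrupt() below) update irqs_pending first and
 * only then flag the bit in irqs_pending_mask, ordered by
 * smp_mb__before_atomic(). The flush side consumes irqs_pending_mask
 * atomically with xchg_acquire() and folds just the flagged bits into the
 * shadow HVIP value, so concurrent updates are never lost between the
 * check and the CSR write.
 */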
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long mask, val;

	if (READ_ONCE(vcpu->arch.irqs_pending_mask[0])) {
		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[0], 0);
		val = READ_ONCE(vcpu->arch.irqs_pending[0]) & mask;

		csr->hvip &= ~mask;
		csr->hvip |= val;
	}

	/* Flush AIA high interrupts */
	kvm_riscv_vcpu_aia_flush_interrupts(vcpu);
}

void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long hvip;
	struct kvm_vcpu_arch *v = &vcpu->arch;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	/* Read current HVIP and VSIE CSRs */
	csr->vsie = ncsr_read(CSR_VSIE);

	/* Sync up the HVIP.VSSIP bit changes done by the guest */
	hvip = ncsr_read(CSR_HVIP);
	if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
		if (hvip & (1UL << IRQ_VS_SOFT)) {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      v->irqs_pending_mask))
				set_bit(IRQ_VS_SOFT, v->irqs_pending);
		} else {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      v->irqs_pending_mask))
				clear_bit(IRQ_VS_SOFT, v->irqs_pending);
		}
	}

	/* Sync up the HVIP.LCOFIP bit changes (only clear) by the guest */
	if ((csr->hvip ^ hvip) & (1UL << IRQ_PMU_OVF)) {
		if (!(hvip & (1UL << IRQ_PMU_OVF)) &&
		    !test_and_set_bit(IRQ_PMU_OVF, v->irqs_pending_mask))
			clear_bit(IRQ_PMU_OVF, v->irqs_pending);
	}

	/* Sync up AIA high interrupts */
	kvm_riscv_vcpu_aia_sync_interrupts(vcpu);

	/* Sync up timer CSRs */
	kvm_riscv_vcpu_timer_sync(vcpu);
}

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	/*
	 * We only allow VS-mode software, timer, counter overflow, and
	 * external interrupts when irq is one of the local interrupts
	 * defined by the RISC-V privileged specification.
	 */
	if (irq < IRQ_LOCAL_MAX &&
	    irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT &&
	    irq != IRQ_PMU_OVF)
		return -EINVAL;

	set_bit(irq, vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, vcpu->arch.irqs_pending_mask);

	kvm_vcpu_kick(vcpu);

	return 0;
}

int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	/*
	 * We only allow VS-mode software, timer, counter overflow, and
	 * external interrupts when irq is one of the local interrupts
	 * defined by the RISC-V privileged specification.
	 */
	if (irq < IRQ_LOCAL_MAX &&
	    irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT &&
	    irq != IRQ_PMU_OVF)
		return -EINVAL;

	clear_bit(irq, vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, vcpu->arch.irqs_pending_mask);

	return 0;
}

bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
	unsigned long ie;

	ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
		<< VSIP_TO_HVIP_SHIFT) & (unsigned long)mask;
	ie |= vcpu->arch.guest_csr.vsie & ~IRQ_LOCAL_MASK &
		(unsigned long)mask;
	if (READ_ONCE(vcpu->arch.irqs_pending[0]) & ie)
		return true;

	/* Check AIA high interrupts */
	return kvm_riscv_vcpu_aia_has_interrupts(vcpu, mask);
}

void __kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.mp_state_lock);
	__kvm_riscv_vcpu_power_off(vcpu);
	spin_unlock(&vcpu->arch.mp_state_lock);
}

void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
	kvm_vcpu_wake_up(vcpu);
}

void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.mp_state_lock);
	__kvm_riscv_vcpu_power_on(vcpu);
	spin_unlock(&vcpu->arch.mp_state_lock);
}

bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	*mp_state = READ_ONCE(vcpu->arch.mp_state);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	spin_lock(&vcpu->arch.mp_state_lock);

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
		break;
	case KVM_MP_STATE_STOPPED:
		__kvm_riscv_vcpu_power_off(vcpu);
		break;
	case KVM_MP_STATE_INIT_RECEIVED:
		if (vcpu->kvm->arch.mp_state_reset)
			kvm_riscv_reset_vcpu(vcpu, false);
		else
			ret = -EINVAL;
		break;
	default:
		ret = -EINVAL;
	}

	spin_unlock(&vcpu->arch.mp_state_lock);

	return ret;
}
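
/*
 * Usage note (illustrative, assumptions flagged): KVM_MP_STATE_INIT_RECEIVED
 * above is only honored when userspace opted this VM in to mp_state based
 * resets (at the time of writing, the KVM_CAP_RISCV_MP_STATE_RESET
 * capability sets kvm->arch.mp_state_reset). A VMM can then emulate a cold
 * reset with:
 *
 *	struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_INIT_RECEIVED };
 *
 *	ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
 */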

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		vcpu->arch.cfg.hedeleg &= ~BIT(EXC_BREAKPOINT);
	} else {
		vcpu->guest_debug = 0;
		vcpu->arch.cfg.hedeleg |= BIT(EXC_BREAKPOINT);
	}

	return 0;
}

static void kvm_riscv_vcpu_setup_config(struct kvm_vcpu *vcpu)
{
	const unsigned long *isa = vcpu->arch.isa;
	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

	if (riscv_isa_extension_available(isa, SVPBMT))
		cfg->henvcfg |= ENVCFG_PBMTE;

	if (riscv_isa_extension_available(isa, SSTC))
		cfg->henvcfg |= ENVCFG_STCE;

	if (riscv_isa_extension_available(isa, ZICBOM))
		cfg->henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);

	if (riscv_isa_extension_available(isa, ZICBOZ))
		cfg->henvcfg |= ENVCFG_CBZE;

	if (riscv_isa_extension_available(isa, SVADU) &&
	    !riscv_isa_extension_available(isa, SVADE))
		cfg->henvcfg |= ENVCFG_ADUE;

	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
		cfg->hstateen0 |= SMSTATEEN0_HSENVCFG;
		if (riscv_isa_extension_available(isa, SSAIA))
			cfg->hstateen0 |= SMSTATEEN0_AIA_IMSIC |
					  SMSTATEEN0_AIA |
					  SMSTATEEN0_AIA_ISEL;
		if (riscv_isa_extension_available(isa, SMSTATEEN))
			cfg->hstateen0 |= SMSTATEEN0_SSTATEEN0;
	}

	if (vcpu->guest_debug)
		cfg->hedeleg &= ~BIT(EXC_BREAKPOINT);
}
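
/*
 * Design note (descriptive): each HENVCFG bit set above simply un-gates,
 * for VS-mode, a feature that is already present in the vCPU ISA bitmap;
 * for example, ENVCFG_STCE lets the guest program its own stimecmp once
 * Sstc is exposed. The hstateen0 bits play the same gating role for
 * state-enable accesses when the host implements Smstateen.
 */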

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	void *nsh;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

	if (kvm_riscv_nacl_sync_csr_available()) {
		nsh = nacl_shmem();
		nacl_csr_write(nsh, CSR_VSSTATUS, csr->vsstatus);
		nacl_csr_write(nsh, CSR_VSIE, csr->vsie);
		nacl_csr_write(nsh, CSR_VSTVEC, csr->vstvec);
		nacl_csr_write(nsh, CSR_VSSCRATCH, csr->vsscratch);
		nacl_csr_write(nsh, CSR_VSEPC, csr->vsepc);
		nacl_csr_write(nsh, CSR_VSCAUSE, csr->vscause);
		nacl_csr_write(nsh, CSR_VSTVAL, csr->vstval);
		nacl_csr_write(nsh, CSR_HEDELEG, cfg->hedeleg);
		nacl_csr_write(nsh, CSR_HVIP, csr->hvip);
		nacl_csr_write(nsh, CSR_VSATP, csr->vsatp);
		nacl_csr_write(nsh, CSR_HENVCFG, cfg->henvcfg);
		if (IS_ENABLED(CONFIG_32BIT))
			nacl_csr_write(nsh, CSR_HENVCFGH, cfg->henvcfg >> 32);
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
			nacl_csr_write(nsh, CSR_HSTATEEN0, cfg->hstateen0);
			if (IS_ENABLED(CONFIG_32BIT))
				nacl_csr_write(nsh, CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
		}
	} else {
		csr_write(CSR_VSSTATUS, csr->vsstatus);
		csr_write(CSR_VSIE, csr->vsie);
		csr_write(CSR_VSTVEC, csr->vstvec);
		csr_write(CSR_VSSCRATCH, csr->vsscratch);
		csr_write(CSR_VSEPC, csr->vsepc);
		csr_write(CSR_VSCAUSE, csr->vscause);
		csr_write(CSR_VSTVAL, csr->vstval);
		csr_write(CSR_HEDELEG, cfg->hedeleg);
		csr_write(CSR_HVIP, csr->hvip);
		csr_write(CSR_VSATP, csr->vsatp);
		csr_write(CSR_HENVCFG, cfg->henvcfg);
		if (IS_ENABLED(CONFIG_32BIT))
			csr_write(CSR_HENVCFGH, cfg->henvcfg >> 32);
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
			csr_write(CSR_HSTATEEN0, cfg->hstateen0);
			if (IS_ENABLED(CONFIG_32BIT))
				csr_write(CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
		}
	}

	kvm_riscv_mmu_update_hgatp(vcpu);

	kvm_riscv_vcpu_timer_restore(vcpu);

	kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
	kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
					vcpu->arch.isa);
	kvm_riscv_vcpu_host_vector_save(&vcpu->arch.host_context);
	kvm_riscv_vcpu_guest_vector_restore(&vcpu->arch.guest_context,
					    vcpu->arch.isa);

	kvm_riscv_vcpu_aia_load(vcpu, cpu);

	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);

	vcpu->cpu = cpu;
}
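
/*
 * Note on the two branches above (descriptive): when KVM itself runs as a
 * guest, the SBI nested acceleration (NACL) extension lets it stage CSR
 * updates in a shared-memory scratch area via nacl_csr_write() instead of
 * trapping to the outer hypervisor on every csr_write(); the staged values
 * are synchronized in bulk on a later NACL call. On bare-metal hosts the
 * plain csr_write() path is used.
 */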

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	void *nsh;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	vcpu->cpu = -1;

	kvm_riscv_vcpu_aia_put(vcpu);

	kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
				     vcpu->arch.isa);
	kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);

	kvm_riscv_vcpu_timer_save(vcpu);
	kvm_riscv_vcpu_guest_vector_save(&vcpu->arch.guest_context,
					 vcpu->arch.isa);
	kvm_riscv_vcpu_host_vector_restore(&vcpu->arch.host_context);

	if (kvm_riscv_nacl_available()) {
		nsh = nacl_shmem();
		csr->vsstatus = nacl_csr_read(nsh, CSR_VSSTATUS);
		csr->vsie = nacl_csr_read(nsh, CSR_VSIE);
		csr->vstvec = nacl_csr_read(nsh, CSR_VSTVEC);
		csr->vsscratch = nacl_csr_read(nsh, CSR_VSSCRATCH);
		csr->vsepc = nacl_csr_read(nsh, CSR_VSEPC);
		csr->vscause = nacl_csr_read(nsh, CSR_VSCAUSE);
		csr->vstval = nacl_csr_read(nsh, CSR_VSTVAL);
		csr->hvip = nacl_csr_read(nsh, CSR_HVIP);
		csr->vsatp = nacl_csr_read(nsh, CSR_VSATP);
	} else {
		csr->vsstatus = csr_read(CSR_VSSTATUS);
		csr->vsie = csr_read(CSR_VSIE);
		csr->vstvec = csr_read(CSR_VSTVEC);
		csr->vsscratch = csr_read(CSR_VSSCRATCH);
		csr->vsepc = csr_read(CSR_VSEPC);
		csr->vscause = csr_read(CSR_VSCAUSE);
		csr->vstval = csr_read(CSR_VSTVAL);
		csr->hvip = csr_read(CSR_HVIP);
		csr->vsatp = csr_read(CSR_VSATP);
	}
}

/**
 * kvm_riscv_check_vcpu_requests - check and handle pending vCPU requests
 * @vcpu:	the VCPU pointer
 *
 * Return: 1 if we should enter the guest
 *	    0 if we should exit to userspace
 */
static int kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
			kvm_vcpu_srcu_read_unlock(vcpu);
			rcuwait_wait_event(wait,
				(!kvm_riscv_vcpu_stopped(vcpu)) && (!vcpu->arch.pause),
				TASK_INTERRUPTIBLE);
			kvm_vcpu_srcu_read_lock(vcpu);

			if (kvm_riscv_vcpu_stopped(vcpu) || vcpu->arch.pause) {
				/*
				 * We were awakened to handle a signal;
				 * request to sleep again later.
				 */
				kvm_make_request(KVM_REQ_SLEEP, vcpu);
			}
		}

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_riscv_reset_vcpu(vcpu, true);

		if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
			kvm_riscv_mmu_update_hgatp(vcpu);

		if (kvm_check_request(KVM_REQ_FENCE_I, vcpu))
			kvm_riscv_fence_i_process(vcpu);

		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
			kvm_riscv_tlb_flush_process(vcpu);

		if (kvm_check_request(KVM_REQ_HFENCE_VVMA_ALL, vcpu))
			kvm_riscv_hfence_vvma_all_process(vcpu);

		if (kvm_check_request(KVM_REQ_HFENCE, vcpu))
			kvm_riscv_hfence_process(vcpu);

		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
			kvm_riscv_vcpu_record_steal_time(vcpu);

		if (kvm_dirty_ring_check_request(vcpu))
			return 0;
	}

	return 1;
}

static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	ncsr_write(CSR_HVIP, csr->hvip);
	kvm_riscv_vcpu_aia_update_hvip(vcpu);
}

static __always_inline void kvm_riscv_vcpu_swap_in_guest_state(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

	vcpu->arch.host_scounteren = csr_swap(CSR_SCOUNTEREN, csr->scounteren);
	vcpu->arch.host_senvcfg = csr_swap(CSR_SENVCFG, csr->senvcfg);
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) &&
	    (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
		vcpu->arch.host_sstateen0 = csr_swap(CSR_SSTATEEN0,
						     smcsr->sstateen0);
}

static __always_inline void kvm_riscv_vcpu_swap_in_host_state(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

	csr->scounteren = csr_swap(CSR_SCOUNTEREN, vcpu->arch.host_scounteren);
	csr->senvcfg = csr_swap(CSR_SENVCFG, vcpu->arch.host_senvcfg);
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) &&
	    (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
		smcsr->sstateen0 = csr_swap(CSR_SSTATEEN0,
					    vcpu->arch.host_sstateen0);
}
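
/*
 * Design note (descriptive): csr_swap() compiles down to a single csrrw,
 * which writes the new value and returns the old one, so each host/guest
 * pair of SCOUNTEREN, SENVCFG, and SSTATEEN0 values trades places
 * atomically with no separate read-then-write window.
 */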

/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu,
					      struct kvm_cpu_trap *trap)
{
	void *nsh;
	struct kvm_cpu_context *gcntx = &vcpu->arch.guest_context;
	struct kvm_cpu_context *hcntx = &vcpu->arch.host_context;

	/*
	 * We save trap CSRs (such as SEPC, SCAUSE, STVAL, HTVAL, and
	 * HTINST) here because we do local_irq_enable() after this
	 * function in kvm_arch_vcpu_ioctl_run() which can result in
	 * an interrupt immediately after local_irq_enable() and can
	 * potentially change trap CSRs.
	 */

	kvm_riscv_vcpu_swap_in_guest_state(vcpu);
	guest_state_enter_irqoff();

	if (kvm_riscv_nacl_sync_sret_available()) {
		nsh = nacl_shmem();

		if (kvm_riscv_nacl_autoswap_csr_available()) {
			hcntx->hstatus =
				nacl_csr_read(nsh, CSR_HSTATUS);
			nacl_scratch_write_long(nsh,
						SBI_NACL_SHMEM_AUTOSWAP_OFFSET +
						SBI_NACL_SHMEM_AUTOSWAP_HSTATUS,
						gcntx->hstatus);
			nacl_scratch_write_long(nsh,
						SBI_NACL_SHMEM_AUTOSWAP_OFFSET,
						SBI_NACL_SHMEM_AUTOSWAP_FLAG_HSTATUS);
		} else if (kvm_riscv_nacl_sync_csr_available()) {
			hcntx->hstatus = nacl_csr_swap(nsh,
						       CSR_HSTATUS, gcntx->hstatus);
		} else {
			hcntx->hstatus = csr_swap(CSR_HSTATUS, gcntx->hstatus);
		}

		nacl_scratch_write_longs(nsh,
					 SBI_NACL_SHMEM_SRET_OFFSET +
					 SBI_NACL_SHMEM_SRET_X(1),
					 &gcntx->ra,
					 SBI_NACL_SHMEM_SRET_X_LAST);

		__kvm_riscv_nacl_switch_to(&vcpu->arch, SBI_EXT_NACL,
					   SBI_EXT_NACL_SYNC_SRET);

		if (kvm_riscv_nacl_autoswap_csr_available()) {
			nacl_scratch_write_long(nsh,
						SBI_NACL_SHMEM_AUTOSWAP_OFFSET,
						0);
			gcntx->hstatus = nacl_scratch_read_long(nsh,
								SBI_NACL_SHMEM_AUTOSWAP_OFFSET +
								SBI_NACL_SHMEM_AUTOSWAP_HSTATUS);
		} else {
			gcntx->hstatus = csr_swap(CSR_HSTATUS, hcntx->hstatus);
		}

		trap->htval = nacl_csr_read(nsh, CSR_HTVAL);
		trap->htinst = nacl_csr_read(nsh, CSR_HTINST);
	} else {
		hcntx->hstatus = csr_swap(CSR_HSTATUS, gcntx->hstatus);

		__kvm_riscv_switch_to(&vcpu->arch);

		gcntx->hstatus = csr_swap(CSR_HSTATUS, hcntx->hstatus);

		trap->htval = csr_read(CSR_HTVAL);
		trap->htinst = csr_read(CSR_HTINST);
	}

	trap->sepc = gcntx->sepc;
	trap->scause = csr_read(CSR_SCAUSE);
	trap->stval = csr_read(CSR_STVAL);

	vcpu->arch.last_exit_cpu = vcpu->cpu;
	guest_state_exit_irqoff();
	kvm_riscv_vcpu_swap_in_host_state(vcpu);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int ret;
	struct kvm_cpu_trap trap;
	struct kvm_run *run = vcpu->run;

	if (!vcpu->arch.ran_atleast_once)
		kvm_riscv_vcpu_setup_config(vcpu);

	/* Mark this VCPU ran at least once */
	vcpu->arch.ran_atleast_once = true;

	kvm_vcpu_srcu_read_lock(vcpu);

	switch (run->exit_reason) {
	case KVM_EXIT_MMIO:
		/* Process MMIO value returned from user-space */
		ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
		break;
	case KVM_EXIT_RISCV_SBI:
		/* Process SBI value returned from user-space */
		ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
		break;
	case KVM_EXIT_RISCV_CSR:
		/* Process CSR value returned from user-space */
		ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run);
		break;
	default:
		ret = 0;
		break;
	}
	if (ret) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		return ret;
	}

	if (!vcpu->wants_to_run) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		return -EINTR;
	}

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/* Check conditions before entering the guest */
		ret = kvm_xfer_to_guest_mode_handle_work(vcpu);
		if (ret)
			continue;
		ret = 1;

		kvm_riscv_gstage_vmid_update(vcpu);

		ret = kvm_riscv_check_vcpu_requests(vcpu);
		if (ret <= 0)
			continue;

		preempt_disable();

		/* Update AIA HW state before entering guest */
		ret = kvm_riscv_vcpu_aia_update(vcpu);
		if (ret <= 0) {
			preempt_enable();
			continue;
		}

		local_irq_disable();

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		vcpu->mode = IN_GUEST_MODE;

		kvm_vcpu_srcu_read_unlock(vcpu);
		smp_mb__after_srcu_read_unlock();

		/*
		 * VCPU interrupts might have been updated asynchronously,
		 * so reflect them in the HW state.
		 */
		kvm_riscv_vcpu_flush_interrupts(vcpu);

		/* Update HVIP CSR for current CPU */
		kvm_riscv_update_hvip(vcpu);

		if (kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
		    kvm_request_pending(vcpu) ||
		    xfer_to_guest_mode_work_pending()) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			local_irq_enable();
			preempt_enable();
			kvm_vcpu_srcu_read_lock(vcpu);
			continue;
		}

		/*
		 * Sanitize VMID mappings cached (TLB) on current CPU
		 *
		 * Note: This should be done after G-stage VMID has been
		 * updated using kvm_riscv_gstage_vmid_ver_changed()
		 */
		kvm_riscv_gstage_vmid_sanitize(vcpu);

		trace_kvm_entry(vcpu);

		guest_timing_enter_irqoff();

		kvm_riscv_vcpu_enter_exit(vcpu, &trap);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;

		/* Sync up interrupt state with HW */
		kvm_riscv_vcpu_sync_interrupts(vcpu);

		/*
		 * We must ensure that any pending interrupts are taken before
		 * we exit guest timing so that timer ticks are accounted as
		 * guest time. Transiently unmask interrupts so that any
		 * pending interrupts are taken.
		 *
		 * There's no barrier which ensures that pending interrupts are
		 * recognised, so we just hope that the CPU takes any pending
		 * interrupts between the enable and disable.
		 */
		local_irq_enable();
		local_irq_disable();

		guest_timing_exit_irqoff();

		local_irq_enable();

		trace_kvm_exit(&trap);

		preempt_enable();

		kvm_vcpu_srcu_read_lock(vcpu);

		ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
	}

	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);

	kvm_vcpu_srcu_read_unlock(vcpu);

	return ret;
}