// SPDX-License-Identifier: GPL-2.0
/*
 * RISC-V processor library code for KVM selftests
 *
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 */

#include <linux/compiler.h>
#include <assert.h>

#include "guest_modes.h"
#include "kvm_util.h"
#include "processor.h"
#include "ucall_common.h"

#define DEFAULT_RISCV_GUEST_STACK_VADDR_MIN	0xac0000

/* Guest VA of the guest's handler table, set by vm_init_vector_tables() */
static gva_t exception_handlers;

bool __vcpu_has_ext(struct kvm_vcpu *vcpu, u64 ext)
{
	unsigned long value = 0;
	int ret;

	ret = __vcpu_get_reg(vcpu, ext, &value);

	return !ret && !!value;
}

static u64 pte_addr(struct kvm_vm *vm, u64 entry)
{
	return ((entry & PGTBL_PTE_ADDR_MASK) >> PGTBL_PTE_ADDR_SHIFT) <<
		PGTBL_PAGE_SIZE_SHIFT;
}
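
/*
 * Illustrative worked example (not used by the code; assumes the standard
 * RV64 PTE layout with the PPN field starting at bit 10 and 4KB pages, i.e.
 * PGTBL_PTE_ADDR_SHIFT == 10 and PGTBL_PAGE_SIZE_SHIFT == 12):
 *
 *	entry = 0x20100401 (PPN 0x80401, V bit set)
 *	pte_addr() -> (0x20100401 >> 10) << 12 = 0x80401000
 */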

static u64 ptrs_per_pte(struct kvm_vm *vm)
{
	return PGTBL_PAGE_SIZE / sizeof(u64);
}

static u64 pte_index_mask[] = {
	PGTBL_L0_INDEX_MASK,
	PGTBL_L1_INDEX_MASK,
	PGTBL_L2_INDEX_MASK,
	PGTBL_L3_INDEX_MASK,
};

static u32 pte_index_shift[] = {
	PGTBL_L0_INDEX_SHIFT,
	PGTBL_L1_INDEX_SHIFT,
	PGTBL_L2_INDEX_SHIFT,
	PGTBL_L3_INDEX_SHIFT,
};

static u64 pte_index(struct kvm_vm *vm, gva_t gva, int level)
{
	TEST_ASSERT(level > -1,
		"Negative page table level (%d) not possible", level);
	TEST_ASSERT(level < vm->mmu.pgtable_levels,
		"Invalid page table level (%d)", level);

	return (gva & pte_index_mask[level]) >> pte_index_shift[level];
}
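
/*
 * Illustrative worked example (not used by the code; assumes Sv39-style
 * 9-bit index fields with 4KB pages, i.e. level 0 covers VA bits 20:12,
 * level 1 bits 29:21, level 2 bits 38:30):
 *
 *	gva = 0x40201000
 *	pte_index(vm, gva, 2) = (gva >> 30) & 0x1ff = 1
 *	pte_index(vm, gva, 1) = (gva >> 21) & 0x1ff = 1
 *	pte_index(vm, gva, 0) = (gva >> 12) & 0x1ff = 1
 */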

void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
	size_t nr_pages = vm_page_align(vm, ptrs_per_pte(vm) * 8) / vm->page_size;

	if (vm->mmu.pgd_created)
		return;

	vm->mmu.pgd = vm_phy_pages_alloc(vm, nr_pages,
					 KVM_GUEST_PAGE_TABLE_MIN_PADDR,
					 vm->memslots[MEM_REGION_PT]);
	vm->mmu.pgd_created = true;
}

void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr)
{
	u64 *ptep, next_ppn;
	int level = vm->mmu.pgtable_levels - 1;

	TEST_ASSERT((gva % vm->page_size) == 0,
		"Virtual address not on page boundary,\n"
		"  gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
		    "Invalid virtual address, gva: 0x%lx", gva);
	TEST_ASSERT((paddr % vm->page_size) == 0,
		"Physical address not on page boundary,\n"
		"  paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
		"Physical address beyond maximum supported,\n"
		"  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		paddr, vm->max_gfn, vm->page_size);

	/* Start the walk at the root table, allocating a next level if the entry is empty */
	ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pte_index(vm, gva, level) * 8;
	if (!*ptep) {
		next_ppn = vm_alloc_page_table(vm) >> PGTBL_PAGE_SIZE_SHIFT;
		*ptep = (next_ppn << PGTBL_PTE_ADDR_SHIFT) |
			PGTBL_PTE_VALID_MASK;
	}
	level--;

	/* Walk down the remaining levels, allocating any missing intermediate tables */
	while (level > -1) {
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +
		       pte_index(vm, gva, level) * 8;
		if (!*ptep && level > 0) {
			next_ppn = vm_alloc_page_table(vm) >>
				   PGTBL_PAGE_SIZE_SHIFT;
			*ptep = (next_ppn << PGTBL_PTE_ADDR_SHIFT) |
				PGTBL_PTE_VALID_MASK;
		}
		level--;
	}

	/* Install the leaf PTE with the library's default permission bits */
	paddr = paddr >> PGTBL_PAGE_SIZE_SHIFT;
	*ptep = (paddr << PGTBL_PTE_ADDR_SHIFT) |
		PGTBL_PTE_PERM_MASK | PGTBL_PTE_VALID_MASK;
}
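
/*
 * Typical host-side usage goes through the generic wrapper (a sketch,
 * assuming virt_pg_map() from kvm_util dispatches to virt_arch_pg_map()):
 *
 *	gpa_t paddr = vm_phy_page_alloc(vm, KVM_UTIL_MIN_PFN * vm->page_size,
 *					vm->memslots[MEM_REGION_DATA]);
 *	virt_pg_map(vm, gva, paddr);
 */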

gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva)
{
	u64 *ptep;
	int level = vm->mmu.pgtable_levels - 1;

	if (!vm->mmu.pgd_created)
		goto unmapped_gva;

	ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pte_index(vm, gva, level) * 8;
	if (!*ptep)
		goto unmapped_gva;
	level--;

	while (level > -1) {
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +
		       pte_index(vm, gva, level) * 8;
		if (!*ptep)
			goto unmapped_gva;
		level--;
	}

	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));

unmapped_gva:
	TEST_FAIL("No mapping for vm virtual address gva: 0x%lx level: %d",
		  gva, level);
	exit(1);
}
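
/*
 * Illustrative host-side translation (a sketch, assuming the generic
 * addr_gva2gpa() wrapper from kvm_util dispatches here):
 *
 *	gpa_t gpa = addr_gva2gpa(vm, gva);
 *	void *hva = addr_gpa2hva(vm, gpa);
 */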

static void pte_dump(FILE *stream, struct kvm_vm *vm, u8 indent,
		     u64 page, int level)
{
#ifdef DEBUG
	static const char *const type[] = { "pte", "pmd", "pud", "p4d" };
	u64 pte, *ptep;

	if (level < 0)
		return;

	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
		ptep = addr_gpa2hva(vm, pte);
		if (!*ptep)
			continue;
		fprintf(stream, "%*s%s: %lx: %lx at %p\n", indent, "",
			type[level], pte, *ptep, ptep);
		pte_dump(stream, vm, indent + 1,
			 pte_addr(vm, *ptep), level - 1);
	}
#endif
}

void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
{
	struct kvm_mmu *mmu = &vm->mmu;
	int level = mmu->pgtable_levels - 1;
	u64 pgd, *ptep;

	if (!mmu->pgd_created)
		return;

	for (pgd = mmu->pgd; pgd < mmu->pgd + ptrs_per_pte(vm) * 8; pgd += 8) {
		ptep = addr_gpa2hva(vm, pgd);
		if (!*ptep)
			continue;
		fprintf(stream, "%*spgd: %lx: %lx at %p\n", indent, "",
			pgd, *ptep, ptep);
		pte_dump(stream, vm, indent + 1,
			 pte_addr(vm, *ptep), level - 1);
	}
}

void riscv_vcpu_mmu_setup(struct kvm_vcpu *vcpu)
{
	struct kvm_vm *vm = vcpu->vm;
	unsigned long satp;
	unsigned long satp_mode;
	unsigned long max_satp_mode;

	/*
	 * Pick the satp translation mode (Sv39/Sv48/Sv57) matching the
	 * VM's virtual address width. All of these modes use a 4KB
	 * last level page size.
	 */
	switch (vm->mode) {
	case VM_MODE_P56V57_4K:
	case VM_MODE_P50V57_4K:
	case VM_MODE_P41V57_4K:
		satp_mode = SATP_MODE_57;
		break;
	case VM_MODE_P56V48_4K:
	case VM_MODE_P50V48_4K:
	case VM_MODE_P41V48_4K:
		satp_mode = SATP_MODE_48;
		break;
	case VM_MODE_P56V39_4K:
	case VM_MODE_P50V39_4K:
	case VM_MODE_P41V39_4K:
		satp_mode = SATP_MODE_39;
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	max_satp_mode = vcpu_get_reg(vcpu, RISCV_CONFIG_REG(satp_mode));

	if ((satp_mode >> SATP_MODE_SHIFT) > max_satp_mode)
		TEST_FAIL("Unable to set satp mode 0x%lx, max mode 0x%lx",
			  satp_mode >> SATP_MODE_SHIFT, max_satp_mode);

	satp = (vm->mmu.pgd >> PGTBL_PAGE_SIZE_SHIFT) & SATP_PPN;
	satp |= satp_mode;

	vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(satp), satp);
}
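
/*
 * Illustrative satp encoding (assumes the RV64 satp layout: MODE in bits
 * 63:60, ASID in 59:44, root-table PPN in 43:0). With a root table at
 * guest physical 0x80400000 and Sv48 (MODE == 9):
 *
 *	satp = (9UL << 60) | (0x80400000UL >> 12)
 *	     = 0x9000000000080400
 */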

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent)
{
	struct kvm_riscv_core core;

	core.mode = vcpu_get_reg(vcpu, RISCV_CORE_REG(mode));
	core.regs.pc = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.pc));
	core.regs.ra = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.ra));
	core.regs.sp = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.sp));
	core.regs.gp = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.gp));
	core.regs.tp = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.tp));
	core.regs.t0 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t0));
	core.regs.t1 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t1));
	core.regs.t2 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t2));
	core.regs.s0 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s0));
	core.regs.s1 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s1));
	core.regs.a0 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a0));
	core.regs.a1 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a1));
	core.regs.a2 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a2));
	core.regs.a3 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a3));
	core.regs.a4 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a4));
	core.regs.a5 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a5));
	core.regs.a6 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a6));
	core.regs.a7 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a7));
	core.regs.s2 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s2));
	core.regs.s3 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s3));
	core.regs.s4 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s4));
	core.regs.s5 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s5));
	core.regs.s6 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s6));
	core.regs.s7 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s7));
	core.regs.s8 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s8));
	core.regs.s9 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s9));
	core.regs.s10 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s10));
	core.regs.s11 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s11));
	core.regs.t3 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t3));
	core.regs.t4 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t4));
	core.regs.t5 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t5));
	core.regs.t6 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t6));

	fprintf(stream,
		" MODE:  0x%lx\n", core.mode);
	fprintf(stream,
		" PC: 0x%016lx   RA: 0x%016lx SP: 0x%016lx GP: 0x%016lx\n",
		core.regs.pc, core.regs.ra, core.regs.sp, core.regs.gp);
	fprintf(stream,
		" TP: 0x%016lx   T0: 0x%016lx T1: 0x%016lx T2: 0x%016lx\n",
		core.regs.tp, core.regs.t0, core.regs.t1, core.regs.t2);
	fprintf(stream,
		" S0: 0x%016lx   S1: 0x%016lx A0: 0x%016lx A1: 0x%016lx\n",
		core.regs.s0, core.regs.s1, core.regs.a0, core.regs.a1);
	fprintf(stream,
		" A2: 0x%016lx   A3: 0x%016lx A4: 0x%016lx A5: 0x%016lx\n",
		core.regs.a2, core.regs.a3, core.regs.a4, core.regs.a5);
	fprintf(stream,
		" A6: 0x%016lx   A7: 0x%016lx S2: 0x%016lx S3: 0x%016lx\n",
		core.regs.a6, core.regs.a7, core.regs.s2, core.regs.s3);
	fprintf(stream,
		" S4: 0x%016lx   S5: 0x%016lx S6: 0x%016lx S7: 0x%016lx\n",
		core.regs.s4, core.regs.s5, core.regs.s6, core.regs.s7);
	fprintf(stream,
		" S8: 0x%016lx   S9: 0x%016lx S10: 0x%016lx S11: 0x%016lx\n",
		core.regs.s8, core.regs.s9, core.regs.s10, core.regs.s11);
	fprintf(stream,
		" T3: 0x%016lx   T4: 0x%016lx T5: 0x%016lx T6: 0x%016lx\n",
		core.regs.t3, core.regs.t4, core.regs.t5, core.regs.t6);
}

static void __aligned(16) guest_unexp_trap(void)
{
	sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT,
		  KVM_RISCV_SELFTESTS_SBI_UNEXP,
		  0, 0, 0, 0, 0, 0);
}

void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
{
	vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code);
}

struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
{
	int r;
	size_t stack_size;
	unsigned long stack_gva;
	unsigned long current_gp = 0;
	struct kvm_mp_state mps;
	struct kvm_vcpu *vcpu;

	stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
					     vm->page_size;
	stack_gva = __vm_alloc(vm, stack_size,
			       DEFAULT_RISCV_GUEST_STACK_VADDR_MIN,
			       MEM_REGION_DATA);

	vcpu = __vm_vcpu_add(vm, vcpu_id);
	riscv_vcpu_mmu_setup(vcpu);

	/*
	 * With SBI HSM support in KVM RISC-V, all secondary VCPUs are
	 * powered-off by default, so ensure that all secondary VCPUs
	 * are powered-on using the KVM_SET_MP_STATE ioctl().
	 */
	mps.mp_state = KVM_MP_STATE_RUNNABLE;
	r = __vcpu_ioctl(vcpu, KVM_SET_MP_STATE, &mps);
	TEST_ASSERT(!r, "IOCTL KVM_SET_MP_STATE failed (error %d)", r);

	/* Setup the guest's global pointer to be the same as the host's */
	asm volatile (
		"add %0, gp, zero" : "=r" (current_gp) : : "memory");
	vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.gp), current_gp);

	/* Setup stack pointer of guest */
	vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_gva + stack_size);

	/* Setup sscratch for guest_get_vcpuid() */
	vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(sscratch), vcpu_id);

	/* Setup default exception vector of guest */
	vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(stvec), (unsigned long)guest_unexp_trap);

	return vcpu;
}
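
/*
 * Typical usage (a sketch, assuming the generic vm_create_with_one_vcpu()
 * helper from kvm_util, which ends up calling vm_arch_vcpu_add()):
 *
 *	struct kvm_vcpu *vcpu;
 *	struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 *
 *	vcpu_run(vcpu);
 */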

void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
	va_list ap;
	u64 id = RISCV_CORE_REG(regs.a0);
	int i;

	TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
		    "  num: %u", num);

	va_start(ap, num);

	for (i = 0; i < num; i++) {
		switch (i) {
		case 0:
			id = RISCV_CORE_REG(regs.a0);
			break;
		case 1:
			id = RISCV_CORE_REG(regs.a1);
			break;
		case 2:
			id = RISCV_CORE_REG(regs.a2);
			break;
		case 3:
			id = RISCV_CORE_REG(regs.a3);
			break;
		case 4:
			id = RISCV_CORE_REG(regs.a4);
			break;
		case 5:
			id = RISCV_CORE_REG(regs.a5);
			break;
		case 6:
			id = RISCV_CORE_REG(regs.a6);
			break;
		case 7:
			id = RISCV_CORE_REG(regs.a7);
			break;
		}
		vcpu_set_reg(vcpu, id, va_arg(ap, u64));
	}

	va_end(ap);
}
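
/*
 * Illustrative usage (a sketch): the arguments land in a0-a7 and match the
 * guest function's parameters, e.g. for a guest entry point declared as
 * "static void guest_code(u64 token, u64 flags)":
 *
 *	vcpu_args_set(vcpu, 2, (u64)0xdeadbeef, (u64)0x1);
 */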

void kvm_exit_unexpected_exception(int vector, int ec)
{
	ucall(UCALL_UNHANDLED, 2, vector, ec);
}

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	if (get_ucall(vcpu, &uc) == UCALL_UNHANDLED) {
		TEST_FAIL("Unexpected exception (vector:0x%lx, ec:0x%lx)",
			uc.args[0], uc.args[1]);
	}
}

/*
 * Row 0 holds synchronous exception handlers indexed by cause, row 1
 * holds the single shared interrupt handler at index 0.
 */
struct handlers {
	exception_handler_fn exception_handlers[NR_VECTORS][NR_EXCEPTIONS];
};

void route_exception(struct pt_regs *regs)
{
	struct handlers *handlers = (struct handlers *)exception_handlers;
	int vector = 0, ec;

	ec = regs->cause & ~CAUSE_IRQ_FLAG;
	if (ec >= NR_EXCEPTIONS)
		goto unexpected_exception;

	/* Use the same handler for all the interrupts */
	if (regs->cause & CAUSE_IRQ_FLAG) {
		vector = 1;
		ec = 0;
	}

	if (handlers && handlers->exception_handlers[vector][ec])
		return handlers->exception_handlers[vector][ec](regs);

unexpected_exception:
	return kvm_exit_unexpected_exception(vector, ec);
}
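
/*
 * Illustrative handler shape (assumes exception_handler_fn receives the
 * trapping context as a struct pt_regs and may advance regs->epc to skip
 * the faulting instruction; cause 2 is "illegal instruction" per the
 * privileged spec):
 *
 *	static void illegal_insn_handler(struct pt_regs *regs)
 *	{
 *		GUEST_ASSERT((regs->cause & ~CAUSE_IRQ_FLAG) == 2);
 *		regs->epc += 4;
 *	}
 */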

void vcpu_init_vector_tables(struct kvm_vcpu *vcpu)
{
	extern char exception_vectors;

	vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(stvec), (unsigned long)&exception_vectors);
}

void vm_init_vector_tables(struct kvm_vm *vm)
{
	vm->handlers = __vm_alloc(vm, sizeof(struct handlers), vm->page_size,
				  MEM_REGION_DATA);

	/* Point the guest's exception_handlers global at the allocated table */
	*(gva_t *)addr_gva2hva(vm, (gva_t)(&exception_handlers)) = vm->handlers;
}

void vm_install_exception_handler(struct kvm_vm *vm, int vector, exception_handler_fn handler)
{
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	assert(vector < NR_EXCEPTIONS);
	handlers->exception_handlers[0][vector] = handler;
}
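
/*
 * Illustrative usage (a sketch; cause 2 is "illegal instruction" per the
 * privileged spec, and illegal_insn_handler is the hypothetical handler
 * sketched above):
 *
 *	vm_init_vector_tables(vm);
 *	vm_install_exception_handler(vm, 2, illegal_insn_handler);
 *	vcpu_init_vector_tables(vcpu);
 */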

void vm_install_interrupt_handler(struct kvm_vm *vm, exception_handler_fn handler)
{
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	handlers->exception_handlers[1][0] = handler;
}

u32 guest_get_vcpuid(void)
{
	/* The vcpu_id is stashed in sscratch by vm_arch_vcpu_add() */
	return csr_read(CSR_SSCRATCH);
}

/*
 * Make an SBI call from the guest. Per the SBI spec's calling convention,
 * the extension ID goes in a7, the function ID in a6, arguments in a0-a5,
 * and the error/value pair comes back in a0/a1.
 */
struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
			unsigned long arg1, unsigned long arg2,
			unsigned long arg3, unsigned long arg4,
			unsigned long arg5)
{
	register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0);
	register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1);
	register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2);
	register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3);
	register uintptr_t a4 asm ("a4") = (uintptr_t)(arg4);
	register uintptr_t a5 asm ("a5") = (uintptr_t)(arg5);
	register uintptr_t a6 asm ("a6") = (uintptr_t)(fid);
	register uintptr_t a7 asm ("a7") = (uintptr_t)(ext);
	struct sbiret ret;

	asm volatile (
		"ecall"
		: "+r" (a0), "+r" (a1)
		: "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7)
		: "memory");
	ret.error = a0;
	ret.value = a1;

	return ret;
}

bool guest_sbi_probe_extension(int extid, long *out_val)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_PROBE_EXT, extid,
			0, 0, 0, 0, 0);

	__GUEST_ASSERT(!ret.error || ret.error == SBI_ERR_NOT_SUPPORTED,
		       "ret.error=%ld, ret.value=%ld", ret.error, ret.value);

	if (ret.error == SBI_ERR_NOT_SUPPORTED)
		return false;

	if (out_val)
		*out_val = ret.value;

	return true;
}
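
/*
 * Illustrative guest-side usage (a sketch; 0x48534d is the "HSM" extension
 * ID from the SBI spec):
 *
 *	long probe_val;
 *
 *	if (guest_sbi_probe_extension(0x48534d, &probe_val))
 *		GUEST_PRINTF("SBI HSM available (probe=%ld)\n", probe_val);
 */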

unsigned long get_host_sbi_spec_version(void)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_GET_SPEC_VERSION, 0,
			0, 0, 0, 0, 0);

	GUEST_ASSERT(!ret.error);

	return ret.value;
}
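
/*
 * Illustrative decoding (assumes the SBI spec version encoding: minor
 * number in bits 23:0, major number in bits 30:24):
 *
 *	unsigned long ver = get_host_sbi_spec_version();
 *	unsigned long major = (ver >> 24) & 0x7f;
 *	unsigned long minor = ver & 0xffffff;
 */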

void kvm_selftest_arch_init(void)
{
	/*
	 * riscv64 doesn't have a true default mode, so start by detecting the
	 * supported vm mode.
	 */
	guest_modes_append_default();
}

unsigned long riscv64_get_satp_mode(void)
{
	int kvm_fd, vm_fd, vcpu_fd, err;
	u64 val;
	struct kvm_one_reg reg = {
		.id     = RISCV_CONFIG_REG(satp_mode),
		.addr   = (u64)&val,
	};

	kvm_fd = open_kvm_dev_path_or_exit();
	vm_fd = __kvm_ioctl(kvm_fd, KVM_CREATE_VM, NULL);
	TEST_ASSERT(vm_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm_fd));

	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
	TEST_ASSERT(vcpu_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, vcpu_fd));

	err = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_GET_ONE_REG, vcpu_fd));

	close(vcpu_fd);
	close(vm_fd);
	close(kvm_fd);

	return val;
}
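
/*
 * Note (per the privileged spec's satp MODE encoding): the value returned
 * above is 8 for Sv39, 9 for Sv48, and 10 for Sv57.
 */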

bool kvm_arch_has_default_irqchip(void)
{
	return kvm_check_cap(KVM_CAP_IRQCHIP);
}
573