// SPDX-License-Identifier: GPL-2.0
/*
 * AArch64 code
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */

#include <linux/compiler.h>
#include <assert.h>

#include "guest_modes.h"
#include "kvm_util.h"
#include "processor.h"
#include "ucall_common.h"

#include <linux/bitfield.h>
#include <linux/sizes.h>

#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN	0xac0000

static vm_vaddr_t exception_handlers;

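/* Round v up to the next vm->page_size boundary (page sizes are powers of two). */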
static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
{
	return (v + vm->page_size - 1) & ~(vm->page_size - 1);
}

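/*
 * Descriptors are 8 bytes, so a table of 2^(page_shift - 3) entries fills
 * one page and each level of the walk resolves (page_shift - 3) VA bits;
 * the PGD resolves whatever remains of va_bits above the lower levels.
 */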
static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;

	return (gva >> shift) & mask;
}

static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->pgtable_levels == 4,
		"Mode %d does not have 4 page table levels", vm->mode);

	return (gva >> shift) & mask;
}

static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->pgtable_levels >= 3,
		"Mode %d does not have >= 3 page table levels", vm->mode);

	return (gva >> shift) & mask;
}

static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	return (gva >> vm->page_shift) & mask;
}

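/*
 * With FEAT_LPA2 (4K/16K granules combined with >48-bit PA or VA),
 * descriptor bits [9:8] carry PA[51:50] instead of the shareability
 * field, which moves into TCR_EL1 (see the TCR_DS handling below).
 */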
static inline bool use_lpa2_pte_format(struct kvm_vm *vm)
{
	return (vm->page_size == SZ_4K || vm->page_size == SZ_16K) &&
	       (vm->pa_bits > 48 || vm->va_bits > 48);
}

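/*
 * Compose a descriptor from a PA and attribute bits (and pte_addr() below
 * decomposes one). For 64K granules without LPA2, PA[51:48] are carried
 * in descriptor bits [15:12]; with LPA2 the high bits sit in [9:8].
 */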
static uint64_t addr_pte(struct kvm_vm *vm, uint64_t pa, uint64_t attrs)
{
	uint64_t pte;

	if (use_lpa2_pte_format(vm)) {
		pte = pa & PTE_ADDR_MASK_LPA2(vm->page_shift);
		pte |= FIELD_GET(GENMASK(51, 50), pa) << PTE_ADDR_51_50_LPA2_SHIFT;
		attrs &= ~PTE_ADDR_51_50_LPA2;
	} else {
		pte = pa & PTE_ADDR_MASK(vm->page_shift);
		if (vm->page_shift == 16)
			pte |= FIELD_GET(GENMASK(51, 48), pa) << PTE_ADDR_51_48_SHIFT;
	}
	pte |= attrs;

	return pte;
}

static uint64_t pte_addr(struct kvm_vm *vm, uint64_t pte)
{
	uint64_t pa;

	if (use_lpa2_pte_format(vm)) {
		pa = pte & PTE_ADDR_MASK_LPA2(vm->page_shift);
		pa |= FIELD_GET(PTE_ADDR_51_50_LPA2, pte) << 50;
	} else {
		pa = pte & PTE_ADDR_MASK(vm->page_shift);
		if (vm->page_shift == 16)
			pa |= FIELD_GET(PTE_ADDR_51_48, pte) << 48;
	}

	return pa;
}

static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
{
	unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;

	return 1 << (vm->va_bits - shift);
}

static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
{
	return 1 << (vm->page_shift - 3);
}

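/* Allocate the root (PGD) tables, sized to cover the configured va_bits. */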
void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
	size_t nr_pages = page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size;

	if (vm->pgd_created)
		return;

	vm->pgd = vm_phy_pages_alloc(vm, nr_pages,
				     KVM_GUEST_PAGE_TABLE_MIN_PADDR,
				     vm->memslots[MEM_REGION_PT]);
	vm->pgd_created = true;
}

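/*
 * Map a single page: walk down from the PGD, allocating any missing
 * intermediate tables, then write the leaf PTE. PTE_AF is set up front
 * so the guest never takes Access flag faults.
 */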
static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
			 uint64_t flags)
{
	uint8_t attr_idx = flags & (PTE_ATTRINDX_MASK >> PTE_ATTRINDX_SHIFT);
	uint64_t pg_attr;
	uint64_t *ptep;

	TEST_ASSERT((vaddr % vm->page_size) == 0,
		"Virtual address not on page boundary,\n"
		"  vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		(vaddr >> vm->page_shift)),
		"Invalid virtual address, vaddr: 0x%lx", vaddr);
	TEST_ASSERT((paddr % vm->page_size) == 0,
		"Physical address not on page boundary,\n"
		"  paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
		"Physical address beyond maximum supported,\n"
		"  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		paddr, vm->max_gfn, vm->page_size);

	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
	if (!*ptep)
		*ptep = addr_pte(vm, vm_alloc_page_table(vm),
				 PGD_TYPE_TABLE | PTE_VALID);

	switch (vm->pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
		if (!*ptep)
			*ptep = addr_pte(vm, vm_alloc_page_table(vm),
					 PUD_TYPE_TABLE | PTE_VALID);
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
		if (!*ptep)
			*ptep = addr_pte(vm, vm_alloc_page_table(vm),
					 PMD_TYPE_TABLE | PTE_VALID);
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
		break;
	default:
		TEST_FAIL("Page table levels must be 2, 3, or 4");
	}

	pg_attr = PTE_AF | PTE_ATTRINDX(attr_idx) | PTE_TYPE_PAGE | PTE_VALID;
	if (!use_lpa2_pte_format(vm))
		pg_attr |= PTE_SHARED;

	*ptep = addr_pte(vm, paddr, pg_attr);
}

void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	uint64_t attr_idx = MT_NORMAL;

	_virt_pg_map(vm, vaddr, paddr, attr_idx);
}

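/*
 * Return the host virtual address of the leaf PTE mapping @gva, failing
 * the test if any level of the walk is unmapped.
 */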
uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t *ptep;

	if (!vm->pgd_created)
		goto unmapped_gva;

	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
	if (!*ptep)
		goto unmapped_gva;

	switch (vm->pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
		if (!*ptep)
			goto unmapped_gva;
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
		if (!*ptep)
			goto unmapped_gva;
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
		if (!*ptep)
			goto unmapped_gva;
		break;
	default:
		TEST_FAIL("Page table levels must be 2, 3, or 4");
	}

	return ptep;

unmapped_gva:
	TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
	exit(EXIT_FAILURE);
}

vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t *ptep = virt_get_pte_hva(vm, gva);

	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
}

static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
{
#ifdef DEBUG
	static const char * const type[] = { "", "pud", "pmd", "pte" };
	uint64_t pte, *ptep;

	if (level == 4)
		return;

	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
		ptep = addr_gpa2hva(vm, pte);
		if (!*ptep)
			continue;
		fprintf(stream, "%*s%s: %lx: %lx at %p\n", indent, "", type[level], pte, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level + 1);
	}
#endif
}

void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	int level = 4 - (vm->pgtable_levels - 1);
	uint64_t pgd, *ptep;

	if (!vm->pgd_created)
		return;

	for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) {
		ptep = addr_gpa2hva(vm, pgd);
		if (!*ptep)
			continue;
		fprintf(stream, "%*spgd: %lx: %lx at %p\n", indent, "", pgd, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level);
	}
}

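/*
 * Bring a vCPU to the point where guest code can run with the MMU on:
 * select the granule via TCR_TG0 and the output size via TCR_IPS, derive
 * T0SZ from va_bits, make table walks write-back cacheable and inner
 * shareable, point TTBR0_EL1 at the PGD, and enable the MMU and caches in
 * SCTLR_EL1. TPIDR_EL1 doubles as per-vCPU storage for the vcpu id, which
 * guest_get_vcpuid() reads back.
 */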
void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
{
	struct kvm_vcpu_init default_init = { .target = -1, };
	struct kvm_vm *vm = vcpu->vm;
	uint64_t sctlr_el1, tcr_el1, ttbr0_el1;

	if (!init)
		init = &default_init;

	if (init->target == -1) {
		struct kvm_vcpu_init preferred;

		vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &preferred);
		init->target = preferred.target;
	}

	vcpu_ioctl(vcpu, KVM_ARM_VCPU_INIT, init);

	/*
	 * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
	 * registers, which the variable argument list macros do.
	 */
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20);

	sctlr_el1 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1));
	tcr_el1 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1));

	/* Configure base granule size */
	switch (vm->mode) {
	case VM_MODE_PXXV48_4K:
		TEST_FAIL("AArch64 does not support 4K sized pages "
			  "with ANY-bit physical address ranges");
	case VM_MODE_P52V48_64K:
	case VM_MODE_P48V48_64K:
	case VM_MODE_P40V48_64K:
	case VM_MODE_P36V48_64K:
		tcr_el1 |= TCR_TG0_64K;
		break;
	case VM_MODE_P52V48_16K:
	case VM_MODE_P48V48_16K:
	case VM_MODE_P40V48_16K:
	case VM_MODE_P36V48_16K:
	case VM_MODE_P36V47_16K:
		tcr_el1 |= TCR_TG0_16K;
		break;
	case VM_MODE_P52V48_4K:
	case VM_MODE_P48V48_4K:
	case VM_MODE_P40V48_4K:
	case VM_MODE_P36V48_4K:
		tcr_el1 |= TCR_TG0_4K;
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	ttbr0_el1 = vm->pgd & GENMASK(47, vm->page_shift);

	/* Configure output size */
	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
	case VM_MODE_P52V48_16K:
	case VM_MODE_P52V48_64K:
		tcr_el1 |= TCR_IPS_52_BITS;
		ttbr0_el1 |= FIELD_GET(GENMASK(51, 48), vm->pgd) << 2;
		break;
	case VM_MODE_P48V48_4K:
	case VM_MODE_P48V48_16K:
	case VM_MODE_P48V48_64K:
		tcr_el1 |= TCR_IPS_48_BITS;
		break;
	case VM_MODE_P40V48_4K:
	case VM_MODE_P40V48_16K:
	case VM_MODE_P40V48_64K:
		tcr_el1 |= TCR_IPS_40_BITS;
		break;
	case VM_MODE_P36V48_4K:
	case VM_MODE_P36V48_16K:
	case VM_MODE_P36V48_64K:
	case VM_MODE_P36V47_16K:
		tcr_el1 |= TCR_IPS_36_BITS;
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	sctlr_el1 |= SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_I;

	tcr_el1 |= TCR_IRGN0_WBWA | TCR_ORGN0_WBWA | TCR_SH0_INNER;
	tcr_el1 |= TCR_T0SZ(vm->va_bits);
	if (use_lpa2_pte_format(vm))
		tcr_el1 |= TCR_DS;

	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), ttbr0_el1);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpu->id);
}

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
	uint64_t pstate, pc;

	pstate = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate));
	pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc));

	fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n",
		indent, "", pstate, pc);
}

void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
{
	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
}

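/*
 * Add a vCPU with a guest stack: DEFAULT_STACK_PGS pages for 4K page
 * sizes, a single page otherwise, with SP_EL1 starting at the top.
 */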
static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
					   struct kvm_vcpu_init *init)
{
	size_t stack_size;
	uint64_t stack_vaddr;
	struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);

	stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
					     vm->page_size;
	stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
				       DEFAULT_ARM64_GUEST_STACK_VADDR_MIN,
				       MEM_REGION_DATA);

	aarch64_vcpu_setup(vcpu, init);

	vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
	return vcpu;
}

struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
				  struct kvm_vcpu_init *init, void *guest_code)
{
	struct kvm_vcpu *vcpu = __aarch64_vcpu_add(vm, vcpu_id, init);

	vcpu_arch_set_entry_point(vcpu, guest_code);

	return vcpu;
}

struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
{
	return __aarch64_vcpu_add(vm, vcpu_id, NULL);
}

void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
	va_list ap;
	int i;

	TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
		    "  num: %u", num);

	va_start(ap, num);

	for (i = 0; i < num; i++) {
		vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.regs[i]),
			     va_arg(ap, uint64_t));
	}

	va_end(ap);
}

void kvm_exit_unexpected_exception(int vector, uint64_t ec, bool valid_ec)
{
	ucall(UCALL_UNHANDLED, 3, vector, ec, valid_ec);
	while (1)
		;
}

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	if (get_ucall(vcpu, &uc) != UCALL_UNHANDLED)
		return;

	if (uc.args[2]) /* valid_ec */ {
		assert(VECTOR_IS_SYNC(uc.args[0]));
		TEST_FAIL("Unexpected exception (vector:0x%lx, ec:0x%lx)",
			  uc.args[0], uc.args[1]);
	} else {
		assert(!VECTOR_IS_SYNC(uc.args[0]));
		TEST_FAIL("Unexpected exception (vector:0x%lx)",
			  uc.args[0]);
	}
}

struct handlers {
	handler_fn exception_handlers[VECTOR_NUM][ESR_ELx_EC_MAX + 1];
};

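/*
 * Point VBAR_EL1 at the selftest vector table; the "vectors" symbol is
 * expected to come from the library's assembly stubs (handlers.S in this
 * directory).
 */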
void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu)
{
	extern char vectors;

	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors);
}

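/*
 * Called from the assembly vectors: dispatch to the handler registered
 * for this vector and, for synchronous exceptions, for the ESR_EL1
 * exception class; anything unhandled is reported to the host via ucall.
 */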
void route_exception(struct ex_regs *regs, int vector)
{
	struct handlers *handlers = (struct handlers *)exception_handlers;
	bool valid_ec;
	int ec = 0;

	switch (vector) {
	case VECTOR_SYNC_CURRENT:
	case VECTOR_SYNC_LOWER_64:
		ec = ESR_ELx_EC(read_sysreg(esr_el1));
		valid_ec = true;
		break;
	case VECTOR_IRQ_CURRENT:
	case VECTOR_IRQ_LOWER_64:
	case VECTOR_FIQ_CURRENT:
	case VECTOR_FIQ_LOWER_64:
	case VECTOR_ERROR_CURRENT:
	case VECTOR_ERROR_LOWER_64:
		ec = 0;
		valid_ec = false;
		break;
	default:
		valid_ec = false;
		goto unexpected_exception;
	}

	if (handlers && handlers->exception_handlers[vector][ec])
		return handlers->exception_handlers[vector][ec](regs);

unexpected_exception:
	kvm_exit_unexpected_exception(vector, ec, valid_ec);
}

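/*
 * Allocate the handler table in guest memory and patch the guest's copy
 * of exception_handlers (through its HVA) so route_exception() finds it.
 */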
void vm_init_descriptor_tables(struct kvm_vm *vm)
{
	vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
					vm->page_size, MEM_REGION_DATA);

	*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
}

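/*
 * Register a handler for a synchronous exception class. Typical use from
 * a test, with a hypothetical guest_brk_handler for illustration:
 *
 *	vm_init_descriptor_tables(vm);
 *	vcpu_init_descriptor_tables(vcpu);
 *	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
 *				ESR_ELx_EC_BRK64, guest_brk_handler);
 */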
void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec,
			     void (*handler)(struct ex_regs *))
{
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	assert(VECTOR_IS_SYNC(vector));
	assert(vector < VECTOR_NUM);
	assert(ec <= ESR_ELx_EC_MAX);
	handlers->exception_handlers[vector][ec] = handler;
}

void vm_install_exception_handler(struct kvm_vm *vm, int vector,
				  void (*handler)(struct ex_regs *))
{
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	assert(!VECTOR_IS_SYNC(vector));
	assert(vector < VECTOR_NUM);
	handlers->exception_handlers[vector][0] = handler;
}

uint32_t guest_get_vcpuid(void)
{
	return read_sysreg(tpidr_el1);
}

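/*
 * Map an ID_AA64MMFR0_EL1 TGRAN field value to the maximum IPA size
 * usable with that granule: 0 if unsupported, 52 if the 52-bit variant is
 * advertised and the VM's IPA allows it, otherwise capped at 48 bits.
 */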
static uint32_t max_ipa_for_page_size(uint32_t vm_ipa, uint32_t gran,
				      uint32_t not_sup_val, uint32_t ipa52_min_val)
{
	if (gran == not_sup_val)
		return 0;
	else if (gran >= ipa52_min_val && vm_ipa >= 52)
		return 52;
	else
		return min(vm_ipa, 48U);
}

void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
				      uint32_t *ipa16k, uint32_t *ipa64k)
{
	struct kvm_vcpu_init preferred_init;
	int kvm_fd, vm_fd, vcpu_fd, err;
	uint64_t val;
	uint32_t gran;
	struct kvm_one_reg reg = {
		.id	= KVM_ARM64_SYS_REG(SYS_ID_AA64MMFR0_EL1),
		.addr	= (uint64_t)&val,
	};

	kvm_fd = open_kvm_dev_path_or_exit();
	vm_fd = __kvm_ioctl(kvm_fd, KVM_CREATE_VM, (void *)(unsigned long)ipa);
	TEST_ASSERT(vm_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm_fd));

	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
	TEST_ASSERT(vcpu_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, vcpu_fd));

	err = ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &preferred_init);
	TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_ARM_PREFERRED_TARGET, err));
	err = ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &preferred_init);
	TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_ARM_VCPU_INIT, err));

	err = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_GET_ONE_REG, err));

	gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN4), val);
	*ipa4k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN4_NI,
					ID_AA64MMFR0_EL1_TGRAN4_52_BIT);

	gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN64), val);
	*ipa64k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN64_NI,
					ID_AA64MMFR0_EL1_TGRAN64_IMP);

	gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN16), val);
	*ipa16k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN16_NI,
					ID_AA64MMFR0_EL1_TGRAN16_52_BIT);

	close(vcpu_fd);
	close(vm_fd);
	close(kvm_fd);
}

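/*
 * SMCCC from guest code: per the SMC Calling Convention, the function ID
 * goes in w0, arguments in x1-x7, and results come back in x0-x3.
 */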
#define __smccc_call(insn, function_id, arg0, arg1, arg2, arg3, arg4, arg5,	\
		     arg6, res)							\
	asm volatile("mov   w0, %w[function_id]\n"				\
		     "mov   x1, %[arg0]\n"					\
		     "mov   x2, %[arg1]\n"					\
		     "mov   x3, %[arg2]\n"					\
		     "mov   x4, %[arg3]\n"					\
		     "mov   x5, %[arg4]\n"					\
		     "mov   x6, %[arg5]\n"					\
		     "mov   x7, %[arg6]\n"					\
		     #insn  "#0\n"						\
		     "mov   %[res0], x0\n"					\
		     "mov   %[res1], x1\n"					\
		     "mov   %[res2], x2\n"					\
		     "mov   %[res3], x3\n"					\
		     : [res0] "=r"(res->a0), [res1] "=r"(res->a1),		\
		       [res2] "=r"(res->a2), [res3] "=r"(res->a3)		\
		     : [function_id] "r"(function_id), [arg0] "r"(arg0),	\
		       [arg1] "r"(arg1), [arg2] "r"(arg2), [arg3] "r"(arg3),	\
		       [arg4] "r"(arg4), [arg5] "r"(arg5), [arg6] "r"(arg6)	\
		     : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7")

void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
	       uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
	       uint64_t arg6, struct arm_smccc_res *res)
{
	__smccc_call(hvc, function_id, arg0, arg1, arg2, arg3, arg4, arg5,
		     arg6, res);
}

void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
	       uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
	       uint64_t arg6, struct arm_smccc_res *res)
{
	__smccc_call(smc, function_id, arg0, arg1, arg2, arg3, arg4, arg5,
		     arg6, res);
}

void kvm_selftest_arch_init(void)
{
	/*
	 * arm64 doesn't have a true default mode, so start by computing the
	 * available IPA space and page sizes early.
	 */
	guest_modes_append_default();
}

void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
{
	/*
	 * arm64 selftests use only TTBR0_EL1, meaning that the valid VA space
	 * is [0, 2^(64 - TCR_EL1.T0SZ)).
	 */
	sparsebit_set_num(vm->vpages_valid, 0,
			  (1ULL << vm->va_bits) >> vm->page_shift);
}

/* Helper to call wfi instruction. */
void wfi(void)
{
	asm volatile("wfi");
}
656