// SPDX-License-Identifier: GPL-2.0
/*
 * AArch64 code
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */

#include <linux/compiler.h>
#include <assert.h>

#include "guest_modes.h"
#include "kvm_util.h"
#include "processor.h"
#include "ucall_common.h"
#include "vgic.h"

#include <linux/bitfield.h>
#include <linux/sizes.h>

#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN	0xac0000

static gva_t exception_handlers;

static u64 pgd_index(struct kvm_vm *vm, gva_t gva)
{
	unsigned int shift = (vm->mmu.pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
	u64 mask = (1UL << (vm->va_bits - shift)) - 1;

	return (gva >> shift) & mask;
}
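
/*
 * Worked example (informative): with 4K pages (page_shift = 12), four
 * translation levels and va_bits = 48, the shift above is
 * (4 - 1) * (12 - 3) + 12 = 39 and the mask is (1UL << (48 - 39)) - 1 =
 * 0x1ff, i.e. pgd_index() extracts gva[47:39], the level-0 index for a
 * 4K granule.
 */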

static u64 pud_index(struct kvm_vm *vm, gva_t gva)
{
	unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
	u64 mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->mmu.pgtable_levels == 4,
		"Mode %d does not have 4 page table levels", vm->mode);

	return (gva >> shift) & mask;
}

static u64 pmd_index(struct kvm_vm *vm, gva_t gva)
{
	unsigned int shift = (vm->page_shift - 3) + vm->page_shift;
	u64 mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->mmu.pgtable_levels >= 3,
		"Mode %d does not have >= 3 page table levels", vm->mode);

	return (gva >> shift) & mask;
}

static u64 pte_index(struct kvm_vm *vm, gva_t gva)
{
	u64 mask = (1UL << (vm->page_shift - 3)) - 1;
	return (gva >> vm->page_shift) & mask;
}

static inline bool use_lpa2_pte_format(struct kvm_vm *vm)
{
	return (vm->page_size == SZ_4K || vm->page_size == SZ_16K) &&
	    (vm->pa_bits > 48 || vm->va_bits > 48);
}
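
/*
 * Informative note: with FEAT_LPA2 (advertised to the guest here via
 * TCR_EL1.DS = 1 in aarch64_vcpu_setup()), descriptor bits [9:8] carry
 * output address bits [51:50] instead of the shareability field. That is
 * why addr_pte() and pte_addr() below treat the LPA2 layout specially and
 * why _virt_pg_map() skips PTE_SHARED for it.
 */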

static u64 addr_pte(struct kvm_vm *vm, u64 pa, u64 attrs)
{
	u64 pte;

	if (use_lpa2_pte_format(vm)) {
		pte = pa & PTE_ADDR_MASK_LPA2(vm->page_shift);
		pte |= FIELD_GET(GENMASK(51, 50), pa) << PTE_ADDR_51_50_LPA2_SHIFT;
		attrs &= ~PTE_ADDR_51_50_LPA2;
	} else {
		pte = pa & PTE_ADDR_MASK(vm->page_shift);
		if (vm->page_shift == 16)
			pte |= FIELD_GET(GENMASK(51, 48), pa) << PTE_ADDR_51_48_SHIFT;
	}
	pte |= attrs;

	return pte;
}

static u64 pte_addr(struct kvm_vm *vm, u64 pte)
{
	u64 pa;

	if (use_lpa2_pte_format(vm)) {
		pa = pte & PTE_ADDR_MASK_LPA2(vm->page_shift);
		pa |= FIELD_GET(PTE_ADDR_51_50_LPA2, pte) << 50;
	} else {
		pa = pte & PTE_ADDR_MASK(vm->page_shift);
		if (vm->page_shift == 16)
			pa |= FIELD_GET(PTE_ADDR_51_48, pte) << 48;
	}

	return pa;
}
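
/*
 * Worked example (informative): with a 64K granule, a 52-bit PA such as
 * 0xf012345670000 is packed by addr_pte() as pte[47:16] = pa[47:16] and
 * pte[15:12] = pa[51:48] = 0xf; pte_addr() undoes the packing, so
 * pte_addr(vm, addr_pte(vm, pa, 0)) == pa for any page-aligned pa.
 */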

static u64 ptrs_per_pgd(struct kvm_vm *vm)
{
	unsigned int shift = (vm->mmu.pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
	return 1 << (vm->va_bits - shift);
}

static u64 __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
{
	return 1 << (vm->page_shift - 3);
}

void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
	size_t nr_pages = vm_page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size;

	if (vm->mmu.pgd_created)
		return;

	vm->mmu.pgd = vm_phy_pages_alloc(vm, nr_pages,
					 KVM_GUEST_PAGE_TABLE_MIN_PADDR,
					 vm->memslots[MEM_REGION_PT]);
	vm->mmu.pgd_created = true;
}
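
/*
 * Sizing example (informative): with a 16K granule, four levels and
 * va_bits = 48, ptrs_per_pgd() is 1 << (48 - 47) = 2, so the 16-byte
 * top-level table above still occupies one full 16K page after
 * vm_page_align().
 */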

static void _virt_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa,
			 u64 flags)
{
	u8 attr_idx = flags & (PTE_ATTRINDX_MASK >> PTE_ATTRINDX_SHIFT);
	u64 pg_attr;
	u64 *ptep;

	TEST_ASSERT((gva % vm->page_size) == 0,
		"Virtual address not on page boundary,\n"
		"  gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
		    "Invalid virtual address, gva: 0x%lx", gva);
	TEST_ASSERT((gpa % vm->page_size) == 0,
		    "Physical address not on page boundary,\n"
		    "  gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size);
	TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
		    "Physical address beyond maximum supported,\n"
		    "  gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		    gpa, vm->max_gfn, vm->page_size);

	ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pgd_index(vm, gva) * 8;
	if (!*ptep)
		*ptep = addr_pte(vm, vm_alloc_page_table(vm),
				 PGD_TYPE_TABLE | PTE_VALID);

	switch (vm->mmu.pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
		if (!*ptep)
			*ptep = addr_pte(vm, vm_alloc_page_table(vm),
					 PUD_TYPE_TABLE | PTE_VALID);
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
		if (!*ptep)
			*ptep = addr_pte(vm, vm_alloc_page_table(vm),
					 PMD_TYPE_TABLE | PTE_VALID);
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
		break;
	default:
		TEST_FAIL("Page table levels must be 2, 3, or 4");
	}

	pg_attr = PTE_AF | PTE_ATTRINDX(attr_idx) | PTE_TYPE_PAGE | PTE_VALID;
	if (!use_lpa2_pte_format(vm))
		pg_attr |= PTE_SHARED;

	*ptep = addr_pte(vm, gpa, pg_attr);
}
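
/*
 * A minimal usage sketch (the MT_* attribute indices come from the
 * selftest's processor.h; only MT_NORMAL is used in this file):
 *
 *	_virt_pg_map(vm, gva, gpa, MT_NORMAL_NC);
 *
 * would install the same leaf PTE as virt_arch_pg_map() below, but with
 * non-cacheable normal memory attributes.
 */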

void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
{
	u64 attr_idx = MT_NORMAL;

	_virt_pg_map(vm, gva, gpa, attr_idx);
}

u64 *virt_get_pte_hva_at_level(struct kvm_vm *vm, gva_t gva, int level)
{
	u64 *ptep;

	if (!vm->mmu.pgd_created)
		goto unmapped_gva;

	ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pgd_index(vm, gva) * 8;
	if (!ptep)
		goto unmapped_gva;
	if (level == 0)
		return ptep;

	switch (vm->mmu.pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
		if (!ptep)
			goto unmapped_gva;
		if (level == 1)
			break;
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
		if (!ptep)
			goto unmapped_gva;
		if (level == 2)
			break;
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
		if (!ptep)
			goto unmapped_gva;
		break;
	default:
		TEST_FAIL("Page table levels must be 2, 3, or 4");
	}

	return ptep;

unmapped_gva:
	TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
	exit(EXIT_FAILURE);
}

u64 *virt_get_pte_hva(struct kvm_vm *vm, gva_t gva)
{
	return virt_get_pte_hva_at_level(vm, gva, 3);
}

gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva)
{
	u64 *ptep = virt_get_pte_hva(vm, gva);

	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
}
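
/*
 * Worked example (informative): for a 4K guest,
 * addr_arch_gva2gpa(vm, 0x401234) resolves the PTE covering 0x401000 and
 * adds back the page offset 0x234, so GVAs need not be page-aligned here
 * even though _virt_pg_map() requires aligned inputs.
 */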

static void pte_dump(FILE *stream, struct kvm_vm *vm, u8 indent, u64 page, int level)
{
#ifdef DEBUG
	static const char * const type[] = { "", "pud", "pmd", "pte" };
	u64 pte, *ptep;

	if (level == 4)
		return;

	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
		ptep = addr_gpa2hva(vm, pte);
		if (!*ptep)
			continue;
		fprintf(stream, "%*s%s: %lx: %lx at %p\n", indent, "", type[level], pte, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level + 1);
	}
#endif
}

void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
{
	int level = 4 - (vm->mmu.pgtable_levels - 1);
	u64 pgd, *ptep;

	if (!vm->mmu.pgd_created)
		return;

	for (pgd = vm->mmu.pgd; pgd < vm->mmu.pgd + ptrs_per_pgd(vm) * 8; pgd += 8) {
		ptep = addr_gpa2hva(vm, pgd);
		if (!*ptep)
			continue;
		fprintf(stream, "%*spgd: %lx: %lx at %p\n", indent, "", pgd, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level);
	}
}

bool vm_supports_el2(struct kvm_vm *vm)
{
	const char *value = getenv("NV");

	if (value && *value == '0')
		return false;

	return vm_check_cap(vm, KVM_CAP_ARM_EL2) && vm->arch.has_gic;
}

void kvm_get_default_vcpu_target(struct kvm_vm *vm, struct kvm_vcpu_init *init)
{
	struct kvm_vcpu_init preferred = {};

	vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &preferred);
	if (vm_supports_el2(vm))
		preferred.features[0] |= BIT(KVM_ARM_VCPU_HAS_EL2);

	*init = preferred;
}

void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
{
	struct kvm_vcpu_init default_init = { .target = -1, };
	struct kvm_vm *vm = vcpu->vm;
	u64 sctlr_el1, tcr_el1, ttbr0_el1;

	if (!init) {
		kvm_get_default_vcpu_target(vm, &default_init);
		init = &default_init;
	}

	vcpu_ioctl(vcpu, KVM_ARM_VCPU_INIT, init);
	vcpu->init = *init;

	/*
	 * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
	 * registers, which the variable argument list macros do.
	 */
	vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_CPACR_EL1), 3 << 20);

	sctlr_el1 = vcpu_get_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SCTLR_EL1));
	tcr_el1 = vcpu_get_reg(vcpu, ctxt_reg_alias(vcpu, SYS_TCR_EL1));

	/* Configure base granule size */
	switch (vm->mode) {
	case VM_MODE_PXXVYY_4K:
		TEST_FAIL("AArch64 does not support 4K sized pages "
			  "with ANY-bit physical address ranges");
	case VM_MODE_P52V48_64K:
	case VM_MODE_P48V48_64K:
	case VM_MODE_P40V48_64K:
	case VM_MODE_P36V48_64K:
		tcr_el1 |= TCR_TG0_64K;
		break;
	case VM_MODE_P52V48_16K:
	case VM_MODE_P48V48_16K:
	case VM_MODE_P40V48_16K:
	case VM_MODE_P36V48_16K:
	case VM_MODE_P36V47_16K:
		tcr_el1 |= TCR_TG0_16K;
		break;
	case VM_MODE_P52V48_4K:
	case VM_MODE_P48V48_4K:
	case VM_MODE_P40V48_4K:
	case VM_MODE_P36V48_4K:
		tcr_el1 |= TCR_TG0_4K;
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	ttbr0_el1 = vm->mmu.pgd & GENMASK(47, vm->page_shift);

	/* Configure output size */
	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
	case VM_MODE_P52V48_16K:
	case VM_MODE_P52V48_64K:
		tcr_el1 |= TCR_IPS_52_BITS;
		ttbr0_el1 |= FIELD_GET(GENMASK(51, 48), vm->mmu.pgd) << 2;
		break;
	case VM_MODE_P48V48_4K:
	case VM_MODE_P48V48_16K:
	case VM_MODE_P48V48_64K:
		tcr_el1 |= TCR_IPS_48_BITS;
		break;
	case VM_MODE_P40V48_4K:
	case VM_MODE_P40V48_16K:
	case VM_MODE_P40V48_64K:
		tcr_el1 |= TCR_IPS_40_BITS;
		break;
	case VM_MODE_P36V48_4K:
	case VM_MODE_P36V48_16K:
	case VM_MODE_P36V48_64K:
	case VM_MODE_P36V47_16K:
		tcr_el1 |= TCR_IPS_36_BITS;
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	sctlr_el1 |= SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_I;

	tcr_el1 |= TCR_IRGN0_WBWA | TCR_ORGN0_WBWA | TCR_SH0_INNER;
	tcr_el1 |= TCR_T0SZ(vm->va_bits);
	tcr_el1 |= TCR_TBI1;
	tcr_el1 |= TCR_EPD1_MASK;
	if (use_lpa2_pte_format(vm))
		tcr_el1 |= TCR_DS;

	vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SCTLR_EL1), sctlr_el1);
	vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_TCR_EL1), tcr_el1);
	vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_MAIR_EL1), DEFAULT_MAIR_EL1);
	vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_TTBR0_EL1), ttbr0_el1);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpu->id);

	if (!vcpu_has_el2(vcpu))
		return;

	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_HCR_EL2),
		     HCR_EL2_RW | HCR_EL2_TGE | HCR_EL2_E2H);
}
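
/*
 * Configuration example (informative): for VM_MODE_P48V48_4K the code
 * above ends up with TCR_EL1.TG0 = 4K, IPS = 48 bits and
 * T0SZ = 64 - 48 = 16, i.e. a [0, 2^48) TTBR0_EL1 range matching the
 * bitmap populated by vm_populate_gva_bitmap() at the end of this file.
 */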

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent)
{
	u64 pstate, pc;

	pstate = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate));
	pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc));

	fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n",
		indent, "", pstate, pc);
}

void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
{
	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (u64)guest_code);
}

static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, u32 vcpu_id,
					   struct kvm_vcpu_init *init)
{
	size_t stack_size;
	gva_t stack_gva;
	struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);

	stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
					     vm->page_size;
	stack_gva = __vm_alloc(vm, stack_size,
			       DEFAULT_ARM64_GUEST_STACK_VADDR_MIN,
			       MEM_REGION_DATA);

	aarch64_vcpu_setup(vcpu, init);

	vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1), stack_gva + stack_size);
	return vcpu;
}

struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, u32 vcpu_id,
				  struct kvm_vcpu_init *init, void *guest_code)
{
	struct kvm_vcpu *vcpu = __aarch64_vcpu_add(vm, vcpu_id, init);

	vcpu_arch_set_entry_point(vcpu, guest_code);

	return vcpu;
}

struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
{
	return __aarch64_vcpu_add(vm, vcpu_id, NULL);
}

void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
	va_list ap;
	int i;

	TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
		    "  num: %u", num);

	va_start(ap, num);

	for (i = 0; i < num; i++) {
		vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.regs[i]),
			     va_arg(ap, u64));
	}

	va_end(ap);
}
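
/*
 * A minimal usage sketch: per the AAPCS64 register convention mirrored
 * here, the guest entry point receives these values in x0 and x1:
 *
 *	vcpu_args_set(vcpu, 2, (u64)some_gva, 42);
 */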

void kvm_exit_unexpected_exception(int vector, u64 ec, bool valid_ec)
{
	ucall(UCALL_UNHANDLED, 3, vector, ec, valid_ec);
	while (1)
		;
}

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	if (get_ucall(vcpu, &uc) != UCALL_UNHANDLED)
		return;

	if (uc.args[2]) /* valid_ec */ {
		assert(VECTOR_IS_SYNC(uc.args[0]));
		TEST_FAIL("Unexpected exception (vector:0x%lx, ec:0x%lx)",
			  uc.args[0], uc.args[1]);
	} else {
		assert(!VECTOR_IS_SYNC(uc.args[0]));
		TEST_FAIL("Unexpected exception (vector:0x%lx)",
			  uc.args[0]);
	}
}

struct handlers {
	handler_fn exception_handlers[VECTOR_NUM][ESR_ELx_EC_MAX + 1];
};

void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu)
{
	extern char vectors;

	vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_VBAR_EL1), (u64)&vectors);
}

void route_exception(struct ex_regs *regs, int vector)
{
	struct handlers *handlers = (struct handlers *)exception_handlers;
	bool valid_ec;
	int ec = 0;

	switch (vector) {
	case VECTOR_SYNC_CURRENT:
	case VECTOR_SYNC_LOWER_64:
		ec = ESR_ELx_EC(read_sysreg(esr_el1));
		valid_ec = true;
		break;
	case VECTOR_IRQ_CURRENT:
	case VECTOR_IRQ_LOWER_64:
	case VECTOR_FIQ_CURRENT:
	case VECTOR_FIQ_LOWER_64:
	case VECTOR_ERROR_CURRENT:
	case VECTOR_ERROR_LOWER_64:
		ec = 0;
		valid_ec = false;
		break;
	default:
		valid_ec = false;
		goto unexpected_exception;
	}

	if (handlers && handlers->exception_handlers[vector][ec])
		return handlers->exception_handlers[vector][ec](regs);

unexpected_exception:
	kvm_exit_unexpected_exception(vector, ec, valid_ec);
}

void vm_init_descriptor_tables(struct kvm_vm *vm)
{
	vm->handlers = __vm_alloc(vm, sizeof(struct handlers), vm->page_size,
				  MEM_REGION_DATA);

	*(gva_t *)addr_gva2hva(vm, (gva_t)(&exception_handlers)) = vm->handlers;
}

void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec,
			 void (*handler)(struct ex_regs *))
{
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	assert(VECTOR_IS_SYNC(vector));
	assert(vector < VECTOR_NUM);
	assert(ec <= ESR_ELx_EC_MAX);
	handlers->exception_handlers[vector][ec] = handler;
}
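
/*
 * A minimal usage sketch (assuming a test-local svc_handler();
 * ESR_ELx_EC_SVC64 is the EC reported for AArch64 SVC traps):
 *
 *	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
 *				ESR_ELx_EC_SVC64, svc_handler);
 */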

void vm_install_exception_handler(struct kvm_vm *vm, int vector,
			 void (*handler)(struct ex_regs *))
{
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	assert(!VECTOR_IS_SYNC(vector));
	assert(vector < VECTOR_NUM);
	handlers->exception_handlers[vector][0] = handler;
}

u32 guest_get_vcpuid(void)
{
	return read_sysreg(tpidr_el1);
}

static u32 max_ipa_for_page_size(u32 vm_ipa, u32 gran,
				 u32 not_sup_val, u32 ipa52_min_val)
{
	if (gran == not_sup_val)
		return 0;
	else if (gran >= ipa52_min_val && vm_ipa >= 52)
		return 52;
	else
		return min(vm_ipa, 48U);
}
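
/*
 * Worked example (informative): a host reporting TGRAN support at or
 * above the 52-bit minimum with vm_ipa = 52 yields 52; a granule the
 * host does not implement (gran == not_sup_val) yields 0; every other
 * combination is capped at min(vm_ipa, 48).
 */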

void aarch64_get_supported_page_sizes(u32 ipa, u32 *ipa4k,
				      u32 *ipa16k, u32 *ipa64k)
{
	struct kvm_vcpu_init preferred_init;
	int kvm_fd, vm_fd, vcpu_fd, err;
	u64 val;
	u32 gran;
	struct kvm_one_reg reg = {
		.id	= KVM_ARM64_SYS_REG(SYS_ID_AA64MMFR0_EL1),
		.addr	= (u64)&val,
	};

	kvm_fd = open_kvm_dev_path_or_exit();
	vm_fd = __kvm_ioctl(kvm_fd, KVM_CREATE_VM, (void *)(unsigned long)ipa);
	TEST_ASSERT(vm_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm_fd));

	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
	TEST_ASSERT(vcpu_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, vcpu_fd));

	err = ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &preferred_init);
	TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_ARM_PREFERRED_TARGET, err));
	err = ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &preferred_init);
	TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_ARM_VCPU_INIT, err));

	err = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_GET_ONE_REG, err));

	gran = FIELD_GET(ID_AA64MMFR0_EL1_TGRAN4, val);
	*ipa4k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN4_NI,
					ID_AA64MMFR0_EL1_TGRAN4_52_BIT);

	gran = FIELD_GET(ID_AA64MMFR0_EL1_TGRAN64, val);
	*ipa64k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN64_NI,
					ID_AA64MMFR0_EL1_TGRAN64_IMP);

	gran = FIELD_GET(ID_AA64MMFR0_EL1_TGRAN16, val);
	*ipa16k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN16_NI,
					ID_AA64MMFR0_EL1_TGRAN16_52_BIT);

	close(vcpu_fd);
	close(vm_fd);
	close(kvm_fd);
}

#define __smccc_call(insn, function_id, arg0, arg1, arg2, arg3, arg4, arg5,	\
		     arg6, res)							\
	asm volatile("mov   w0, %w[function_id]\n"				\
		     "mov   x1, %[arg0]\n"					\
		     "mov   x2, %[arg1]\n"					\
		     "mov   x3, %[arg2]\n"					\
		     "mov   x4, %[arg3]\n"					\
		     "mov   x5, %[arg4]\n"					\
		     "mov   x6, %[arg5]\n"					\
		     "mov   x7, %[arg6]\n"					\
		     #insn  "#0\n"						\
		     "mov   %[res0], x0\n"					\
		     "mov   %[res1], x1\n"					\
		     "mov   %[res2], x2\n"					\
		     "mov   %[res3], x3\n"					\
		     : [res0] "=r"(res->a0), [res1] "=r"(res->a1),		\
		       [res2] "=r"(res->a2), [res3] "=r"(res->a3)		\
		     : [function_id] "r"(function_id), [arg0] "r"(arg0),	\
		       [arg1] "r"(arg1), [arg2] "r"(arg2), [arg3] "r"(arg3),	\
		       [arg4] "r"(arg4), [arg5] "r"(arg5), [arg6] "r"(arg6)	\
		     : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7")

void smccc_hvc(u32 function_id, u64 arg0, u64 arg1,
	       u64 arg2, u64 arg3, u64 arg4, u64 arg5,
	       u64 arg6, struct arm_smccc_res *res)
{
	__smccc_call(hvc, function_id, arg0, arg1, arg2, arg3, arg4, arg5,
		     arg6, res);
}

void smccc_smc(u32 function_id, u64 arg0, u64 arg1,
	       u64 arg2, u64 arg3, u64 arg4, u64 arg5,
	       u64 arg6, struct arm_smccc_res *res)
{
	__smccc_call(smc, function_id, arg0, arg1, arg2, arg3, arg4, arg5,
		     arg6, res);
}
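
/*
 * A minimal guest-side usage sketch, querying the PSCI version through
 * the HVC conduit (PSCI_0_2_FN_PSCI_VERSION is from linux/psci.h,
 * SMCCC_RET_NOT_SUPPORTED from linux/arm-smccc.h):
 *
 *	struct arm_smccc_res res;
 *
 *	smccc_hvc(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0, 0, 0, 0, 0, &res);
 *	GUEST_ASSERT(res.a0 != SMCCC_RET_NOT_SUPPORTED);
 */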

void kvm_selftest_arch_init(void)
{
	/*
	 * arm64 doesn't have a true default mode, so start by computing the
	 * available IPA space and page sizes early.
	 */
	guest_modes_append_default();
}

void vm_populate_gva_bitmap(struct kvm_vm *vm)
{
	/*
	 * arm64 selftests use only TTBR0_EL1, meaning that the valid VA space
	 * is [0, 2^(64 - TCR_EL1.T0SZ)).
	 */
	sparsebit_set_num(vm->vpages_valid, 0,
			  (1ULL << vm->va_bits) >> vm->page_shift);
}
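
/*
 * Sizing example (informative): for va_bits = 48 and 4K pages this marks
 * 2^36 virtual pages valid, covering the entire [0, 2^48) TTBR0 range.
 */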

/* Helper to execute the wfi instruction. */
void wfi(void)
{
	asm volatile("wfi");
}

static bool request_mte;
static bool request_vgic = true;

void test_wants_mte(void)
{
	request_mte = true;
}

void test_disable_default_vgic(void)
{
	request_vgic = false;
}

void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus)
{
	if (request_mte && vm_check_cap(vm, KVM_CAP_ARM_MTE))
		vm_enable_cap(vm, KVM_CAP_ARM_MTE, 0);

	if (request_vgic && kvm_supports_vgic_v3()) {
		vm->arch.gic_fd = __vgic_v3_setup(vm, nr_vcpus, 64);
		vm->arch.has_gic = true;
	}
}

void kvm_arch_vm_finalize_vcpus(struct kvm_vm *vm)
{
	if (vm->arch.has_gic)
		__vgic_v3_init(vm->arch.gic_fd);
}

void kvm_arch_vm_release(struct kvm_vm *vm)
{
	if (vm->arch.has_gic)
		close(vm->arch.gic_fd);
}

bool kvm_arch_has_default_irqchip(void)
{
	return request_vgic && kvm_supports_vgic_v3();
}
729