xref: /linux/tools/testing/selftests/kvm/lib/aarch64/processor.c (revision e7d759f31ca295d589f7420719c311870bb3166f)
// SPDX-License-Identifier: GPL-2.0
/*
 * AArch64 code
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */

#include <linux/compiler.h>
#include <assert.h>

#include "guest_modes.h"
#include "kvm_util.h"
#include "processor.h"
#include <linux/bitfield.h>
#include <linux/sizes.h>

#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN	0xac0000

static vm_vaddr_t exception_handlers;

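/*
 * Round v up to the VM's page size, e.g. with 4K pages 0x1001 becomes
 * 0x2000 while 0x1000 is left unchanged.
 */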
static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
{
	return (v + vm->page_size - 1) & ~(vm->page_size - 1);
}

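/*
 * Index of the PGD entry covering gva. E.g. with 4K pages (page_shift = 12),
 * four levels and 48-bit VAs, shift = 3 * 9 + 12 = 39, so the PGD decodes
 * VA bits [47:39] and holds 512 entries.
 */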
static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;

	return (gva >> shift) & mask;
}

static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->pgtable_levels == 4,
		"Mode %d does not have 4 page table levels", vm->mode);

	return (gva >> shift) & mask;
}

static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->pgtable_levels >= 3,
		"Mode %d does not have >= 3 page table levels", vm->mode);

	return (gva >> shift) & mask;
}

static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
	return (gva >> vm->page_shift) & mask;
}

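/*
 * FEAT_LPA2 (4K/16K granules with 52-bit addresses) changes the descriptor
 * layout: output address bits [51:50] are carried in descriptor bits [9:8],
 * which otherwise hold the shareability field.
 */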
static inline bool use_lpa2_pte_format(struct kvm_vm *vm)
{
	return (vm->page_size == SZ_4K || vm->page_size == SZ_16K) &&
	    (vm->pa_bits > 48 || vm->va_bits > 48);
}

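/*
 * Build a descriptor from a physical address and attribute bits. Without
 * LPA2, 64K granules place PA bits [51:48] in descriptor bits [15:12].
 */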
static uint64_t addr_pte(struct kvm_vm *vm, uint64_t pa, uint64_t attrs)
{
	uint64_t pte;

	if (use_lpa2_pte_format(vm)) {
		pte = pa & GENMASK(49, vm->page_shift);
		pte |= FIELD_GET(GENMASK(51, 50), pa) << 8;
		attrs &= ~GENMASK(9, 8);
	} else {
		pte = pa & GENMASK(47, vm->page_shift);
		if (vm->page_shift == 16)
			pte |= FIELD_GET(GENMASK(51, 48), pa) << 12;
	}
	pte |= attrs;

	return pte;
}

static uint64_t pte_addr(struct kvm_vm *vm, uint64_t pte)
{
	uint64_t pa;

	if (use_lpa2_pte_format(vm)) {
		pa = pte & GENMASK(49, vm->page_shift);
		pa |= FIELD_GET(GENMASK(9, 8), pte) << 50;
	} else {
		pa = pte & GENMASK(47, vm->page_shift);
		if (vm->page_shift == 16)
			pa |= FIELD_GET(GENMASK(15, 12), pte) << 48;
	}

	return pa;
}

static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
{
	unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
	return 1 << (vm->va_bits - shift);
}

static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
{
	return 1 << (vm->page_shift - 3);
}

void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
	size_t nr_pages = page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size;

	if (vm->pgd_created)
		return;

	vm->pgd = vm_phy_pages_alloc(vm, nr_pages,
				     KVM_GUEST_PAGE_TABLE_MIN_PADDR,
				     vm->memslots[MEM_REGION_PT]);
	vm->pgd_created = true;
}

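/*
 * Map one page. The low three bits of flags select the MAIR_EL1 attribute
 * index; the leaf descriptor below sets AttrIndx, the Access Flag (bit 10)
 * and the valid+page bits (0b11).
 */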
static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
			 uint64_t flags)
{
	uint8_t attr_idx = flags & 7;
	uint64_t *ptep;

	TEST_ASSERT((vaddr % vm->page_size) == 0,
		"Virtual address not on page boundary,\n"
		"  vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		(vaddr >> vm->page_shift)),
		"Invalid virtual address, vaddr: 0x%lx", vaddr);
	TEST_ASSERT((paddr % vm->page_size) == 0,
		"Physical address not on page boundary,\n"
		"  paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
		"Physical address beyond maximum supported,\n"
		"  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		paddr, vm->max_gfn, vm->page_size);

	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
	if (!*ptep)
		*ptep = addr_pte(vm, vm_alloc_page_table(vm), 3);

	switch (vm->pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
		if (!*ptep)
			*ptep = addr_pte(vm, vm_alloc_page_table(vm), 3);
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
		if (!*ptep)
			*ptep = addr_pte(vm, vm_alloc_page_table(vm), 3);
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
		break;
	default:
		TEST_FAIL("Page table levels must be 2, 3, or 4");
	}

	*ptep = addr_pte(vm, paddr, (attr_idx << 2) | (1 << 10) | 3);  /* AF */
}

void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	uint64_t attr_idx = MT_NORMAL;

	_virt_pg_map(vm, vaddr, paddr, attr_idx);
}

uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t *ptep;

	if (!vm->pgd_created)
		goto unmapped_gva;

	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
	if (!*ptep)
		goto unmapped_gva;

	switch (vm->pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
		if (!*ptep)
			goto unmapped_gva;
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
		if (!*ptep)
			goto unmapped_gva;
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
		if (!*ptep)
			goto unmapped_gva;
		break;
	default:
		TEST_FAIL("Page table levels must be 2, 3, or 4");
	}

	return ptep;

unmapped_gva:
	TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
	exit(EXIT_FAILURE);
}

vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t *ptep = virt_get_pte_hva(vm, gva);

	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
}

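/*
 * Recursively dump a page table (DEBUG builds only). "level" indexes the
 * type[] names: with four levels the walk starts at the PUD and stops once
 * the PTE level has been printed.
 */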
static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
{
#ifdef DEBUG
	static const char * const type[] = { "", "pud", "pmd", "pte" };
	uint64_t pte, *ptep;

	if (level == 4)
		return;

	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
		ptep = addr_gpa2hva(vm, pte);
		if (!*ptep)
			continue;
		fprintf(stream, "%*s%s: %lx: %lx at %p\n", indent, "", type[level], pte, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level + 1);
	}
#endif
}

void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	int level = 4 - (vm->pgtable_levels - 1);
	uint64_t pgd, *ptep;

	if (!vm->pgd_created)
		return;

	for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) {
		ptep = addr_gpa2hva(vm, pgd);
		if (!*ptep)
			continue;
		fprintf(stream, "%*spgd: %lx: %lx at %p\n", indent, "", pgd, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level);
	}
}

void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
{
	struct kvm_vcpu_init default_init = { .target = -1, };
	struct kvm_vm *vm = vcpu->vm;
	uint64_t sctlr_el1, tcr_el1, ttbr0_el1;

	if (!init)
		init = &default_init;

	if (init->target == -1) {
		struct kvm_vcpu_init preferred;
		vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &preferred);
		init->target = preferred.target;
	}

	vcpu_ioctl(vcpu, KVM_ARM_VCPU_INIT, init);

	/*
	 * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
	 * registers, which the variable argument list macros do.
	 */
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20);

	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1);
	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1);

	/* Configure base granule size */
	switch (vm->mode) {
	case VM_MODE_PXXV48_4K:
		TEST_FAIL("AArch64 does not support 4K sized pages "
			  "with ANY-bit physical address ranges");
	case VM_MODE_P52V48_64K:
	case VM_MODE_P48V48_64K:
	case VM_MODE_P40V48_64K:
	case VM_MODE_P36V48_64K:
		tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
		break;
	case VM_MODE_P52V48_16K:
	case VM_MODE_P48V48_16K:
	case VM_MODE_P40V48_16K:
	case VM_MODE_P36V48_16K:
	case VM_MODE_P36V47_16K:
		tcr_el1 |= 2ul << 14; /* TG0 = 16KB */
		break;
	case VM_MODE_P52V48_4K:
	case VM_MODE_P48V48_4K:
	case VM_MODE_P40V48_4K:
	case VM_MODE_P36V48_4K:
		tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	ttbr0_el1 = vm->pgd & GENMASK(47, vm->page_shift);

	/* Configure output size */
	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
	case VM_MODE_P52V48_16K:
	case VM_MODE_P52V48_64K:
		tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
		ttbr0_el1 |= FIELD_GET(GENMASK(51, 48), vm->pgd) << 2;
		break;
	case VM_MODE_P48V48_4K:
	case VM_MODE_P48V48_16K:
	case VM_MODE_P48V48_64K:
		tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
		break;
	case VM_MODE_P40V48_4K:
	case VM_MODE_P40V48_16K:
	case VM_MODE_P40V48_64K:
		tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
		break;
	case VM_MODE_P36V48_4K:
	case VM_MODE_P36V48_16K:
	case VM_MODE_P36V48_64K:
	case VM_MODE_P36V47_16K:
		tcr_el1 |= 1ul << 32; /* IPS = 36 bits */
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	sctlr_el1 |= (1 << 0) | (1 << 2) | (1 << 12) /* M | C | I */;
	/* TCR_EL1 |= IRGN0:WBWA | ORGN0:WBWA | SH0:Inner-Shareable */
	tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12);
	tcr_el1 |= (64 - vm->va_bits) /* T0SZ */;
	if (use_lpa2_pte_format(vm))
		tcr_el1 |= (1ul << 59) /* DS */;

	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), ttbr0_el1);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpu->id);
}

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
	uint64_t pstate, pc;

	vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate), &pstate);
	vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc);

	fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n",
		indent, "", pstate, pc);
}

struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
				  struct kvm_vcpu_init *init, void *guest_code)
{
	size_t stack_size;
	uint64_t stack_vaddr;
	struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);

	stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
					     vm->page_size;
	stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
				       DEFAULT_ARM64_GUEST_STACK_VADDR_MIN,
				       MEM_REGION_DATA);

	aarch64_vcpu_setup(vcpu, init);

	vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);

	return vcpu;
}

struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
				  void *guest_code)
{
	return aarch64_vcpu_add(vm, vcpu_id, NULL, guest_code);
}

void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
	va_list ap;
	int i;

	TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
		    "  num: %u\n", num);

	va_start(ap, num);

	for (i = 0; i < num; i++) {
		vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.regs[i]),
			     va_arg(ap, uint64_t));
	}

	va_end(ap);
}

void kvm_exit_unexpected_exception(int vector, uint64_t ec, bool valid_ec)
{
	ucall(UCALL_UNHANDLED, 3, vector, ec, valid_ec);
	while (1)
		;
}

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	if (get_ucall(vcpu, &uc) != UCALL_UNHANDLED)
		return;

	if (uc.args[2]) /* valid_ec */ {
		assert(VECTOR_IS_SYNC(uc.args[0]));
		TEST_FAIL("Unexpected exception (vector:0x%lx, ec:0x%lx)",
			  uc.args[0], uc.args[1]);
	} else {
		assert(!VECTOR_IS_SYNC(uc.args[0]));
		TEST_FAIL("Unexpected exception (vector:0x%lx)",
			  uc.args[0]);
	}
}

struct handlers {
	handler_fn exception_handlers[VECTOR_NUM][ESR_EC_NUM];
};

void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu)
{
	extern char vectors;

	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors);
}

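/*
 * First-level dispatch for the assembly vectors. Synchronous exceptions are
 * further demultiplexed on the ESR_EL1 exception class; all other vectors
 * share the single handler registered at EC 0.
 */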
void route_exception(struct ex_regs *regs, int vector)
{
	struct handlers *handlers = (struct handlers *)exception_handlers;
	bool valid_ec;
	int ec = 0;

	switch (vector) {
	case VECTOR_SYNC_CURRENT:
	case VECTOR_SYNC_LOWER_64:
		ec = (read_sysreg(esr_el1) >> ESR_EC_SHIFT) & ESR_EC_MASK;
		valid_ec = true;
		break;
	case VECTOR_IRQ_CURRENT:
	case VECTOR_IRQ_LOWER_64:
	case VECTOR_FIQ_CURRENT:
	case VECTOR_FIQ_LOWER_64:
	case VECTOR_ERROR_CURRENT:
	case VECTOR_ERROR_LOWER_64:
		ec = 0;
		valid_ec = false;
		break;
	default:
		valid_ec = false;
		goto unexpected_exception;
	}

	if (handlers && handlers->exception_handlers[vector][ec])
		return handlers->exception_handlers[vector][ec](regs);

unexpected_exception:
	kvm_exit_unexpected_exception(vector, ec, valid_ec);
}

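/*
 * Allocate the handlers table in guest memory and patch the guest-side
 * exception_handlers global so route_exception() can find it.
 */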
void vm_init_descriptor_tables(struct kvm_vm *vm)
{
	vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
					vm->page_size, MEM_REGION_DATA);

	*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
}

void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec,
			 void (*handler)(struct ex_regs *))
{
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	assert(VECTOR_IS_SYNC(vector));
	assert(vector < VECTOR_NUM);
	assert(ec < ESR_EC_NUM);
	handlers->exception_handlers[vector][ec] = handler;
}

void vm_install_exception_handler(struct kvm_vm *vm, int vector,
			 void (*handler)(struct ex_regs *))
{
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	assert(!VECTOR_IS_SYNC(vector));
	assert(vector < VECTOR_NUM);
	handlers->exception_handlers[vector][0] = handler;
}

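/*
 * aarch64_vcpu_setup() stashed the vcpu id in TPIDR_EL1, so guest code can
 * recover it with a plain sysreg read.
 */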
uint32_t guest_get_vcpuid(void)
{
	return read_sysreg(tpidr_el1);
}

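/*
 * Translate an ID_AA64MMFR0_EL1 TGRANx field into the maximum IPA size
 * usable with that granule: 0 if the granule is unsupported, 52 bits when
 * the field advertises 52-bit support, otherwise the VM's IPA capped at 48.
 */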
static uint32_t max_ipa_for_page_size(uint32_t vm_ipa, uint32_t gran,
				uint32_t not_sup_val, uint32_t ipa52_min_val)
{
	if (gran == not_sup_val)
		return 0;
	else if (gran >= ipa52_min_val && vm_ipa >= 52)
		return 52;
	else
		return min(vm_ipa, 48U);
}

void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
					uint32_t *ipa16k, uint32_t *ipa64k)
{
	struct kvm_vcpu_init preferred_init;
	int kvm_fd, vm_fd, vcpu_fd, err;
	uint64_t val;
	uint32_t gran;
	struct kvm_one_reg reg = {
		.id	= KVM_ARM64_SYS_REG(SYS_ID_AA64MMFR0_EL1),
		.addr	= (uint64_t)&val,
	};

	kvm_fd = open_kvm_dev_path_or_exit();
	vm_fd = __kvm_ioctl(kvm_fd, KVM_CREATE_VM, (void *)(unsigned long)ipa);
	TEST_ASSERT(vm_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm_fd));

	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
	TEST_ASSERT(vcpu_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, vcpu_fd));

	err = ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &preferred_init);
	TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_ARM_PREFERRED_TARGET, err));
	err = ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &preferred_init);
	TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_ARM_VCPU_INIT, err));

	err = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_GET_ONE_REG, err));

	gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN4), val);
	*ipa4k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN4_NI,
					ID_AA64MMFR0_EL1_TGRAN4_52_BIT);

	gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN64), val);
	*ipa64k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN64_NI,
					ID_AA64MMFR0_EL1_TGRAN64_IMP);

	gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN16), val);
	*ipa16k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN16_NI,
					ID_AA64MMFR0_EL1_TGRAN16_52_BIT);

	close(vcpu_fd);
	close(vm_fd);
	close(kvm_fd);
}

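/*
 * SMCCC calling convention: the function ID goes in w0, up to seven
 * arguments in x1-x7, and results come back in x0-x3.
 */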
#define __smccc_call(insn, function_id, arg0, arg1, arg2, arg3, arg4, arg5,	\
		     arg6, res)							\
	asm volatile("mov   w0, %w[function_id]\n"				\
		     "mov   x1, %[arg0]\n"					\
		     "mov   x2, %[arg1]\n"					\
		     "mov   x3, %[arg2]\n"					\
		     "mov   x4, %[arg3]\n"					\
		     "mov   x5, %[arg4]\n"					\
		     "mov   x6, %[arg5]\n"					\
		     "mov   x7, %[arg6]\n"					\
		     #insn  "#0\n"						\
		     "mov   %[res0], x0\n"					\
		     "mov   %[res1], x1\n"					\
		     "mov   %[res2], x2\n"					\
		     "mov   %[res3], x3\n"					\
		     : [res0] "=r"(res->a0), [res1] "=r"(res->a1),		\
		       [res2] "=r"(res->a2), [res3] "=r"(res->a3)		\
		     : [function_id] "r"(function_id), [arg0] "r"(arg0),	\
		       [arg1] "r"(arg1), [arg2] "r"(arg2), [arg3] "r"(arg3),	\
		       [arg4] "r"(arg4), [arg5] "r"(arg5), [arg6] "r"(arg6)	\
		     : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7")

void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
	       uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
	       uint64_t arg6, struct arm_smccc_res *res)
{
	__smccc_call(hvc, function_id, arg0, arg1, arg2, arg3, arg4, arg5,
		     arg6, res);
}

void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
	       uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
	       uint64_t arg6, struct arm_smccc_res *res)
{
	__smccc_call(smc, function_id, arg0, arg1, arg2, arg3, arg4, arg5,
		     arg6, res);
}
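
/*
 * Illustrative usage sketch: a guest can query the hypervisor's SMCCC
 * version with something like
 *
 *	struct arm_smccc_res res;
 *
 *	smccc_hvc(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0, 0, 0, 0, 0, &res);
 *
 * after which res.a0 holds the version (or a negative error code).
 */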

void kvm_selftest_arch_init(void)
{
	/*
	 * arm64 doesn't have a true default mode, so start by computing the
	 * available IPA space and page sizes early.
	 */
	guest_modes_append_default();
}

void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
{
	/*
	 * arm64 selftests use only TTBR0_EL1, meaning that the valid VA space
	 * is [0, 2^(64 - TCR_EL1.T0SZ)).
	 */
	sparsebit_set_num(vm->vpages_valid, 0,
			  (1ULL << vm->va_bits) >> vm->page_shift);
}
628