xref: /linux/tools/testing/selftests/kvm/lib/aarch64/processor.c (revision 64b14a184e83eb62ea0615e31a409956049d40e7)
// SPDX-License-Identifier: GPL-2.0
/*
 * AArch64 code
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */

#include <linux/compiler.h>
#include <assert.h>

#include "guest_modes.h"
#include "kvm_util.h"
#include "../kvm_util_internal.h"
#include "processor.h"

#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN	0xac0000

static vm_vaddr_t exception_handlers;

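/*
 * Round v up to a page boundary.  Note this always advances by at
 * least one page: an already-aligned v gains a full extra page, which
 * is a harmless over-allocation for the caller below.
 */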
static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
{
	return (v + vm->page_size) & ~(vm->page_size - 1);
}

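/*
 * Each translation level resolves page_shift - 3 bits of the VA
 * (a page holds page_size / 8 eight-byte descriptors).  Worked
 * example: with 4K pages (page_shift = 12), 4 levels and
 * va_bits = 48, each level covers 9 bits, the PGD lookup shifts out
 * 3 * 9 + 12 = 39 bits, and pgd_index() returns VA bits [47:39].
 */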
static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;

	return (gva >> shift) & mask;
}

static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->pgtable_levels == 4,
		"Mode %d does not have 4 page table levels", vm->mode);

	return (gva >> shift) & mask;
}

static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->pgtable_levels >= 3,
		"Mode %d does not have >= 3 page table levels", vm->mode);

	return (gva >> shift) & mask;
}

static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
	return (gva >> vm->page_shift) & mask;
}

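/*
 * Mask a descriptor down to the output address of the next-level
 * table or the mapped page, dropping the attribute bits.
 */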
static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
{
	uint64_t mask = ((1UL << (vm->va_bits - vm->page_shift)) - 1) << vm->page_shift;
	return entry & mask;
}

static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
{
	unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
	return 1 << (vm->va_bits - shift);
}

static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
{
	return 1 << (vm->page_shift - 3);
}

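/*
 * Allocate the PGD on first use; aarch64_vcpu_setup() later points
 * TTBR0_EL1 at it.
 */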
void virt_pgd_alloc(struct kvm_vm *vm)
{
	if (!vm->pgd_created) {
		vm_paddr_t paddr = vm_phy_pages_alloc(vm,
			page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size,
			KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
		vm->pgd = paddr;
		vm->pgd_created = true;
	}
}

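/*
 * Build the translation entries for a single VA->PA mapping.  The low
 * two descriptor bits (the "| 3" below) mark an entry as valid and as
 * a table (intermediate levels) or a page (final level); the leaf
 * entry additionally carries the MAIR attribute index in bits [4:2]
 * and the Access Flag in bit 10 so the first access does not fault.
 */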
static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
			 uint64_t flags)
{
	uint8_t attr_idx = flags & 7;
	uint64_t *ptep;

	TEST_ASSERT((vaddr % vm->page_size) == 0,
		"Virtual address not on page boundary,\n"
		"  vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		(vaddr >> vm->page_shift)),
		"Invalid virtual address, vaddr: 0x%lx", vaddr);
	TEST_ASSERT((paddr % vm->page_size) == 0,
		"Physical address not on page boundary,\n"
		"  paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
		"Physical address beyond maximum supported,\n"
		"  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		paddr, vm->max_gfn, vm->page_size);

	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
	if (!*ptep)
		*ptep = vm_alloc_page_table(vm) | 3;

	switch (vm->pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
		if (!*ptep)
			*ptep = vm_alloc_page_table(vm) | 3;
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
		if (!*ptep)
			*ptep = vm_alloc_page_table(vm) | 3;
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
		break;
	default:
		TEST_FAIL("Page table levels must be 2, 3, or 4");
	}

	*ptep = paddr | 3;
	*ptep |= (attr_idx << 2) | (1 << 10) /* Access Flag */;
}

void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	uint64_t attr_idx = 4; /* NORMAL (See DEFAULT_MAIR_EL1) */

	_virt_pg_map(vm, vaddr, paddr, attr_idx);
}

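/*
 * Translate a guest virtual address to a guest physical address by
 * walking the guest's translation tables from the host side,
 * mirroring the hardware walk.
 */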
vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t *ptep;

	if (!vm->pgd_created)
		goto unmapped_gva;

	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
	if (!*ptep)
		goto unmapped_gva;

	switch (vm->pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
		if (!*ptep)
			goto unmapped_gva;
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
		if (!*ptep)
			goto unmapped_gva;
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
		if (!*ptep)
			goto unmapped_gva;
		break;
	default:
		TEST_FAIL("Page table levels must be 2, 3, or 4");
	}

	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));

unmapped_gva:
	TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
	exit(1);
}

static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
{
#ifdef DEBUG
	static const char * const type[] = { "", "pud", "pmd", "pte" };
	uint64_t pte, *ptep;

	if (level == 4)
		return;

	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
		ptep = addr_gpa2hva(vm, pte);
		if (!*ptep)
			continue;
		fprintf(stream, "%*s%s: %lx: %lx at %p\n", indent, "", type[level], pte, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level + 1);
	}
#endif
}

void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	int level = 4 - (vm->pgtable_levels - 1);
	uint64_t pgd, *ptep;

	if (!vm->pgd_created)
		return;

	for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) {
		ptep = addr_gpa2hva(vm, pgd);
		if (!*ptep)
			continue;
		fprintf(stream, "%*spgd: %lx: %lx at %p\n", indent, "", pgd, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level);
	}
}

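/*
 * Initialize a vcpu: run KVM_ARM_VCPU_INIT (falling back to the
 * preferred target when no init is given), then program the EL1 state
 * needed to run guest C code with the MMU and caches enabled:
 * CPACR_EL1 (FP/SIMD), SCTLR_EL1, TCR_EL1, MAIR_EL1, TTBR0_EL1 and
 * TPIDR_EL1 (which guest_get_vcpuid() reads back).
 */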
void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init *init)
{
	struct kvm_vcpu_init default_init = { .target = -1, };
	uint64_t sctlr_el1, tcr_el1;

	if (!init)
		init = &default_init;

	if (init->target == -1) {
		struct kvm_vcpu_init preferred;
		vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &preferred);
		init->target = preferred.target;
	}

	vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_INIT, init);

	/*
	 * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
	 * registers, which the variable argument list macros do.
	 */
	set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20);

	get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1);
	get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1);

	/* Configure base granule size */
	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
		TEST_FAIL("AArch64 does not support 4K sized pages "
			  "with 52-bit physical address ranges");
	case VM_MODE_PXXV48_4K:
		TEST_FAIL("AArch64 does not support 4K sized pages "
			  "with ANY-bit physical address ranges");
	case VM_MODE_P52V48_64K:
	case VM_MODE_P48V48_64K:
	case VM_MODE_P40V48_64K:
	case VM_MODE_P36V48_64K:
		tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
		break;
	case VM_MODE_P48V48_16K:
	case VM_MODE_P40V48_16K:
	case VM_MODE_P36V48_16K:
	case VM_MODE_P36V47_16K:
		tcr_el1 |= 2ul << 14; /* TG0 = 16KB */
		break;
	case VM_MODE_P48V48_4K:
	case VM_MODE_P40V48_4K:
	case VM_MODE_P36V48_4K:
		tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	/* Configure output size */
	switch (vm->mode) {
	case VM_MODE_P52V48_64K:
		tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
		break;
	case VM_MODE_P48V48_4K:
	case VM_MODE_P48V48_16K:
	case VM_MODE_P48V48_64K:
		tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
		break;
	case VM_MODE_P40V48_4K:
	case VM_MODE_P40V48_16K:
	case VM_MODE_P40V48_64K:
		tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
		break;
	case VM_MODE_P36V48_4K:
	case VM_MODE_P36V48_16K:
	case VM_MODE_P36V48_64K:
	case VM_MODE_P36V47_16K:
		tcr_el1 |= 1ul << 32; /* IPS = 36 bits */
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	sctlr_el1 |= (1 << 0) | (1 << 2) | (1 << 12) /* M | C | I */;
	/* TCR_EL1 |= IRGN0:WBWA | ORGN0:WBWA | SH0:Inner-Shareable */
	tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12);
	tcr_el1 |= (64 - vm->va_bits) /* T0SZ */;

	set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1);
	set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1);
	set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1);
	set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), vm->pgd);
	set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpuid);
}

void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
{
	uint64_t pstate, pc;

	get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pstate), &pstate);
	get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &pc);

	fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n",
		indent, "", pstate, pc);
}

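/*
 * With 4K pages the guest stack spans DEFAULT_STACK_PGS pages; with
 * the larger granules a single page is used instead.
 */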
void aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid,
			      struct kvm_vcpu_init *init, void *guest_code)
{
	size_t stack_size = vm->page_size == 4096 ?
					DEFAULT_STACK_PGS * vm->page_size :
					vm->page_size;
	uint64_t stack_vaddr = vm_vaddr_alloc(vm, stack_size,
					      DEFAULT_ARM64_GUEST_STACK_VADDR_MIN);

	vm_vcpu_add(vm, vcpuid);
	aarch64_vcpu_setup(vm, vcpuid, init);

	set_reg(vm, vcpuid, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
	set_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
}

void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
	aarch64_vcpu_add_default(vm, vcpuid, NULL, guest_code);
}

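/*
 * Guest arguments are passed in x0-x7, matching the AAPCS64 calling
 * convention the guest entry point expects.
 */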
void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
{
	va_list ap;
	int i;

	TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
		    "  num: %u\n", num);

	va_start(ap, num);

	for (i = 0; i < num; i++) {
		set_reg(vm, vcpuid, ARM64_CORE_REG(regs.regs[i]),
			va_arg(ap, uint64_t));
	}

	va_end(ap);
}

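/*
 * Report an unexpected guest exception to the host via ucall, then
 * spin; the guest cannot safely continue past this point.
 */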
void kvm_exit_unexpected_exception(int vector, uint64_t ec, bool valid_ec)
{
	ucall(UCALL_UNHANDLED, 3, vector, ec, valid_ec);
	while (1)
		;
}

void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct ucall uc;

	if (get_ucall(vm, vcpuid, &uc) != UCALL_UNHANDLED)
		return;

	if (uc.args[2]) /* valid_ec */ {
		assert(VECTOR_IS_SYNC(uc.args[0]));
		TEST_FAIL("Unexpected exception (vector:0x%lx, ec:0x%lx)",
			  uc.args[0], uc.args[1]);
	} else {
		assert(!VECTOR_IS_SYNC(uc.args[0]));
		TEST_FAIL("Unexpected exception (vector:0x%lx)",
			  uc.args[0]);
	}
}

struct handlers {
	handler_fn exception_handlers[VECTOR_NUM][ESR_EC_NUM];
};

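/*
 * "vectors" is the EL1 exception vector table provided by the
 * library's assembly handlers; point VBAR_EL1 at it.
 */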
void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid)
{
	extern char vectors;

	set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors);
}

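/*
 * Common C entry point for all vectors.  Synchronous exceptions are
 * demultiplexed further by the exception class (EC) read from
 * ESR_EL1; the asynchronous vectors share the EC 0 slot.  handlers is
 * NULL until vm_init_descriptor_tables() has run.
 */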
void route_exception(struct ex_regs *regs, int vector)
{
	struct handlers *handlers = (struct handlers *)exception_handlers;
	bool valid_ec;
	int ec = 0;

	switch (vector) {
	case VECTOR_SYNC_CURRENT:
	case VECTOR_SYNC_LOWER_64:
		ec = (read_sysreg(esr_el1) >> ESR_EC_SHIFT) & ESR_EC_MASK;
		valid_ec = true;
		break;
	case VECTOR_IRQ_CURRENT:
	case VECTOR_IRQ_LOWER_64:
	case VECTOR_FIQ_CURRENT:
	case VECTOR_FIQ_LOWER_64:
	case VECTOR_ERROR_CURRENT:
	case VECTOR_ERROR_LOWER_64:
		ec = 0;
		valid_ec = false;
		break;
	default:
		valid_ec = false;
		goto unexpected_exception;
	}

	if (handlers && handlers->exception_handlers[vector][ec])
		return handlers->exception_handlers[vector][ec](regs);

unexpected_exception:
	kvm_exit_unexpected_exception(vector, ec, valid_ec);
}

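/*
 * Allocate the handler table in guest memory and patch the guest's
 * copy of the exception_handlers global (through its host mapping) so
 * route_exception() can find the table.
 */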
void vm_init_descriptor_tables(struct kvm_vm *vm)
{
	vm->handlers = vm_vaddr_alloc(vm, sizeof(struct handlers),
			vm->page_size);

	*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
}

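/*
 * Typical usage from a test (illustrative sketch; the handler body is
 * hypothetical, and 0x3c is the architectural EC for a BRK instruction
 * executed in AArch64 state):
 *
 *	static void brk_handler(struct ex_regs *regs)
 *	{
 *		regs->pc += 4;		// step over the brk instruction
 *	}
 *
 *	vm_init_descriptor_tables(vm);
 *	vcpu_init_descriptor_tables(vm, vcpuid);
 *	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, 0x3c, brk_handler);
 */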
void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec,
			 void (*handler)(struct ex_regs *))
{
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	assert(VECTOR_IS_SYNC(vector));
	assert(vector < VECTOR_NUM);
	assert(ec < ESR_EC_NUM);
	handlers->exception_handlers[vector][ec] = handler;
}

void vm_install_exception_handler(struct kvm_vm *vm, int vector,
			 void (*handler)(struct ex_regs *))
{
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	assert(!VECTOR_IS_SYNC(vector));
	assert(vector < VECTOR_NUM);
	handlers->exception_handlers[vector][0] = handler;
}

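/* TPIDR_EL1 is loaded with the vcpuid by aarch64_vcpu_setup(). */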
uint32_t guest_get_vcpuid(void)
{
	return read_sysreg(tpidr_el1);
}

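/*
 * Determine which granule sizes the host supports by creating a
 * scratch VM and vcpu just long enough to read ID_AA64MMFR0_EL1.
 */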
void aarch64_get_supported_page_sizes(uint32_t ipa,
				      bool *ps4k, bool *ps16k, bool *ps64k)
{
	struct kvm_vcpu_init preferred_init;
	int kvm_fd, vm_fd, vcpu_fd, err;
	uint64_t val;
	struct kvm_one_reg reg = {
		.id	= KVM_ARM64_SYS_REG(SYS_ID_AA64MMFR0_EL1),
		.addr	= (uint64_t)&val,
	};

	kvm_fd = open_kvm_dev_path_or_exit();
	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, ipa);
	TEST_ASSERT(vm_fd >= 0, "Can't create VM");

	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
	TEST_ASSERT(vcpu_fd >= 0, "Can't create vcpu");

	err = ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &preferred_init);
	TEST_ASSERT(err == 0, "Can't get target");
	err = ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &preferred_init);
	TEST_ASSERT(err == 0, "Can't init vcpu");

	err = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	TEST_ASSERT(err == 0, "Can't get MMFR0");

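	/*
	 * ID_AA64MMFR0_EL1 granule fields: TGran4 is bits [31:28]
	 * (0xf == unsupported), TGran64 is bits [27:24]
	 * (0 == supported), TGran16 is bits [23:20] (0 == unsupported).
	 */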
	*ps4k = ((val >> 28) & 0xf) != 0xf;
	*ps64k = ((val >> 24) & 0xf) == 0;
	*ps16k = ((val >> 20) & 0xf) != 0;

	close(vcpu_fd);
	close(vm_fd);
	close(kvm_fd);
}

/*
 * arm64 doesn't have a true default mode, so start by computing the
 * available IPA space and page sizes early.
 */
void __attribute__((constructor)) init_guest_modes(void)
{
	guest_modes_append_default();
}
503