xref: /linux/tools/testing/selftests/kvm/arm64/at.c (revision 0fc8f6200d2313278fbf4539bbab74677c685531)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * at - Test for KVM's AT emulation in the EL2&0 and EL1&0 translation regimes.
4  */
5 #include "kvm_util.h"
6 #include "processor.h"
7 #include "test_util.h"
8 #include "ucall.h"
9 
10 #include <asm/sysreg.h>
11 
/* VA (identity-mapped, so also the expected PA) translated by the AT ops. */
#define TEST_ADDR	0x80000000

/* Commands the guest sends to host userspace via GUEST_SYNC(). */
enum {
	CLEAR_ACCESS_FLAG,
};

/* Host userspace pointer to the level-3 stage-1 PTE mapping TEST_ADDR. */
static u64 *ptep_hva;

/*
 * Copy an EL2 register's value into its EL1 counterpart: with VHE, the
 * _EL1 encoding accesses the EL2 register and _EL12 accesses the real
 * EL1 register.
 */
#define copy_el2_to_el1(reg)						\
	write_sysreg_s(read_sysreg_s(SYS_##reg##_EL1), SYS_##reg##_EL12)

/*
 * Yes, this is an ugly hack: AT shares the sys-instruction encoding
 * space, so "writing" the address to the OP_AT_* encoding emits the AT
 * instruction itself.
 */
#define __at(op, addr)	write_sysreg_s(addr, op)
25 
/*
 * Issue the AT instruction 'op' on TEST_ADDR and validate PAR_EL1.
 *
 * The host is first asked (via GUEST_SYNC) to clear the access flag in
 * the stage-1 PTE and drop the stage-2 mapping of the page tables, so
 * the translation outcome reliably reflects whether HW access-flag
 * updates are enabled:
 *  - expect_fault: the walk must report a level-3 access flag fault;
 *  - otherwise: the walk must succeed with the expected memory
 *    attributes, shareability and (identity-mapped) PA.
 */
#define test_at_insn(op, expect_fault)							\
do {											\
	u64 par, fsc;									\
	bool fault;									\
											\
	GUEST_SYNC(CLEAR_ACCESS_FLAG);							\
											\
	__at(OP_AT_##op, TEST_ADDR);							\
	isb();										\
	par = read_sysreg(par_el1);							\
											\
	fault = par & SYS_PAR_EL1_F;							\
	fsc = FIELD_GET(SYS_PAR_EL1_FST, par);						\
											\
	/* Fix: drop the stray '1' that was appended to the assert message */	\
	__GUEST_ASSERT((expect_fault) == fault,						\
		       "AT "#op": %sexpected fault (par: %lx)",				\
		       (expect_fault) ? "" : "un", par);				\
	if ((expect_fault)) {								\
		__GUEST_ASSERT(fsc == ESR_ELx_FSC_ACCESS_L(3),				\
			       "AT "#op": expected access flag fault (par: %lx)",	\
			       par);							\
	} else {									\
		GUEST_ASSERT_EQ(FIELD_GET(SYS_PAR_EL1_ATTR, par), MAIR_ATTR_NORMAL);	\
		GUEST_ASSERT_EQ(FIELD_GET(SYS_PAR_EL1_SH, par), PTE_SHARED >> 8);	\
		GUEST_ASSERT_EQ(par & SYS_PAR_EL1_PA, TEST_ADDR);			\
	}										\
} while (0)
53 
/*
 * Exercise the AT instructions in both translation regimes, expecting
 * the same outcome (fault vs. successful translation) from each.
 *
 * Starts in the EL2&0 (host/VHE) regime with the S1E2* ops, then
 * reconfigures the vCPU into a non-host EL1&0 context reusing the same
 * stage-1 tables and repeats with the S1E1* ops.
 */
static void test_at(bool expect_fault)
{
	test_at_insn(S1E2R, expect_fault);
	test_at_insn(S1E2W, expect_fault);

	/* Reuse the stage-1 MMU context from EL2 at EL1 */
	copy_el2_to_el1(SCTLR);
	copy_el2_to_el1(MAIR);
	copy_el2_to_el1(TCR);
	copy_el2_to_el1(TTBR0);
	copy_el2_to_el1(TTBR1);

	/* Disable stage-2 translation and enter a non-host context */
	write_sysreg(0, vtcr_el2);
	write_sysreg(0, vttbr_el2);
	sysreg_clear_set(hcr_el2, HCR_EL2_TGE | HCR_EL2_VM, 0);
	isb();	/* make the new translation context visible before AT */

	test_at_insn(S1E1R, expect_fault);
	test_at_insn(S1E1W, expect_fault);
}
75 
76 static void guest_code(void)
77 {
78 	sysreg_clear_set(tcr_el1, TCR_HA, 0);
79 	isb();
80 
81 	test_at(true);
82 
83 	if (!SYS_FIELD_GET(ID_AA64MMFR1_EL1, HAFDBS, read_sysreg(id_aa64mmfr1_el1)))
84 		GUEST_DONE();
85 
86 	sysreg_clear_set(tcr_el1, 0, TCR_HA);
87 	isb();
88 	test_at(false);
89 
90 	GUEST_DONE();
91 }
92 
93 static void handle_sync(struct kvm_vcpu *vcpu, struct ucall *uc)
94 {
95 	switch (uc->args[1]) {
96 	case CLEAR_ACCESS_FLAG:
97 		/*
98 		 * Delete + reinstall the memslot to invalidate stage-2
99 		 * mappings of the stage-1 page tables, allowing KVM to
100 		 * potentially use the 'slow' AT emulation path.
101 		 *
102 		 * This and clearing the access flag from host userspace
103 		 * ensures that the access flag cannot be set speculatively
104 		 * and is reliably cleared at the time of the AT instruction.
105 		 */
106 		clear_bit(__ffs(PTE_AF), ptep_hva);
107 		vm_mem_region_reload(vcpu->vm, vcpu->vm->memslots[MEM_REGION_PT]);
108 		break;
109 	default:
110 		TEST_FAIL("Unexpected SYNC arg: %lu", uc->args[1]);
111 	}
112 }
113 
114 static void run_test(struct kvm_vcpu *vcpu)
115 {
116 	struct ucall uc;
117 
118 	while (true) {
119 		vcpu_run(vcpu);
120 		switch (get_ucall(vcpu, &uc)) {
121 		case UCALL_DONE:
122 			return;
123 		case UCALL_SYNC:
124 			handle_sync(vcpu, &uc);
125 			continue;
126 		case UCALL_ABORT:
127 			REPORT_GUEST_ASSERT(uc);
128 			return;
129 		default:
130 			TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
131 		}
132 	}
133 }
134 
int main(void)
{
	struct kvm_vcpu_init init;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	/* The test runs the guest at (virtual) EL2; require nested support. */
	TEST_REQUIRE(kvm_check_cap(KVM_CAP_ARM_EL2));

	vm = vm_create(1);

	/* Create the vCPU with the EL2 feature bit set. */
	kvm_get_default_vcpu_target(vm, &init);
	init.features[0] |= BIT(KVM_ARM_VCPU_HAS_EL2);
	vcpu = aarch64_vcpu_add(vm, 0, &init, guest_code);
	kvm_arch_vm_finalize_vcpus(vm);

	/*
	 * Identity-map one page at TEST_ADDR and record the host pointer
	 * to its level-3 PTE so handle_sync() can clear the access flag.
	 */
	virt_map(vm, TEST_ADDR, TEST_ADDR, 1);
	ptep_hva = virt_get_pte_hva_at_level(vm, TEST_ADDR, 3);
	run_test(vcpu);

	kvm_vm_free(vm);
	return 0;
}
157