// SPDX-License-Identifier: GPL-2.0-only
/*
 * at - Test for KVM's AT emulation in the EL2&0 and EL1&0 translation regimes.
 */
#include "kvm_util.h"
#include "processor.h"
#include "test_util.h"
#include "ucall.h"

#include <asm/sysreg.h>
11
/* IPA of the page the guest translates with AT; identity-mapped in main(). */
#define TEST_ADDR 0x80000000

/*
 * Commands the guest sends to the host via GUEST_SYNC():
 *  - CLEAR_ACCESS_FLAG: host clears PTE_AF in the stage-1 PTE and reloads
 *    the page-table memslot so the next AT takes the slow emulation path.
 *  - TEST_ACCESS_FLAG: host asserts that the walk set PTE_AF.
 */
enum {
	CLEAR_ACCESS_FLAG,
	TEST_ACCESS_FLAG,
};
18
19 static u64 *ptep_hva;
20
21 #define copy_el2_to_el1(reg) \
22 write_sysreg_s(read_sysreg_s(SYS_##reg##_EL1), SYS_##reg##_EL12)
23
24 /* Yes, this is an ugly hack */
25 #define __at(op, addr) write_sysreg_s(addr, op)
26
/*
 * Issue the AT instruction @op on TEST_ADDR and validate PAR_EL1.
 *
 * The host first clears the access flag (CLEAR_ACCESS_FLAG). If
 * @expect_fault, the walk must report a level-3 access flag fault;
 * otherwise it must succeed with Normal memory attributes, the expected
 * shareability, and TEST_ADDR as the output address, after which the host
 * checks that the walk set the access flag (TEST_ACCESS_FLAG).
 */
#define test_at_insn(op, expect_fault)					\
do {									\
	u64 par, fsc;							\
	bool fault;							\
									\
	GUEST_SYNC(CLEAR_ACCESS_FLAG);					\
									\
	__at(OP_AT_##op, TEST_ADDR);					\
	isb();								\
	par = read_sysreg(par_el1);					\
									\
	fault = par & SYS_PAR_EL1_F;					\
	fsc = FIELD_GET(SYS_PAR_EL1_FST, par);				\
									\
	__GUEST_ASSERT((expect_fault) == fault,				\
		       "AT "#op": %sexpected fault (par: %lx)",		\
		       (expect_fault) ? "" : "un", par);		\
	if ((expect_fault)) {						\
		__GUEST_ASSERT(fsc == ESR_ELx_FSC_ACCESS_L(3),		\
			       "AT "#op": expected access flag fault (par: %lx)", \
			       par);					\
	} else {							\
		GUEST_ASSERT_EQ(FIELD_GET(SYS_PAR_EL1_ATTR, par), MAIR_ATTR_NORMAL); \
		GUEST_ASSERT_EQ(FIELD_GET(SYS_PAR_EL1_SH, par), PTE_SHARED >> 8); \
		GUEST_ASSERT_EQ(par & SYS_PAR_EL1_PA, TEST_ADDR);	\
		GUEST_SYNC(TEST_ACCESS_FLAG);				\
	}								\
} while (0)
55
/*
 * Exercise the S1E2 AT instructions from the (VHE) EL2&0 regime, then
 * switch to a non-host EL1&0 context reusing the same stage-1 tables and
 * exercise the S1E1 variants.
 */
static void test_at(bool expect_fault)
{
	test_at_insn(S1E2R, expect_fault);
	test_at_insn(S1E2W, expect_fault);

	/* Reuse the stage-1 MMU context from EL2 at EL1 */
	copy_el2_to_el1(SCTLR);
	copy_el2_to_el1(MAIR);
	copy_el2_to_el1(TCR);
	copy_el2_to_el1(TTBR0);
	copy_el2_to_el1(TTBR1);

	/* Disable stage-2 translation and enter a non-host context */
	write_sysreg(0, vtcr_el2);
	write_sysreg(0, vttbr_el2);
	sysreg_clear_set(hcr_el2, HCR_EL2_TGE | HCR_EL2_VM, 0);
	isb();

	test_at_insn(S1E1R, expect_fault);
	test_at_insn(S1E1W, expect_fault);
}
77
guest_code(void)78 static void guest_code(void)
79 {
80 sysreg_clear_set(tcr_el1, TCR_HA, 0);
81 isb();
82
83 test_at(true);
84
85 if (!SYS_FIELD_GET(ID_AA64MMFR1_EL1, HAFDBS, read_sysreg(id_aa64mmfr1_el1)))
86 GUEST_DONE();
87
88 /*
89 * KVM's software PTW makes the implementation choice that the AT
90 * instruction sets the access flag.
91 */
92 sysreg_clear_set(tcr_el1, 0, TCR_HA);
93 isb();
94 test_at(false);
95
96 GUEST_DONE();
97 }
98
handle_sync(struct kvm_vcpu * vcpu,struct ucall * uc)99 static void handle_sync(struct kvm_vcpu *vcpu, struct ucall *uc)
100 {
101 switch (uc->args[1]) {
102 case CLEAR_ACCESS_FLAG:
103 /*
104 * Delete + reinstall the memslot to invalidate stage-2
105 * mappings of the stage-1 page tables, forcing KVM to
106 * use the 'slow' AT emulation path.
107 *
108 * This and clearing the access flag from host userspace
109 * ensures that the access flag cannot be set speculatively
110 * and is reliably cleared at the time of the AT instruction.
111 */
112 clear_bit(__ffs(PTE_AF), ptep_hva);
113 vm_mem_region_reload(vcpu->vm, vcpu->vm->memslots[MEM_REGION_PT]);
114 break;
115 case TEST_ACCESS_FLAG:
116 TEST_ASSERT(test_bit(__ffs(PTE_AF), ptep_hva),
117 "Expected access flag to be set (desc: %lu)", *ptep_hva);
118 break;
119 default:
120 TEST_FAIL("Unexpected SYNC arg: %lu", uc->args[1]);
121 }
122 }
123
run_test(struct kvm_vcpu * vcpu)124 static void run_test(struct kvm_vcpu *vcpu)
125 {
126 struct ucall uc;
127
128 while (true) {
129 vcpu_run(vcpu);
130 switch (get_ucall(vcpu, &uc)) {
131 case UCALL_DONE:
132 return;
133 case UCALL_SYNC:
134 handle_sync(vcpu, &uc);
135 continue;
136 case UCALL_ABORT:
137 REPORT_GUEST_ASSERT(uc);
138 return;
139 default:
140 TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
141 }
142 }
143 }
144
main(void)145 int main(void)
146 {
147 struct kvm_vcpu_init init;
148 struct kvm_vcpu *vcpu;
149 struct kvm_vm *vm;
150
151 TEST_REQUIRE(kvm_check_cap(KVM_CAP_ARM_EL2));
152
153 vm = vm_create(1);
154
155 kvm_get_default_vcpu_target(vm, &init);
156 init.features[0] |= BIT(KVM_ARM_VCPU_HAS_EL2);
157 vcpu = aarch64_vcpu_add(vm, 0, &init, guest_code);
158 kvm_arch_vm_finalize_vcpus(vm);
159
160 virt_map(vm, TEST_ADDR, TEST_ADDR, 1);
161 ptep_hva = virt_get_pte_hva_at_level(vm, TEST_ADDR, 3);
162 run_test(vcpu);
163
164 kvm_vm_free(vm);
165 return 0;
166 }
167