xref: /linux/tools/testing/selftests/kvm/arm64/external_aborts.c (revision 2858ea3083f088a76b439f5b88b6d4f032dabc0c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * external_abort - Tests for userspace external abort injection
 *
 * Copyright (c) 2024 Google LLC
 */
#include "processor.h"
#include "test_util.h"

#define MMIO_ADDR	0x8000000ULL

static u64 expected_abort_pc;

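/*
 * Expect a synchronous external abort: verify it was taken at the recorded
 * PC and that ESR_EL1 reports a data abort from the current EL with an
 * external abort fault status code.
 */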
static void expect_sea_handler(struct ex_regs *regs)
{
	u64 esr = read_sysreg(esr_el1);

	GUEST_ASSERT_EQ(regs->pc, expected_abort_pc);
	GUEST_ASSERT_EQ(ESR_ELx_EC(esr), ESR_ELx_EC_DABT_CUR);
	GUEST_ASSERT_EQ(esr & ESR_ELx_FSC_TYPE, ESR_ELx_FSC_EXTABT);

	GUEST_DONE();
}

static void unexpected_dabt_handler(struct ex_regs *regs)
{
	GUEST_FAIL("Unexpected data abort at PC: %lx\n", regs->pc);
}

static struct kvm_vm *vm_create_with_dabt_handler(struct kvm_vcpu **vcpu, void *guest_code,
						  handler_fn dabt_handler)
{
	struct kvm_vm *vm = vm_create_with_one_vcpu(vcpu, guest_code);

	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(*vcpu);
	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, ESR_ELx_EC_DABT_CUR, dabt_handler);

	virt_map(vm, MMIO_ADDR, MMIO_ADDR, 1);

	return vm;
}

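/*
 * Mark a synchronous external data abort (SEA) as pending via
 * KVM_SET_VCPU_EVENTS (vcpu_events_set() wraps the ioctl), so KVM injects
 * the abort into the guest on the next KVM_RUN.
 */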
static void vcpu_inject_sea(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_events events = {};

	events.exception.ext_dabt_pending = true;
	vcpu_events_set(vcpu, &events);
}

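/*
 * Likewise, mark a virtual SError as pending for the vCPU; whether the guest
 * actually takes it depends on its SError mask (PSTATE.A).
 */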
static void vcpu_inject_serror(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_events events = {};

	events.exception.serror_pending = true;
	vcpu_events_set(vcpu, &events);
}

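/*
 * Run the vCPU until it issues a ucall, reporting any guest assertion
 * failure and failing the test if the ucall is not the expected one.
 */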
static void __vcpu_run_expect(struct kvm_vcpu *vcpu, unsigned int cmd)
{
	struct ucall uc;

	vcpu_run(vcpu);
	switch (get_ucall(vcpu, &uc)) {
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		break;
	default:
		if (uc.cmd == cmd)
			return;

		TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
	}
}

static void vcpu_run_expect_done(struct kvm_vcpu *vcpu)
{
	__vcpu_run_expect(vcpu, UCALL_DONE);
}

static void vcpu_run_expect_sync(struct kvm_vcpu *vcpu)
{
	__vcpu_run_expect(vcpu, UCALL_SYNC);
}

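/*
 * The label defined inside the asm block below is exported so the guest can
 * record the exact PC of the aborting load for the abort handler to check.
 */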
extern char test_mmio_abort_insn;

static void test_mmio_abort_guest(void)
{
	WRITE_ONCE(expected_abort_pc, (u64)&test_mmio_abort_insn);

	asm volatile("test_mmio_abort_insn:\n\t"
		     "ldr x0, [%0]\n\t"
		     : : "r" (MMIO_ADDR) : "x0", "memory");

	GUEST_FAIL("MMIO instruction should not retire");
}

/*
 * Test that KVM doesn't complete MMIO emulation when userspace has made an
 * external abort pending for the instruction.
 */
static void test_mmio_abort(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_mmio_abort_guest,
							expect_sea_handler);
	struct kvm_run *run = vcpu->run;

	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_MMIO);
	TEST_ASSERT_EQ(run->mmio.phys_addr, MMIO_ADDR);
	TEST_ASSERT_EQ(run->mmio.len, sizeof(unsigned long));
	TEST_ASSERT(!run->mmio.is_write, "Expected MMIO read");

	vcpu_inject_sea(vcpu);
	vcpu_run_expect_done(vcpu);
	kvm_vm_free(vm);
}

extern char test_mmio_nisv_insn;

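/*
 * A post-indexed load (register writeback) does not produce a valid
 * instruction syndrome, so the resulting stage-2 abort has ESR_EL2.ISV=0 and
 * KVM cannot describe the access for MMIO emulation.
 */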
static void test_mmio_nisv_guest(void)
{
	WRITE_ONCE(expected_abort_pc, (u64)&test_mmio_nisv_insn);

	asm volatile("test_mmio_nisv_insn:\n\t"
		     "ldr x0, [%0], #8\n\t"
		     : : "r" (MMIO_ADDR) : "x0", "memory");

	GUEST_FAIL("MMIO instruction should not retire");
}

/*
 * Test that the KVM_RUN ioctl fails for ESR_EL2.ISV=0 MMIO aborts if userspace
 * hasn't enabled KVM_CAP_ARM_NISV_TO_USER.
 */
static void test_mmio_nisv(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_mmio_nisv_guest,
							unexpected_dabt_handler);

	TEST_ASSERT(_vcpu_run(vcpu), "Expected nonzero return code from KVM_RUN");
	TEST_ASSERT_EQ(errno, ENOSYS);

	kvm_vm_free(vm);
}

/*
 * Test that ESR_EL2.ISV=0 MMIO aborts reach userspace and that an injected SEA
 * reaches the guest.
 */
static void test_mmio_nisv_abort(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_mmio_nisv_guest,
							expect_sea_handler);
	struct kvm_run *run = vcpu->run;

	vm_enable_cap(vm, KVM_CAP_ARM_NISV_TO_USER, 1);

	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_ARM_NISV);
	TEST_ASSERT_EQ(run->arm_nisv.fault_ipa, MMIO_ADDR);

	vcpu_inject_sea(vcpu);
	vcpu_run_expect_done(vcpu);
	kvm_vm_free(vm);
}

static void unexpected_serror_handler(struct ex_regs *regs)
{
	GUEST_FAIL("Took unexpected SError exception");
}

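/*
 * The guest is expected to run with SErrors masked (PSTATE.A set), so the
 * injected SError must remain pending (visible via ISR_EL1.A) rather than
 * being delivered, even across a context synchronization event.
 */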
static void test_serror_masked_guest(void)
{
	GUEST_ASSERT(read_sysreg(isr_el1) & ISR_EL1_A);

	isb();

	GUEST_DONE();
}

static void test_serror_masked(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_serror_masked_guest,
							unexpected_dabt_handler);

	vm_install_exception_handler(vm, VECTOR_ERROR_CURRENT, unexpected_serror_handler);

	vcpu_inject_serror(vcpu);
	vcpu_run_expect_done(vcpu);
	kvm_vm_free(vm);
}

static void expect_serror_handler(struct ex_regs *regs)
{
	GUEST_DONE();
}

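/*
 * Unmasking SErrors while one is pending should cause it to be taken before
 * execution proceeds past the mask window; the ERROR vector handler then
 * reports success.
 */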
static void test_serror_guest(void)
{
	GUEST_ASSERT(read_sysreg(isr_el1) & ISR_EL1_A);

	local_serror_enable();
	isb();
	local_serror_disable();

	GUEST_FAIL("Should've taken pending SError exception");
}

static void test_serror(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_serror_guest,
							unexpected_dabt_handler);

	vm_install_exception_handler(vm, VECTOR_ERROR_CURRENT, expect_serror_handler);

	vcpu_inject_serror(vcpu);
	vcpu_run_expect_done(vcpu);
	kvm_vm_free(vm);
}

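/*
 * No SError is pending when the guest starts; the guest unmasks SErrors and
 * syncs back to userspace, which injects one while the guest is unmasked.
 * The SError should then be delivered as soon as the guest resumes.
 */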
static void test_serror_emulated_guest(void)
{
	GUEST_ASSERT(!(read_sysreg(isr_el1) & ISR_EL1_A));

	local_serror_enable();
	GUEST_SYNC(0);
	local_serror_disable();

	GUEST_FAIL("Should've taken unmasked SError exception");
}

static void test_serror_emulated(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_serror_emulated_guest,
							unexpected_dabt_handler);

	vm_install_exception_handler(vm, VECTOR_ERROR_CURRENT, expect_serror_handler);

	vcpu_run_expect_sync(vcpu);
	vcpu_inject_serror(vcpu);
	vcpu_run_expect_done(vcpu);
	kvm_vm_free(vm);
}

int main(void)
{
	test_mmio_abort();
	test_mmio_nisv();
	test_mmio_nisv_abort();
	test_serror();
	test_serror_masked();
	test_serror_emulated();
}