xref: /linux/tools/testing/selftests/kvm/arm64/sea_to_user.c (revision 06bc7ff0a1e0f2b0102e1314e3527a7ec0997851)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Test that KVM returns to userspace with KVM_EXIT_ARM_SEA if host APEI
 * fails to handle the SEA and userspace has opted in to
 * KVM_CAP_ARM_SEA_TO_USER.
 *
 * After reaching userspace with the expected arm_sea info, also test
 * userspace injecting a synchronous external data abort back into the guest.
 *
 * This test uses EINJ to generate a REAL synchronous external data abort
 * by consuming a recoverable uncorrectable memory error. The device under
 * test must therefore support EINJ, including the notrigger feature, in
 * both firmware and host kernel; otherwise the test is skipped. The test
 * is also skipped if the platform's APEI is able to claim the SEA.
 */

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "guest_modes.h"

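/*
 * /proc/self/pagemap layout (Documentation/admin-guide/mm/pagemap.rst):
 * one 8-byte entry per virtual page, with bit 63 flagging the page as
 * present and bits 54:0 holding the page frame number. The masks below
 * are what translate_hva_to_hpa() uses to pull the PFN out of an entry.
 */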
#define PAGE_PRESENT		(1ULL << 63)
#define PAGE_PHYSICAL		0x007fffffffffffffULL
#define PAGE_ADDR_MASK		(~(0xfffULL))

/* Group ISV and ISS[23:14]. */
#define ESR_ELx_INST_SYNDROME	((ESR_ELx_ISV) | (ESR_ELx_SAS) | \
				 (ESR_ELx_SSE) | (ESR_ELx_SRT_MASK) | \
				 (ESR_ELx_SF) | (ESR_ELx_AR))

#define EINJ_ETYPE		"/sys/kernel/debug/apei/einj/error_type"
#define EINJ_ADDR		"/sys/kernel/debug/apei/einj/param1"
#define EINJ_MASK		"/sys/kernel/debug/apei/einj/param2"
#define EINJ_FLAGS		"/sys/kernel/debug/apei/einj/flags"
#define EINJ_NOTRIGGER		"/sys/kernel/debug/apei/einj/notrigger"
#define EINJ_DOIT		"/sys/kernel/debug/apei/einj/error_inject"
/* Memory Uncorrectable non-fatal. */
#define ERROR_TYPE_MEMORY_UER	0x10
/* Memory address and mask valid (param1 and param2). */
#define MASK_MEMORY_UER		0b10
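/*
 * EINJ flow used by inject_uer() below, per the standard APEI EINJ debugfs
 * interface: select the error type, flag param1/param2 as a physical
 * address/mask, set notrigger so firmware does not consume the error
 * itself, then write error_inject to arm the error for the guest to hit.
 */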

/* Guest virtual address region = [2G, 3G). */
#define START_GVA		0x80000000UL
#define VM_MEM_SIZE		0x40000000UL
/* Note: EINJ_OFFSET must be < VM_MEM_SIZE. */
#define EINJ_OFFSET		0x01234badUL
#define EINJ_GVA		((START_GVA) + (EINJ_OFFSET))

static gpa_t einj_gpa;
static void *einj_hva;
static u64 einj_hpa;
static bool far_invalid;

static u64 translate_hva_to_hpa(unsigned long hva)
{
	u64 pinfo;
	s64 offset = hva / getpagesize() * sizeof(pinfo);
	int fd;

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0)
		ksft_exit_fail_perror("Failed to open /proc/self/pagemap");
	if (pread(fd, &pinfo, sizeof(pinfo), offset) != sizeof(pinfo)) {
		close(fd);
		ksft_exit_fail_perror("Failed to read /proc/self/pagemap");
	}

	close(fd);

	if ((pinfo & PAGE_PRESENT) == 0)
		ksft_exit_fail_perror("Page not present");

	return ((pinfo & PAGE_PHYSICAL) << MIN_PAGE_SHIFT) +
	       (hva & (getpagesize() - 1));
}

static void write_einj_entry(const char *einj_path, u64 val)
{
	char cmd[256] = {0};
	FILE *cmdfile = NULL;

	sprintf(cmd, "echo %#lx > %s", val, einj_path);
	cmdfile = popen(cmd, "r");

	if (pclose(cmdfile) == 0)
		ksft_print_msg("echo %#lx > %s - done\n", val, einj_path);
	else
		ksft_exit_fail_perror("Failed to write EINJ entry");
}

static void inject_uer(u64 hpa)
{
	if (access("/sys/firmware/acpi/tables/EINJ", R_OK) == -1)
		ksft_test_result_skip("EINJ table not available in firmware");

	if (access(EINJ_ETYPE, R_OK | W_OK) == -1)
		ksft_test_result_skip("EINJ module probably not loaded?");

	write_einj_entry(EINJ_ETYPE, ERROR_TYPE_MEMORY_UER);
	write_einj_entry(EINJ_FLAGS, MASK_MEMORY_UER);
	write_einj_entry(EINJ_ADDR, hpa);
	write_einj_entry(EINJ_MASK, ~0x0UL);
	write_einj_entry(EINJ_NOTRIGGER, 1);
	write_einj_entry(EINJ_DOIT, 1);
}

/*
 * When host APEI successfully claims the SEA caused by guest_code, the kernel
 * sends a SIGBUS with BUS_MCEERR_AR to the test thread.
 *
 * Set up this SIGBUS handler to skip the test in that case.
 */
static void sigbus_signal_handler(int sig, siginfo_t *si, void *v)
{
	ksft_print_msg("SIGBUS (%d) received, dumping siginfo...\n", sig);
	ksft_print_msg("si_signo=%d, si_errno=%d, si_code=%d, si_addr=%p\n",
		       si->si_signo, si->si_errno, si->si_code, si->si_addr);
	if (si->si_code == BUS_MCEERR_AR)
		ksft_test_result_skip("SEA is claimed by host APEI\n");
	else
		ksft_test_result_fail("Exit with signal unhandled\n");

	exit(0);
}

static void setup_sigbus_handler(void)
{
	struct sigaction act;

	memset(&act, 0, sizeof(act));
	sigemptyset(&act.sa_mask);
	act.sa_sigaction = sigbus_signal_handler;
	act.sa_flags = SA_SIGINFO;
	TEST_ASSERT(sigaction(SIGBUS, &act, NULL) == 0,
		    "Failed to setup SIGBUS handler");
}

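/*
 * Guest entry point: touch the poisoned GVA. The load is expected to take
 * a SEA instead of returning data; with host APEI unable to claim it, the
 * abort should surface to userspace as KVM_EXIT_ARM_SEA.
 */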
static void guest_code(void)
{
	u64 guest_data;

	/* Consuming the error will cause a SEA. */
	guest_data = *(u64 *)EINJ_GVA;

	GUEST_FAIL("Poison not protected by SEA: gva=%#lx, guest_data=%#lx\n",
		   EINJ_GVA, guest_data);
}

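/*
 * Guest-side handler for the SEA that userspace injects in the second phase
 * of the test: expect a current-EL external data abort and check FAR
 * validity against what the host-reported ESR indicated (far_invalid).
 */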
static void expect_sea_handler(struct ex_regs *regs)
{
	u64 esr = read_sysreg(esr_el1);
	u64 far = read_sysreg(far_el1);
	bool expect_far_invalid = far_invalid;

	GUEST_PRINTF("Handling Guest SEA\n");
	GUEST_PRINTF("ESR_EL1=%#lx, FAR_EL1=%#lx\n", esr, far);

	GUEST_ASSERT_EQ(ESR_ELx_EC(esr), ESR_ELx_EC_DABT_CUR);
	GUEST_ASSERT_EQ(esr & ESR_ELx_FSC_TYPE, ESR_ELx_FSC_EXTABT);

	if (expect_far_invalid) {
		GUEST_ASSERT_EQ(esr & ESR_ELx_FnV, ESR_ELx_FnV);
		GUEST_PRINTF("Guest observed garbage value in FAR\n");
	} else {
		GUEST_ASSERT_EQ(esr & ESR_ELx_FnV, 0);
		GUEST_ASSERT_EQ(far, EINJ_GVA);
	}

	GUEST_DONE();
}

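/*
 * Ask KVM to inject a synchronous external data abort into the guest via
 * KVM_SET_VCPU_EVENTS (the ext_dabt_pending mechanism behind
 * KVM_CAP_ARM_INJECT_EXT_DABT).
 */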
static void vcpu_inject_sea(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_events events = {};

	events.exception.ext_dabt_pending = true;
	vcpu_events_set(vcpu, &events);
}

static void run_vm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	struct ucall uc;
	bool guest_done = false;
	struct kvm_run *run = vcpu->run;
	u64 esr;

	/* Resume the vCPU after error injection to consume the error. */
	vcpu_run(vcpu);

	ksft_print_msg("Dump kvm_run info about KVM_EXIT_%s\n",
		       exit_reason_str(run->exit_reason));
	ksft_print_msg("kvm_run.arm_sea: esr=%#llx, flags=%#llx\n",
		       run->arm_sea.esr, run->arm_sea.flags);
	ksft_print_msg("kvm_run.arm_sea: gva=%#llx, gpa=%#llx\n",
		       run->arm_sea.gva, run->arm_sea.gpa);

	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_ARM_SEA);

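	/*
	 * The reported ESR should describe a lower-EL external data abort,
	 * with the instruction syndrome, ISS2 and VNCR bits all clear.
	 */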
	esr = run->arm_sea.esr;
	TEST_ASSERT_EQ(ESR_ELx_EC(esr), ESR_ELx_EC_DABT_LOW);
	TEST_ASSERT_EQ(esr & ESR_ELx_FSC_TYPE, ESR_ELx_FSC_EXTABT);
	TEST_ASSERT_EQ(ESR_ELx_ISS2(esr), 0);
	TEST_ASSERT_EQ((esr & ESR_ELx_INST_SYNDROME), 0);
	TEST_ASSERT_EQ(esr & ESR_ELx_VNCR, 0);

	if (!(esr & ESR_ELx_FnV)) {
		ksft_print_msg("Expect gva to match given FnV bit is 0\n");
		TEST_ASSERT_EQ(run->arm_sea.gva, EINJ_GVA);
	}

	if (run->arm_sea.flags & KVM_EXIT_ARM_SEA_FLAG_GPA_VALID) {
		ksft_print_msg("Expect gpa to match given KVM_EXIT_ARM_SEA_FLAG_GPA_VALID is set\n");
		TEST_ASSERT_EQ(run->arm_sea.gpa, einj_gpa & PAGE_ADDR_MASK);
	}

	far_invalid = esr & ESR_ELx_FnV;

	/* Inject a SEA into the guest and expect it to be handled by the SEA handler. */
	vcpu_inject_sea(vcpu);

	/* Expect the guest to reach GUEST_DONE gracefully. */
	do {
		vcpu_run(vcpu);
		switch (get_ucall(vcpu, &uc)) {
		case UCALL_PRINTF:
			ksft_print_msg("From guest: %s", uc.buffer);
			break;
		case UCALL_DONE:
			ksft_print_msg("Guest done gracefully!\n");
			guest_done = true;
			break;
		case UCALL_ABORT:
			ksft_print_msg("Guest aborted!\n");
			guest_done = true;
			REPORT_GUEST_ASSERT(uc);
			break;
		default:
			TEST_FAIL("Unexpected ucall: %lu\n", uc.cmd);
		}
	} while (!guest_done);
}

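/*
 * Create a VM with one vCPU, install the guest-side SEA handler, and back
 * the [START_GVA, START_GVA + VM_MEM_SIZE) range with a hugetlb-backed
 * memslot placed near the top of the guest physical address space.
 */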
static struct kvm_vm *vm_create_with_sea_handler(struct kvm_vcpu **vcpu)
{
	size_t backing_page_size;
	size_t guest_page_size;
	size_t alignment;
	u64 num_guest_pages;
	gpa_t start_gpa;
	enum vm_mem_backing_src_type src_type = VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB;
	struct kvm_vm *vm;

	backing_page_size = get_backing_src_pagesz(src_type);
	guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size;
	alignment = max(backing_page_size, guest_page_size);
	num_guest_pages = VM_MEM_SIZE / guest_page_size;

	vm = __vm_create_with_one_vcpu(vcpu, num_guest_pages, guest_code);
	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(*vcpu);

	vm_install_sync_handler(vm,
		/*vector=*/VECTOR_SYNC_CURRENT,
		/*ec=*/ESR_ELx_EC_DABT_CUR,
		/*handler=*/expect_sea_handler);

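	/*
	 * Place the memslot at the top of guest physical memory, aligned
	 * down to the larger of the backing and guest page sizes so the
	 * 1GB hugetlb backing can be mapped.
	 */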
	start_gpa = (vm->max_gfn - num_guest_pages) * guest_page_size;
	start_gpa = align_down(start_gpa, alignment);

	vm_userspace_mem_region_add(
		/*vm=*/vm,
		/*src_type=*/src_type,
		/*gpa=*/start_gpa,
		/*slot=*/1,
		/*npages=*/num_guest_pages,
		/*flags=*/0);

	virt_map(vm, START_GVA, start_gpa, num_guest_pages);

	ksft_print_msg("Mapped %#lx pages: gva=%#lx to gpa=%#lx\n",
		       num_guest_pages, START_GVA, start_gpa);
	return vm;
}

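/*
 * Translate EINJ_GVA to the host physical address backing it and use EINJ
 * to plant an uncorrectable memory error there; notrigger is set, so the
 * error sits armed until the guest consumes it.
 */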
static void vm_inject_memory_uer(struct kvm_vm *vm)
{
	u64 guest_data;

	einj_gpa = addr_gva2gpa(vm, EINJ_GVA);
	einj_hva = addr_gva2hva(vm, EINJ_GVA);

	/* Populate known data before injecting the UER. */
	*(u64 *)einj_hva = 0xBAADCAFE;
	guest_data = *(u64 *)einj_hva;
	ksft_print_msg("Before EINJect: data=%#lx\n", guest_data);

	einj_hpa = translate_hva_to_hpa((unsigned long)einj_hva);

	ksft_print_msg("EINJ_GVA=%#lx, einj_gpa=%#lx, einj_hva=%p, einj_hpa=%#lx\n",
		       EINJ_GVA, einj_gpa, einj_hva, einj_hpa);

	inject_uer(einj_hpa);
	ksft_print_msg("Memory UER EINJected\n");
}

int main(int argc, char *argv[])
{
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SEA_TO_USER));

	setup_sigbus_handler();

	vm = vm_create_with_sea_handler(&vcpu);
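	/* Opt in to KVM_EXIT_ARM_SEA exits before the error is injected and consumed. */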
	vm_enable_cap(vm, KVM_CAP_ARM_SEA_TO_USER, 0);
	vm_inject_memory_uer(vm);
	run_vm(vm, vcpu);
	kvm_vm_free(vm);

	return 0;
}