// SPDX-License-Identifier: GPL-2.0-only
/*
 * Test that KVM returns to userspace with KVM_EXIT_ARM_SEA if host APEI
 * fails to handle an SEA and userspace has opted in to
 * KVM_CAP_ARM_SEA_TO_USER.
 *
 * After reaching userspace with the expected arm_sea info, also test
 * userspace injecting a synchronous external data abort into the guest.
 *
 * This test utilizes EINJ to generate a REAL synchronous external data
 * abort by consuming a recoverable uncorrectable memory error. Therefore
 * the device under test must support EINJ in both firmware and host
 * kernel, including the notrigger feature; otherwise the test is skipped.
 * The platform under test's APEI must also be unable to claim the SEA;
 * otherwise the test is skipped as well.
 */

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "guest_modes.h"

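/*
 * Bits used to decode /proc/self/pagemap entries: bit 63 flags a present
 * page, bits 0-54 hold the PFN (see Documentation/admin-guide/mm/pagemap.rst).
 * PAGE_ADDR_MASK strips the low 12 bits to get a 4K-aligned address.
 */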
#define PAGE_PRESENT		(1ULL << 63)
#define PAGE_PHYSICAL		0x007fffffffffffffULL
#define PAGE_ADDR_MASK		(~(0xfffULL))

/* Group ISV and ISS[23:14]. */
#define ESR_ELx_INST_SYNDROME	((ESR_ELx_ISV) | (ESR_ELx_SAS) | \
				 (ESR_ELx_SSE) | (ESR_ELx_SRT_MASK) | \
				 (ESR_ELx_SF) | (ESR_ELx_AR))

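/* debugfs knobs exposed by the APEI EINJ module (CONFIG_ACPI_APEI_EINJ). */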
#define EINJ_ETYPE		"/sys/kernel/debug/apei/einj/error_type"
#define EINJ_ADDR		"/sys/kernel/debug/apei/einj/param1"
#define EINJ_MASK		"/sys/kernel/debug/apei/einj/param2"
#define EINJ_FLAGS		"/sys/kernel/debug/apei/einj/flags"
#define EINJ_NOTRIGGER		"/sys/kernel/debug/apei/einj/notrigger"
#define EINJ_DOIT		"/sys/kernel/debug/apei/einj/error_inject"
/* Memory Uncorrectable non-fatal. */
#define ERROR_TYPE_MEMORY_UER	0x10
/* Memory address and mask valid (param1 and param2). */
#define MASK_MEMORY_UER		0b10

/* Guest virtual address region = [2G, 3G). */
#define START_GVA		0x80000000UL
#define VM_MEM_SIZE		0x40000000UL
/* Note: EINJ_OFFSET must be < VM_MEM_SIZE. */
#define EINJ_OFFSET		0x01234badUL
#define EINJ_GVA		((START_GVA) + (EINJ_OFFSET))

static vm_paddr_t einj_gpa;
static void *einj_hva;
static uint64_t einj_hpa;
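/*
 * Set by run_vm() from the reported ESR; tells the guest's SEA handler
 * whether to expect a valid FAR_EL1.
 */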
static bool far_invalid;

static uint64_t translate_to_host_paddr(unsigned long vaddr)
{
	uint64_t pinfo;
	int64_t offset = vaddr / getpagesize() * sizeof(pinfo);
	int fd;
	uint64_t page_addr;
	uint64_t paddr;

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0)
		ksft_exit_fail_perror("Failed to open /proc/self/pagemap");
	if (pread(fd, &pinfo, sizeof(pinfo), offset) != sizeof(pinfo)) {
		close(fd);
		ksft_exit_fail_perror("Failed to read /proc/self/pagemap");
	}

	close(fd);

	if ((pinfo & PAGE_PRESENT) == 0)
		ksft_exit_fail_msg("Page not present\n");

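	/*
	 * pagemap stores the PFN in bits 0-54. MIN_PAGE_SHIFT assumes the
	 * host kernel uses 4K pages when converting the PFN to a physical
	 * address.
	 */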
	page_addr = (pinfo & PAGE_PHYSICAL) << MIN_PAGE_SHIFT;
	paddr = page_addr + (vaddr & (getpagesize() - 1));
	return paddr;
}

static void write_einj_entry(const char *einj_path, uint64_t val)
{
	char cmd[256] = {0};
	FILE *cmdfile = NULL;

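	/* Write via a shell "echo"; pclose() reports the shell's exit status. */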
	sprintf(cmd, "echo %#lx > %s", val, einj_path);
	cmdfile = popen(cmd, "r");
	if (!cmdfile)
		ksft_exit_fail_perror("Failed to run echo command");

	if (pclose(cmdfile) == 0)
		ksft_print_msg("echo %#lx > %s - done\n", val, einj_path);
	else
		ksft_exit_fail_perror("Failed to write EINJ entry");
}

static void inject_uer(uint64_t paddr)
{
	if (access("/sys/firmware/acpi/tables/EINJ", R_OK) == -1)
		ksft_exit_skip("EINJ table not available in firmware\n");

	if (access(EINJ_ETYPE, R_OK | W_OK) == -1)
		ksft_exit_skip("EINJ module probably not loaded?\n");

	write_einj_entry(EINJ_ETYPE, ERROR_TYPE_MEMORY_UER);
	write_einj_entry(EINJ_FLAGS, MASK_MEMORY_UER);
	write_einj_entry(EINJ_ADDR, paddr);
	write_einj_entry(EINJ_MASK, ~0x0UL);
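	/*
	 * With notrigger=1 the error is armed but the trigger action is
	 * skipped; the guest's later read of the poisoned address acts as
	 * the consumer.
	 */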
	write_einj_entry(EINJ_NOTRIGGER, 1);
	write_einj_entry(EINJ_DOIT, 1);
}

/*
 * When host APEI successfully claims the SEA caused by guest_code, the
 * kernel sends a SIGBUS signal with BUS_MCEERR_AR to the test thread.
 *
 * Set up this SIGBUS handler to skip the test in that case.
 */
static void sigbus_signal_handler(int sig, siginfo_t *si, void *v)
{
	ksft_print_msg("SIGBUS (%d) received, dumping siginfo...\n", sig);
	ksft_print_msg("si_signo=%d, si_errno=%d, si_code=%d, si_addr=%p\n",
		       si->si_signo, si->si_errno, si->si_code, si->si_addr);
	if (si->si_code == BUS_MCEERR_AR)
		ksft_test_result_skip("SEA is claimed by host APEI\n");
	else
		ksft_test_result_fail("Unexpected SIGBUS si_code\n");

	exit(0);
}

static void setup_sigbus_handler(void)
{
	struct sigaction act;

	memset(&act, 0, sizeof(act));
	sigemptyset(&act.sa_mask);
	act.sa_sigaction = sigbus_signal_handler;
	act.sa_flags = SA_SIGINFO;
	TEST_ASSERT(sigaction(SIGBUS, &act, NULL) == 0,
		    "Failed to setup SIGBUS handler");
}

static void guest_code(void)
{
	uint64_t guest_data;

	/* Consuming the poisoned data will cause an SEA. */
	guest_data = *(uint64_t *)EINJ_GVA;

	GUEST_FAIL("Poison not protected by SEA: gva=%#lx, guest_data=%#lx\n",
		   EINJ_GVA, guest_data);
}

static void expect_sea_handler(struct ex_regs *regs)
{
	u64 esr = read_sysreg(esr_el1);
	u64 far = read_sysreg(far_el1);
	bool expect_far_invalid = far_invalid;

	GUEST_PRINTF("Handling Guest SEA\n");
	GUEST_PRINTF("ESR_EL1=%#lx, FAR_EL1=%#lx\n", esr, far);

	GUEST_ASSERT_EQ(ESR_ELx_EC(esr), ESR_ELx_EC_DABT_CUR);
	GUEST_ASSERT_EQ(esr & ESR_ELx_FSC_TYPE, ESR_ELx_FSC_EXTABT);

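	/* FnV set means FAR_EL1 is UNKNOWN; otherwise it must hold the faulting GVA. */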
	if (expect_far_invalid) {
		GUEST_ASSERT_EQ(esr & ESR_ELx_FnV, ESR_ELx_FnV);
		GUEST_PRINTF("Guest observed garbage value in FAR\n");
	} else {
		GUEST_ASSERT_EQ(esr & ESR_ELx_FnV, 0);
		GUEST_ASSERT_EQ(far, EINJ_GVA);
	}

	GUEST_DONE();
}

static void vcpu_inject_sea(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_events events = {};

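	/*
	 * Setting ext_dabt_pending via KVM_SET_VCPU_EVENTS asks KVM to
	 * inject a synchronous external data abort into the guest.
	 */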
	events.exception.ext_dabt_pending = true;
	vcpu_events_set(vcpu, &events);
}

static void run_vm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	struct ucall uc;
	bool guest_done = false;
	struct kvm_run *run = vcpu->run;
	u64 esr;

	/* Resume the vCPU after error injection to consume the error. */
	vcpu_run(vcpu);

	ksft_print_msg("Dump kvm_run info about KVM_EXIT_%s\n",
		       exit_reason_str(run->exit_reason));
	ksft_print_msg("kvm_run.arm_sea: esr=%#llx, flags=%#llx\n",
		       run->arm_sea.esr, run->arm_sea.flags);
	ksft_print_msg("kvm_run.arm_sea: gva=%#llx, gpa=%#llx\n",
		       run->arm_sea.gva, run->arm_sea.gpa);

	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_ARM_SEA);

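	/*
	 * Expect a data abort taken from the guest (lower EL) with the
	 * external-abort FSC; the instruction syndrome, ISS2 and VNCR bits
	 * should all be clear in the reported ESR.
	 */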
	esr = run->arm_sea.esr;
	TEST_ASSERT_EQ(ESR_ELx_EC(esr), ESR_ELx_EC_DABT_LOW);
	TEST_ASSERT_EQ(esr & ESR_ELx_FSC_TYPE, ESR_ELx_FSC_EXTABT);
	TEST_ASSERT_EQ(ESR_ELx_ISS2(esr), 0);
	TEST_ASSERT_EQ((esr & ESR_ELx_INST_SYNDROME), 0);
	TEST_ASSERT_EQ(esr & ESR_ELx_VNCR, 0);

	if (!(esr & ESR_ELx_FnV)) {
		ksft_print_msg("Expect gva to match given FnV bit is 0\n");
		TEST_ASSERT_EQ(run->arm_sea.gva, EINJ_GVA);
	}

	if (run->arm_sea.flags & KVM_EXIT_ARM_SEA_FLAG_GPA_VALID) {
		ksft_print_msg("Expect gpa to match given KVM_EXIT_ARM_SEA_FLAG_GPA_VALID is set\n");
		TEST_ASSERT_EQ(run->arm_sea.gpa, einj_gpa & PAGE_ADDR_MASK);
	}

	far_invalid = esr & ESR_ELx_FnV;

	/* Inject an SEA into the guest; expect its SEA handler to handle it. */
	vcpu_inject_sea(vcpu);

	/* Expect the guest to reach GUEST_DONE gracefully. */
	do {
		vcpu_run(vcpu);
		switch (get_ucall(vcpu, &uc)) {
		case UCALL_PRINTF:
			ksft_print_msg("From guest: %s", uc.buffer);
			break;
		case UCALL_DONE:
			ksft_print_msg("Guest done gracefully!\n");
			guest_done = true;
			break;
		case UCALL_ABORT:
			ksft_print_msg("Guest aborted!\n");
			guest_done = true;
			REPORT_GUEST_ASSERT(uc);
			break;
		default:
			TEST_FAIL("Unexpected ucall: %lu\n", uc.cmd);
		}
	} while (!guest_done);
}

static struct kvm_vm *vm_create_with_sea_handler(struct kvm_vcpu **vcpu)
{
	size_t backing_page_size;
	size_t guest_page_size;
	size_t alignment;
	uint64_t num_guest_pages;
	vm_paddr_t start_gpa;
	enum vm_mem_backing_src_type src_type = VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB;
	struct kvm_vm *vm;

	backing_page_size = get_backing_src_pagesz(src_type);
	guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size;
	alignment = max(backing_page_size, guest_page_size);
	num_guest_pages = VM_MEM_SIZE / guest_page_size;

	vm = __vm_create_with_one_vcpu(vcpu, num_guest_pages, guest_code);
	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(*vcpu);

	vm_install_sync_handler(vm,
		/*vector=*/VECTOR_SYNC_CURRENT,
		/*ec=*/ESR_ELx_EC_DABT_CUR,
		/*handler=*/expect_sea_handler);

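	/*
	 * Place the test memslot at the top of guest physical memory,
	 * aligned down to the larger of the backing and guest page sizes.
	 */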
	start_gpa = (vm->max_gfn - num_guest_pages) * guest_page_size;
	start_gpa = align_down(start_gpa, alignment);

	vm_userspace_mem_region_add(
		/*vm=*/vm,
		/*src_type=*/src_type,
		/*guest_paddr=*/start_gpa,
		/*slot=*/1,
		/*npages=*/num_guest_pages,
		/*flags=*/0);

	virt_map(vm, START_GVA, start_gpa, num_guest_pages);

	ksft_print_msg("Mapped %#lx pages: gva=%#lx to gpa=%#lx\n",
		       num_guest_pages, START_GVA, start_gpa);
	return vm;
}

static void vm_inject_memory_uer(struct kvm_vm *vm)
{
	uint64_t guest_data;

	einj_gpa = addr_gva2gpa(vm, EINJ_GVA);
	einj_hva = addr_gva2hva(vm, EINJ_GVA);

	/* Populate some data before injecting the UER so the page is present. */
	*(uint64_t *)einj_hva = 0xBAADCAFE;
	guest_data = *(uint64_t *)einj_hva;
	ksft_print_msg("Before EINJect: data=%#lx\n", guest_data);

	einj_hpa = translate_to_host_paddr((unsigned long)einj_hva);

	ksft_print_msg("EINJ_GVA=%#lx, einj_gpa=%#lx, einj_hva=%p, einj_hpa=%#lx\n",
		       EINJ_GVA, einj_gpa, einj_hva, einj_hpa);

	inject_uer(einj_hpa);
	ksft_print_msg("Memory UER EINJected\n");
}

int main(int argc, char *argv[])
{
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SEA_TO_USER));

	setup_sigbus_handler();

	vm = vm_create_with_sea_handler(&vcpu);
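	/* Opt in so unhandled SEAs exit to userspace as KVM_EXIT_ARM_SEA. */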
	vm_enable_cap(vm, KVM_CAP_ARM_SEA_TO_USER, 0);
	vm_inject_memory_uer(vm);
	run_vm(vm, vcpu);
	kvm_vm_free(vm);

	return 0;
}