xref: /linux/arch/arm64/kvm/handle_exit.c (revision 0ad53fe3ae82443c74ff8cfd7bd13377cc1134a3)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/handle_exit.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/debug-monitors.h>
#include <asm/traps.h>

#include <kvm/arm_hypercalls.h>

#define CREATE_TRACE_POINTS
#include "trace_handle_exit.h"

typedef int (*exit_handle_fn)(struct kvm_vcpu *);

static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
{
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
		kvm_inject_vabt(vcpu);
}

static int handle_hvc(struct kvm_vcpu *vcpu)
{
	int ret;

	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
			    kvm_vcpu_hvc_get_imm(vcpu));
	vcpu->stat.hvc_exit_stat++;

	ret = kvm_hvc_call_handler(vcpu);
	if (ret < 0) {
		vcpu_set_reg(vcpu, 0, ~0UL);
		return 1;
	}

	return ret;
}
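
/*
 * Illustrative sketch, not part of the original file: the guest-side
 * view of the error convention above. A hypercall rejected by
 * kvm_hvc_call_handler() comes back with ~0UL (SMCCC_RET_NOT_SUPPORTED)
 * in x0, so a guest can probe for support. arm_smccc_1_1_hvc() and the
 * function ID come from <linux/arm-smccc.h> (pulled in via
 * <kvm/arm_hypercalls.h>); code like this would run in the guest, not
 * in the host handler above.
 */
static void __maybe_unused example_guest_smccc_probe(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_hvc(ARM_SMCCC_VERSION_FUNC_ID, &res);
	if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
		return;	/* host set x0 to ~0UL: not implemented */

	/* res.a0 now holds the SMCCC version implemented by the host */
}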

static int handle_smc(struct kvm_vcpu *vcpu)
{
	/*
	 * "If an SMC instruction executed at Non-secure EL1 is
	 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
	 * Trap exception, not a Secure Monitor Call exception [...]"
	 *
	 * We need to advance the PC after the trap, as it would
	 * otherwise return to the same address...
	 */
	vcpu_set_reg(vcpu, 0, ~0UL);
	kvm_incr_pc(vcpu);
	return 1;
}
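
/*
 * Worked example, not part of the original file: why the PC has to be
 * bumped by hand. On a trapped SMC the preferred exception return
 * address is the SMC instruction itself:
 *
 *	guest:	0x1000:	smc	#0	<- ELR_EL2 points here on the trap
 *		0x1004:	...		<- where the guest must resume
 *
 * Without kvm_incr_pc() the return to the guest would re-execute the
 * SMC and trap forever. Note that kvm_incr_pc() only flags the vcpu;
 * the PC is actually advanced right before the next guest entry.
 */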

/*
 * Guest accesses to FP/ASIMD registers are routed to this handler only
 * when the system doesn't support FP/ASIMD.
 */
static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/**
 * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
 *		    instruction executed by a guest
 *
 * @vcpu:	the vcpu pointer
 *
 * WFE: Yield the CPU and come back to this vcpu when the scheduler
 * decides to.
 * WFI: Simply call kvm_vcpu_block(), which halts the vcpu (no further
 * world-switches) and schedules other host processes until there is an
 * incoming IRQ or FIQ to the VM.
 */
static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
		vcpu->stat.wfe_exit_stat++;
		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
	} else {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
		vcpu->stat.wfi_exit_stat++;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
	}

	kvm_incr_pc(vcpu);

	return 1;
}
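
/*
 * Illustrative helper, not part of the original file: the only ISS bit
 * the handler above consults is the WFx "TI" bit, which distinguishes
 * the two instructions (set for WFE, clear for WFI).
 */
static bool __maybe_unused example_wfx_is_wfe(u32 esr)
{
	return !!(esr & ESR_ELx_WFx_ISS_WFE);	/* ISS bit 0 */
}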

/**
 * kvm_handle_guest_debug - handle a debug exception
 *
 * @vcpu:	the vcpu pointer
 *
 * We route all debug exceptions through the same handler. If both the
 * guest and host are using the same debug facilities it will be up to
 * userspace to re-inject the correct exception for guest delivery.
 *
 * Return: 0 (while setting vcpu->run->exit_reason)
 */
static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 esr = kvm_vcpu_get_esr(vcpu);

	run->exit_reason = KVM_EXIT_DEBUG;
	run->debug.arch.hsr = esr;

	if (ESR_ELx_EC(esr) == ESR_ELx_EC_WATCHPT_LOW)
		run->debug.arch.far = vcpu->arch.fault.far_el2;

	return 0;
}
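
/*
 * Illustrative userspace counterpart, not part of the original file: a
 * VMM sees the exit prepared above as KVM_EXIT_DEBUG on the vcpu's
 * mmap()ed struct kvm_run, along the lines of
 *
 *	if (run->exit_reason == KVM_EXIT_DEBUG) {
 *		__u32 esr = run->debug.arch.hsr;
 *		__u64 far = run->debug.arch.far;  // watchpoints only
 *		...  // decode the syndrome, report or re-inject
 *	}
 */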

static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	kvm_pr_unimpl("Unknown exception class: esr: %#08x -- %s\n",
		      esr, esr_get_class_string(esr));

	kvm_inject_undefined(vcpu);
	return 1;
}

static int handle_sve(struct kvm_vcpu *vcpu)
{
	/* Until SVE is supported for guests: */
	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * Guest usage of a ptrauth instruction (which the guest EL1 did not
 * turn into a NOP). If we get here, it is because we didn't fix up
 * ptrauth on exit, and all we can do is give the guest an UNDEF.
 */
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

static exit_handle_fn arm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
	[ESR_ELx_EC_CP15_32]	= kvm_handle_cp15_32,
	[ESR_ELx_EC_CP15_64]	= kvm_handle_cp15_64,
	[ESR_ELx_EC_CP14_MR]	= kvm_handle_cp14_32,
	[ESR_ELx_EC_CP14_LS]	= kvm_handle_cp14_load_store,
	[ESR_ELx_EC_CP14_64]	= kvm_handle_cp14_64,
	[ESR_ELx_EC_HVC32]	= handle_hvc,
	[ESR_ELx_EC_SMC32]	= handle_smc,
	[ESR_ELx_EC_HVC64]	= handle_hvc,
	[ESR_ELx_EC_SMC64]	= handle_smc,
	[ESR_ELx_EC_SYS64]	= kvm_handle_sys_reg,
	[ESR_ELx_EC_SVE]	= handle_sve,
	[ESR_ELx_EC_IABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_DABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BKPT32]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_BRK64]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_FP_ASIMD]	= handle_no_fpsimd,
	[ESR_ELx_EC_PAC]	= kvm_handle_ptrauth,
};
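
/*
 * Aside, not part of the original file: "[0 ... ESR_ELx_EC_MAX] ="
 * above uses the GNU C range-designator extension to pre-fill every
 * slot with the fallback handler; the designated initializers that
 * follow then override individual classes, so no exception class can
 * ever index an empty entry.
 */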

static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);
	u8 esr_ec = ESR_ELx_EC(esr);

	return arm_exit_handlers[esr_ec];
}
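
/*
 * Illustrative note, not part of the original file: ESR_ELx_EC() is a
 * plain field extraction, which is what makes the direct table lookup
 * above safe for any syndrome value:
 *
 *	EC = (esr & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT;  // ESR[31:26]
 */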

/*
 * We may be single-stepping an emulated instruction. If the emulation
 * has been completed in the kernel, we can return to userspace with a
 * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
 * emulation first.
 */
static int handle_trap_exceptions(struct kvm_vcpu *vcpu)
{
	int handled;

	/*
	 * See ARM ARM B1.14.1: "Hyp traps on instructions
	 * that fail their condition code check"
	 */
	if (!kvm_condition_valid(vcpu)) {
		kvm_incr_pc(vcpu);
		handled = 1;
	} else {
		exit_handle_fn exit_handler;

		exit_handler = kvm_get_exit_handler(vcpu);
		handled = exit_handler(vcpu);
	}

	return handled;
}
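
/*
 * Example, not part of the original file: only AArch32 guest code can
 * fail this check, since kvm_condition_valid() is always true for
 * AArch64. A conditional trapped access such as (hypothetical guest
 * code)
 *
 *	cmp	r0, #0
 *	mcrne	p15, 0, r1, c7, c5, 0	@ may trap even if NE is false
 *
 * is architecturally allowed to trap even when its condition fails, so
 * KVM re-checks the condition and, on failure, skips the instruction
 * instead of emulating it.
 */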

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
{
	struct kvm_run *run = vcpu->run;

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	switch (exception_index) {
	case ARM_EXCEPTION_IRQ:
		return 1;
	case ARM_EXCEPTION_EL1_SERROR:
		return 1;
	case ARM_EXCEPTION_TRAP:
		return handle_trap_exceptions(vcpu);
	case ARM_EXCEPTION_HYP_GONE:
		/*
		 * EL2 has been reset to the hyp-stub. This happens when a guest
		 * is pre-empted by kvm_reboot()'s shutdown call.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return 0;
	case ARM_EXCEPTION_IL:
		/*
		 * We attempted an illegal exception return.  Guest state must
		 * have been corrupted somehow.  Give up.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return -EINVAL;
	default:
		kvm_pr_unimpl("Unsupported exception type: %d",
			      exception_index);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return 0;
	}
}
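
/*
 * Not part of the original file: a sketch of how the convention above
 * is consumed. The arch run loop in kvm_arch_vcpu_ioctl_run() does, in
 * essence,
 *
 *	ret = handle_exit(vcpu, exit_code);
 *	// ret > 0:  re-enter the guest
 *	// ret == 0: return to userspace, run->exit_reason is set
 *	// ret < 0:  fail the KVM_RUN ioctl with that error
 */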

/* For exit types that need handling before we can be preempted */
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
{
	if (ARM_SERROR_PENDING(exception_index)) {
		if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
			u64 disr = kvm_vcpu_get_disr(vcpu);

			kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
		} else {
			kvm_inject_vabt(vcpu);
		}

		return;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	if (exception_index == ARM_EXCEPTION_EL1_SERROR)
		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
}
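
/*
 * Background note, not part of the original file: with the RAS
 * extension, an SError that hits around guest entry/exit can be
 * deferred by an error synchronization barrier and recorded in
 * DISR_EL1 instead of being taken; kvm_vcpu_get_disr() reads that
 * deferred syndrome and disr_to_esr() repacks it into ESR_ELx layout,
 * so kvm_handle_guest_serror() classifies both paths identically.
 */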

void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
					      u64 elr_virt, u64 elr_phys,
					      u64 par, uintptr_t vcpu,
					      u64 far, u64 hpfar)
{
	u64 elr_in_kimg = __phys_to_kimg(elr_phys);
	u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr_virt;
	u64 mode = spsr & PSR_MODE_MASK;

	/*
	 * The nVHE hyp symbols are not included by kallsyms to avoid issues
	 * with aliasing. That means that the symbols cannot be printed with the
	 * "%pS" format specifier, so fall back to the vmlinux address if
	 * there's no better option.
	 */
	if (mode != PSR_MODE_EL2t && mode != PSR_MODE_EL2h) {
		kvm_err("Invalid host exception to nVHE hyp!\n");
	} else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
		   (esr & ESR_ELx_BRK64_ISS_COMMENT_MASK) == BUG_BRK_IMM) {
		const char *file = NULL;
		unsigned int line = 0;

		/* All hyp bugs, including warnings, are treated as fatal. */
		if (!is_protected_kvm_enabled() ||
		    IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
			struct bug_entry *bug = find_bug(elr_in_kimg);

			if (bug)
				bug_get_file_line(bug, &file, &line);
		}

		if (file)
			kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);
		else
			kvm_err("nVHE hyp BUG at: %016llx!\n", elr_virt + hyp_offset);
	} else {
		kvm_err("nVHE hyp panic at: %016llx!\n", elr_virt + hyp_offset);
	}

	/*
	 * Hyp has panicked and we're going to handle that by panicking the
	 * kernel. The kernel offset will be revealed in the panic so we're
	 * also safe to reveal the hyp offset as a debugging aid for translating
	 * hyp VAs to vmlinux addresses.
	 */
	kvm_err("Hyp Offset: 0x%llx\n", hyp_offset);

	panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n",
	      spsr, elr_virt, esr, far, hpfar, par, vcpu);
}
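
/*
 * Worked arithmetic, not part of the original file, for hyp_offset as
 * computed above:
 *
 *	elr_in_kimg                    KASLR'd kernel-image alias of the
 *	                               faulting hyp instruction
 *	elr_in_kimg - kaslr_offset()   its link-time vmlinux address
 *	hyp_offset                     constant such that, for any hyp VA
 *	                               x, x + hyp_offset is the matching
 *	                               vmlinux address
 *
 * The kvm_err() lines above already add hyp_offset in; the raw hyp VAs
 * in the final panic() string can be resolved (e.g. with addr2line)
 * by adding the printed "Hyp Offset" to them.
 */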