xref: /linux/arch/arm64/kvm/hyp/nvhe/switch.c (revision 86cc796e5e9bff0c3993607f4301b8188095516c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/switch.h>
#include <hyp/sysreg-sr.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>

#include <nvhe/mem_protect.h>

/* Non-VHE specific context */
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);

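/*
 * Fine-grained trap (FEAT_FGT / FEAT_FGT2) masks. The hypervisor keeps
 * its own copies, populated from the host's at initialisation time, so
 * that the FGT registers can be programmed without trusting host memory
 * at runtime.
 */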
struct fgt_masks hfgrtr_masks;
struct fgt_masks hfgwtr_masks;
struct fgt_masks hfgitr_masks;
struct fgt_masks hdfgrtr_masks;
struct fgt_masks hdfgwtr_masks;
struct fgt_masks hafgrtr_masks;
struct fgt_masks hfgrtr2_masks;
struct fgt_masks hfgwtr2_masks;
struct fgt_masks hfgitr2_masks;
struct fgt_masks hdfgrtr2_masks;
struct fgt_masks hdfgwtr2_masks;

extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);

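/*
 * Configure EL2 to trap on behalf of the guest: install the guest's
 * MDCR_EL2 (saving the host's value first), the common trap state, the
 * CPTR_EL2 FP/SVE traps, and this CPU's hyp vector, which may be a
 * Spectre-hardened variant.
 */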
static void __activate_traps(struct kvm_vcpu *vcpu)
{
	___activate_traps(vcpu, vcpu->arch.hcr_el2);

	*host_data_ptr(host_debug_state.mdcr_el2) = read_sysreg(mdcr_el2);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);

	__activate_traps_common(vcpu);
	__activate_cptr_traps(vcpu);

	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;

		isb();
		/*
		 * At this stage, and thanks to the above isb(), S2 is
		 * configured and enabled. We can now restore the guest's S1
		 * configuration: SCTLR, and only then TCR.
		 */
		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1),	SYS_SCTLR);
		isb();
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1),	SYS_TCR);
	}
}

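/*
 * Undo __activate_traps: restore the host's MDCR_EL2, HCR_EL2 and
 * exception vectors so EL2 is ready to service the host again.
 */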
static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
	extern char __kvm_hyp_host_vector[];

	___deactivate_traps(vcpu);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * Set the TCR and SCTLR registers in the exact opposite
		 * sequence to __activate_traps (first prevent walks,
		 * then force the MMU on). A generous sprinkling of isb()
		 * ensures that things happen in this exact order.
		 */
		val = read_sysreg_el1(SYS_TCR);
		write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR);
		isb();
		val = read_sysreg_el1(SYS_SCTLR);
		write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR);
		isb();
	}

	write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);

	__deactivate_traps_common(vcpu);

	write_sysreg_hcr(this_cpu_ptr(&kvm_init_params)->hcr_el2);

	__deactivate_cptr_traps(vcpu);
	write_sysreg(__kvm_hyp_host_vector, vbar_el2);
}

/* Save VGICv3 state on non-VHE systems */
static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/*
 * Disable host events, enable guest events
 */
#ifdef CONFIG_HW_PERF_EVENTS
static bool __pmu_switch_to_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenclr_el0);

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenset_el0);

	return (pmu->events_host || pmu->events_guest);
}

/*
 * Disable guest events, enable host events
 */
static void __pmu_switch_to_host(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenclr_el0);

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenset_el0);
}
#else
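/*
 * Without CONFIG_HW_PERF_EVENTS there are no events to switch. The
 * ({ false; }) statement expression keeps callers such as
 * "pmu_switch_needed = __pmu_switch_to_guest(vcpu);" compiling
 * unchanged while always reporting that no switch-back is needed.
 */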
#define __pmu_switch_to_guest(v)	({ false; })
#define __pmu_switch_to_host(v)		do {} while (0)
#endif

/*
 * Handler for protected VM MSR, MRS or System instruction execution in AArch64.
 *
 * Returns true if the hypervisor has handled the exit, and control should go
 * back to the guest, or false if it hasn't.
 */
static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/*
	 * Make sure we handle the exit for workarounds before the pKVM
	 * handling, as the latter could decide to UNDEF.
	 */
	return (kvm_hyp_handle_sysreg(vcpu, exit_code) ||
		kvm_handle_pvm_sysreg(vcpu, exit_code));
}

static const exit_handler_fn hyp_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]		= NULL,
	[ESR_ELx_EC_CP15_32]		= kvm_hyp_handle_cp15_32,
	[ESR_ELx_EC_SYS64]		= kvm_hyp_handle_sysreg,
	[ESR_ELx_EC_SVE]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
	[ESR_ELx_EC_MOPS]		= kvm_hyp_handle_mops,
};

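/*
 * Protected VMs get a restricted set of handlers: no AArch32 (CP15)
 * entry, since protected guests may not run in AArch32 (see
 * fixup_guest_exit() below), and SVE traps are routed through
 * kvm_handle_pvm_restricted().
 */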
static const exit_handler_fn pvm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]		= NULL,
	[ESR_ELx_EC_SYS64]		= kvm_handle_pvm_sys64,
	[ESR_ELx_EC_SVE]		= kvm_handle_pvm_restricted,
	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
	[ESR_ELx_EC_MOPS]		= kvm_hyp_handle_mops,
};

static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu_is_protected(vcpu)))
		return pvm_exit_handlers;

	return hyp_exit_handlers;
}

static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);

	synchronize_vcpu_pstate(vcpu, exit_code);

	/*
	 * Some guests (e.g., protected VMs) are not allowed to run in
	 * AArch32.  The ARMv8 architecture does not give the hypervisor a
	 * mechanism to prevent a guest from dropping to AArch32 EL0 if
	 * implemented by the CPU. If the hypervisor spots a guest in such a
	 * state, ensure it is handled, and don't trust the host to spot or
	 * fix it.  The check below is based on the one in
	 * kvm_arch_vcpu_ioctl_run().
	 */
	if (unlikely(vcpu_is_protected(vcpu) && vcpu_mode_is_32bit(vcpu))) {
		/*
		 * As we have caught the guest red-handed, decide that it isn't
		 * fit for purpose anymore by making the vcpu invalid. The VMM
		 * can try to fix it by re-initializing the vcpu with
		 * KVM_ARM_VCPU_INIT, although this is likely not possible for
		 * protected VMs.
		 *
		 * Keep only a pending SError flag in *exit_code and force the
		 * exit reason to ARM_EXCEPTION_IL, so the run loop treats
		 * this as an illegal exit.
		 */
		vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
		*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
		*exit_code |= ARM_EXCEPTION_IL;
	}

	return __fixup_guest_exit(vcpu, exit_code, handlers);
}

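/*
 * Entered from the host via the __kvm_vcpu_run hypercall, dispatched
 * by handle___kvm_vcpu_run() in hyp-main.c.
 */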
/* Switch to the guest for legacy non-VHE systems */
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	struct kvm_s2_mmu *mmu;
	bool pmu_switch_needed;
	u64 exit_code;

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal lower-priority interrupts to the CPU, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 */
	if (system_uses_irq_prio_masking()) {
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
		pmr_sync();
	}

	host_ctxt = host_data_ptr(host_ctxt);
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	pmu_switch_needed = __pmu_switch_to_guest(vcpu);

	__sysreg_save_state_nvhe(host_ctxt);
	/*
	 * We must flush and disable the SPE buffer for nVHE, as
	 * the translation regime (EL1&0) is going to be loaded with
	 * that of the guest. And we must do this before we change the
	 * translation regime to EL2 (via MDCR_EL2_E2PB == 0) and
	 * before we load the guest's Stage 1.
	 */
	__debug_save_host_buffers_nvhe(vcpu);

	/*
	 * We're about to restore some new MMU state. Make sure
	 * ongoing page-table walks that have started before we
	 * trapped to EL2 have completed. This also synchronises the
	 * above disabling of BRBE, SPE and TRBE.
	 *
	 * See DDI0487I.a D8.1.5 "Out-of-context translation regimes",
	 * rule R_LFHQG and subsequent information statements.
	 */
	dsb(nsh);

	__kvm_adjust_pc(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 *
	 * Also, and in order to be able to deal with erratum #1319537 (A57)
	 * and #1319367 (A72), we must ensure that all VM-related sysregs are
	 * restored before we enable S2 translation.
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_state_nvhe(guest_ctxt);

	mmu = kern_hyp_va(vcpu->arch.hw_mmu);
	__load_stage2(mmu, kern_hyp_va(mmu->arch));
	__activate_traps(vcpu);

	__hyp_vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	__debug_switch_to_guest(vcpu);

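	/*
	 * fixup_guest_exit() returns true when the exit has been handled
	 * entirely at EL2, in which case we re-enter the guest without
	 * returning to the host.
	 */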
	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__sysreg_save_state_nvhe(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__hyp_vgic_save_state(vcpu);

	/*
	 * Same thing as before the guest run: we're about to switch
	 * the MMU context, so let's make sure we don't have any
	 * ongoing EL1&0 translations.
	 */
	dsb(nsh);

	__deactivate_traps(vcpu);
	__load_host_stage2();

	__sysreg_restore_state_nvhe(host_ctxt);

	if (guest_owns_fp_regs())
		__fpsimd_save_fpexc32(vcpu);

	__debug_switch_to_host(vcpu);
	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_restore_host_buffers_nvhe(vcpu);

	if (pmu_switch_needed)
		__pmu_switch_to_host(vcpu);

	/* Returning to the host will clear PSR.I; remask PMR if needed */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQOFF);

	host_ctxt->__hyp_running_vcpu = NULL;

	return exit_code;
}

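/*
 * Terminal error path for the nVHE hypervisor: restore enough host
 * context for a useful report (including the hyp stacktrace prepared
 * below), then hand over to __hyp_do_panic(), which does not return.
 */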
asmlinkage void __noreturn hyp_panic(void)
{
	u64 spsr = read_sysreg_el2(SYS_SPSR);
	u64 elr = read_sysreg_el2(SYS_ELR);
	u64 par = read_sysreg_par();
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

	host_ctxt = host_data_ptr(host_ctxt);
	vcpu = host_ctxt->__hyp_running_vcpu;

	if (vcpu) {
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__load_host_stage2();
		__sysreg_restore_state_nvhe(host_ctxt);
	}

	/* Prepare to dump kvm nvhe hyp stacktrace */
	kvm_nvhe_prepare_backtrace((unsigned long)__builtin_frame_address(0),
				   _THIS_IP_);

	__hyp_do_panic(host_ctxt, spsr, elr, par);
	unreachable();
}

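/*
 * Entered (on the per-CPU overflow stack) once a hyp stack overflow has
 * been detected, so that the overflow can be distinguished from other
 * panics; the hyp stack itself can no longer be trusted at this point.
 */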
asmlinkage void __noreturn hyp_panic_bad_stack(void)
{
	hyp_panic();
}

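/* C entry point for exceptions taken at EL2 that have no dedicated handler. */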
asmlinkage void kvm_unexpected_el2_exception(void)
{
	__kvm_unexpected_el2_exception();
}