xref: /linux/arch/arm64/kvm/hyp/nvhe/switch.c (revision ef3be86021c3bdf384c36d9d4aa1ee9fe65b95af)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/switch.h>
#include <hyp/sysreg-sr.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>

#include <nvhe/fixed_config.h>
#include <nvhe/mem_protect.h>

/* Non-VHE specific context */
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);

extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);

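/*
 * Configure the EL2 trap controls for entering the guest: build the
 * CPTR_EL2/CPACR value (trace, SME and FP/SVE traps, depending on who
 * owns the FP regs), install it along with the hyp vectors, and, on
 * CPUs affected by the speculative AT workaround, restore the guest's
 * S1 context only once S2 is up.
 */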
static void __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	___activate_traps(vcpu, vcpu->arch.hcr_el2);
	__activate_traps_common(vcpu);

	val = vcpu->arch.cptr_el2;
	val |= CPTR_EL2_TAM;	/* Same bit irrespective of E2H */
	val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
	if (cpus_have_final_cap(ARM64_SME)) {
		if (has_hvhe())
			val &= ~CPACR_ELx_SMEN;
		else
			val |= CPTR_EL2_TSM;
	}

	if (!guest_owns_fp_regs()) {
		if (has_hvhe())
			val &= ~(CPACR_ELx_FPEN | CPACR_ELx_ZEN);
		else
			val |= CPTR_EL2_TFP | CPTR_EL2_TZ;

		__activate_traps_fpsimd32(vcpu);
	}

	kvm_write_cptr_el2(val);
	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;

		isb();
		/*
		 * At this stage, and thanks to the above isb(), S2 is
		 * configured and enabled. We can now restore the guest's S1
		 * configuration: SCTLR, and only then TCR.
		 */
		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1),	SYS_SCTLR);
		isb();
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1),	SYS_TCR);
	}
}

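/*
 * Undo __activate_traps(): restore the host's trap configuration,
 * HCR_EL2 and vectors before returning to the host.
 */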
static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
	extern char __kvm_hyp_host_vector[];

	___deactivate_traps(vcpu);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * Set the TCR and SCTLR registers in the exact opposite
		 * sequence to __activate_traps() (first prevent walks,
		 * then force the MMU on). A generous sprinkling of isb()
		 * ensures that things happen in this exact order.
		 */
		val = read_sysreg_el1(SYS_TCR);
		write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR);
		isb();
		val = read_sysreg_el1(SYS_SCTLR);
		write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR);
		isb();
	}

	__deactivate_traps_common(vcpu);

	write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);

	kvm_reset_cptr_el2(vcpu);
	write_sysreg(__kvm_hyp_host_vector, vbar_el2);
}

/* Save VGICv3 state on non-VHE systems */
static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/*
 * Disable host events, enable guest events
 */
#ifdef CONFIG_HW_PERF_EVENTS
static bool __pmu_switch_to_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenclr_el0);

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenset_el0);

	return (pmu->events_host || pmu->events_guest);
}

/*
 * Disable guest events, enable host events
 */
static void __pmu_switch_to_host(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenclr_el0);

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenset_el0);
}
#else
#define __pmu_switch_to_guest(v)	({ false; })
#define __pmu_switch_to_host(v)		do {} while (0)
#endif

/*
 * Handler for protected VM MSR, MRS or System instruction execution in AArch64.
 *
 * Returns true if the hypervisor has handled the exit, and control should go
 * back to the guest, or false if it hasn't.
 */
static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/*
	 * Make sure we handle the exit for workarounds and ptrauth
	 * before the pKVM handling, as the latter could decide to
	 * UNDEF.
	 */
	return (kvm_hyp_handle_sysreg(vcpu, exit_code) ||
		kvm_handle_pvm_sysreg(vcpu, exit_code));
}

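/*
 * Save the host's FP/SIMD state before handing the FP/SIMD unit to the
 * guest: the full SVE state in protected mode, FPSIMD otherwise, plus
 * FPMR where the VM is allowed to use it.
 */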
static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
{
	/*
	 * Non-protected kvm relies on the host restoring its sve state.
	 * Protected kvm restores the host's sve state so as not to reveal
	 * that fpsimd was used by a guest, nor to leak the upper sve bits.
	 */
	if (unlikely(is_protected_kvm_enabled() && system_supports_sve())) {
		__hyp_sve_save_host();

		/* Re-enable SVE traps if not supported for the guest vcpu. */
		if (!vcpu_has_sve(vcpu))
			cpacr_clear_set(CPACR_ELx_ZEN, 0);

	} else {
		__fpsimd_save_state(*host_data_ptr(fpsimd_state));
	}

	if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm))) {
		u64 val = read_sysreg_s(SYS_FPMR);

		if (unlikely(is_protected_kvm_enabled()))
			*host_data_ptr(fpmr) = val;
		else
			**host_data_ptr(fpmr_ptr) = val;
	}
}

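/*
 * Exit handlers, indexed by ESR_ELx.EC, for non-protected guests. A
 * NULL entry means the exit cannot be handled here and must be
 * forwarded to the host.
 */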
static const exit_handler_fn hyp_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]		= NULL,
	[ESR_ELx_EC_CP15_32]		= kvm_hyp_handle_cp15_32,
	[ESR_ELx_EC_SYS64]		= kvm_hyp_handle_sysreg,
	[ESR_ELx_EC_SVE]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
	[ESR_ELx_EC_MOPS]		= kvm_hyp_handle_mops,
};

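/*
 * Exit handlers for protected guests: sysreg and SVE exits get the
 * stricter pKVM handling instead of the generic one.
 */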
static const exit_handler_fn pvm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]		= NULL,
	[ESR_ELx_EC_SYS64]		= kvm_handle_pvm_sys64,
	[ESR_ELx_EC_SVE]		= kvm_handle_pvm_restricted,
	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
	[ESR_ELx_EC_MOPS]		= kvm_hyp_handle_mops,
};

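/* Pick the exit handler table matching the vCPU's protected state. */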
static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu_is_protected(vcpu)))
		return pvm_exit_handlers;

	return hyp_exit_handlers;
}

/*
 * Some guests (e.g., protected VMs) are not allowed to run in AArch32.
 * The ARMv8 architecture does not give the hypervisor a mechanism to prevent a
 * guest from dropping to AArch32 EL0 if implemented by the CPU. If the
 * hypervisor spots a guest in such a state, ensure it is handled here, and
 * don't trust the host to spot or fix it.  The check below is based on the one
 * in kvm_arch_vcpu_ioctl_run().
 *
 * If the guest ran in AArch32 when it shouldn't have, the exit code is
 * adjusted so that the run loop exits to the host with ARM_EXCEPTION_IL
 * instead of re-entering the guest.
 */
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (unlikely(vcpu_is_protected(vcpu) && vcpu_mode_is_32bit(vcpu))) {
		/*
		 * As we have caught the guest red-handed, decide that it isn't
		 * fit for purpose anymore by making the vcpu invalid. The VMM
		 * can try and fix it by re-initializing the vcpu with
		 * KVM_ARM_VCPU_INIT, however, this is likely not possible for
		 * protected VMs.
		 */
		vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
		*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
		*exit_code |= ARM_EXCEPTION_IL;
	}
}

/* Switch to the guest for legacy non-VHE systems */
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	struct kvm_s2_mmu *mmu;
	bool pmu_switch_needed;
	u64 exit_code;

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal the CPU of interrupts of lower priority, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 */
	if (system_uses_irq_prio_masking()) {
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
		pmr_sync();
	}

	host_ctxt = host_data_ptr(host_ctxt);
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	pmu_switch_needed = __pmu_switch_to_guest(vcpu);

	__sysreg_save_state_nvhe(host_ctxt);
	/*
	 * We must flush and disable the SPE buffer for nVHE, as
	 * the translation regime (EL1&0) is going to be loaded with
	 * that of the guest. And we must do this before we change the
	 * translation regime to EL2 (via MDCR_EL2_E2PB == 0) and
	 * before we load guest Stage1.
	 */
	__debug_save_host_buffers_nvhe(vcpu);

	/*
	 * We're about to restore some new MMU state. Make sure
	 * ongoing page-table walks that have started before we
	 * trapped to EL2 have completed. This also synchronises the
	 * above disabling of SPE and TRBE.
	 *
	 * See DDI0487I.a D8.1.5 "Out-of-context translation regimes",
	 * rule R_LFHQG and subsequent information statements.
	 */
	dsb(nsh);

	__kvm_adjust_pc(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 *
	 * Also, and in order to be able to deal with erratum #1319537 (A57)
	 * and #1319367 (A72), we must ensure that all VM-related sysregs are
	 * restored before we enable S2 translation.
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_state_nvhe(guest_ctxt);

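	/* Load the guest's stage-2 MMU context and engage the guest traps. */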
	mmu = kern_hyp_va(vcpu->arch.hw_mmu);
	__load_stage2(mmu, kern_hyp_va(mmu->arch));
	__activate_traps(vcpu);

	__hyp_vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	__debug_switch_to_guest(vcpu);

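	/*
	 * Enter the guest; fixup_guest_exit() returns true when the exit
	 * has been handled entirely at EL2 and the guest can simply be
	 * re-entered.
	 */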
	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__sysreg_save_state_nvhe(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__hyp_vgic_save_state(vcpu);

	/*
	 * Same thing as before the guest run: we're about to switch
	 * the MMU context, so let's make sure we don't have any
	 * ongoing EL1&0 translations.
	 */
	dsb(nsh);

	__deactivate_traps(vcpu);
	__load_host_stage2();

	__sysreg_restore_state_nvhe(host_ctxt);

	if (guest_owns_fp_regs())
		__fpsimd_save_fpexc32(vcpu);

	__debug_switch_to_host(vcpu);
	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_restore_host_buffers_nvhe(vcpu);

	if (pmu_switch_needed)
		__pmu_switch_to_host(vcpu);

	/* Returning to host will clear PSR.I, remask PMR if needed */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQOFF);

	host_ctxt->__hyp_running_vcpu = NULL;

	return exit_code;
}

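/*
 * EL2 panic path: if a vCPU was running, restore enough host context
 * (traps, stage-2, sysregs) for the panic to be reported sanely.
 */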
asmlinkage void __noreturn hyp_panic(void)
{
	u64 spsr = read_sysreg_el2(SYS_SPSR);
	u64 elr = read_sysreg_el2(SYS_ELR);
	u64 par = read_sysreg_par();
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

	host_ctxt = host_data_ptr(host_ctxt);
	vcpu = host_ctxt->__hyp_running_vcpu;

	if (vcpu) {
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__load_host_stage2();
		__sysreg_restore_state_nvhe(host_ctxt);
	}

	/* Prepare to dump kvm nvhe hyp stacktrace */
	kvm_nvhe_prepare_backtrace((unsigned long)__builtin_frame_address(0),
				   _THIS_IP_);

	__hyp_do_panic(host_ctxt, spsr, elr, par);
	unreachable();
}

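/* Entered on the hyp overflow stack when a stack overflow is detected. */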
asmlinkage void __noreturn hyp_panic_bad_stack(void)
{
	hyp_panic();
}

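/*
 * C part of the unexpected-exception path, invoked from the EL2
 * exception-handling assembly; defers to the shared helper.
 */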
asmlinkage void kvm_unexpected_el2_exception(void)
{
	__kvm_unexpected_el2_exception();
}