// SPDX-License-Identifier: GPL-2.0
/*
 * arch/arm64/kvm/fpsimd.c: Guest/host FPSIMD context coordination helpers
 *
 * Copyright 2018 Arm Limited
 * Author: Dave Martin <Dave.Martin@arm.com>
 */
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/kvm_host.h>
#include <asm/fpsimd.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>

/*
 * Prepare vcpu for saving the host's FPSIMD state and loading the guest's.
 * The actual loading is done by the FPSIMD access trap taken to hyp.
 *
 * Here, we just set the correct metadata to indicate that the FPSIMD
 * state in the cpu regs (if any) belongs to current on the host.
 */
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
{
	BUG_ON(!current->mm);

	if (!system_supports_fpsimd())
		return;

	/*
	 * Ensure that any host FPSIMD/SVE/SME state is saved and unbound such
	 * that the host kernel is responsible for restoring this state upon
	 * return to userspace, and the hyp code doesn't need to save anything.
	 *
	 * When the host may use SME, fpsimd_save_and_flush_cpu_state() ensures
	 * that PSTATE.{SM,ZA} == {0,0}.
	 */
	fpsimd_save_and_flush_cpu_state();
	*host_data_ptr(fp_owner) = FP_STATE_FREE;

	WARN_ON_ONCE(system_supports_sme() && read_sysreg_s(SYS_SVCR));
}
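
/*
 * A minimal sketch, not part of this file: assuming the FP_STATE_FREE /
 * FP_STATE_GUEST_OWNED ownership states used here, this is roughly how
 * the FPSIMD access trap taken to hyp can consume the metadata set up
 * above. The helper __example_load_guest_fp() is hypothetical.
 */
#if 0 /* illustrative only */
static void __example_handle_fpsimd_trap(struct kvm_vcpu *vcpu)
{
	/*
	 * Host state was already saved and unbound in
	 * kvm_arch_vcpu_load_fp(), so nothing needs saving here: just
	 * load the guest regs and claim ownership for the guest.
	 */
	if (*host_data_ptr(fp_owner) == FP_STATE_FREE) {
		__example_load_guest_fp(vcpu);	/* hypothetical helper */
		*host_data_ptr(fp_owner) = FP_STATE_GUEST_OWNED;
	}
}
#endif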

/*
 * Called just before entering the guest once we are no longer preemptible
 * and interrupts are disabled. If we have managed to run anything using
 * FP while we were preemptible (such as off the back of an interrupt),
 * then neither the host nor the guest own the FP hardware (and it was the
 * responsibility of the code that used FP to save the existing state).
 */
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu)
{
	if (test_thread_flag(TIF_FOREIGN_FPSTATE))
		*host_data_ptr(fp_owner) = FP_STATE_FREE;
}
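
/*
 * For illustration: kernel-mode NEON is the typical way FP state gets
 * used "off the back of an interrupt". A softirq running something like
 * the below between vcpu load and guest entry saves whatever state was
 * live and sets TIF_FOREIGN_FPSTATE, which the hook above then observes.
 * kernel_neon_begin()/kernel_neon_end() are the real APIs (<asm/neon.h>);
 * the surrounding function is hypothetical.
 */
#if 0 /* illustrative only */
static void example_softirq_neon_user(void)
{
	kernel_neon_begin();	/* saves live FP state, flags it foreign */
	/* ... NEON-accelerated work, e.g. crypto ... */
	kernel_neon_end();
}
#endif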

/*
 * Called just after exiting the guest. If the guest FPSIMD state
 * was loaded, update the host's context tracking data to mark the CPU
 * FPSIMD regs as dirty and belonging to vcpu, so that they will be
 * written back if the kernel clobbers them due to kernel-mode NEON
 * before re-entry into the guest.
 */
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
{
	struct cpu_fp_state fp_state;

	WARN_ON_ONCE(!irqs_disabled());

	if (guest_owns_fp_regs()) {
		/*
		 * Currently we do not support SME guests, so SVCR is
		 * always 0 and we just need a variable to point to.
		 */
		fp_state.st = &vcpu->arch.ctxt.fp_regs;
		fp_state.sve_state = vcpu->arch.sve_state;
		fp_state.sve_vl = vcpu->arch.sve_max_vl;
		fp_state.sme_state = NULL;
		fp_state.svcr = __ctxt_sys_reg(&vcpu->arch.ctxt, SVCR);
		fp_state.fpmr = __ctxt_sys_reg(&vcpu->arch.ctxt, FPMR);
		fp_state.fp_type = &vcpu->arch.fp_type;

		if (vcpu_has_sve(vcpu))
			fp_state.to_save = FP_STATE_SVE;
		else
			fp_state.to_save = FP_STATE_FPSIMD;

		fpsimd_bind_state_to_cpu(&fp_state);

		clear_thread_flag(TIF_FOREIGN_FPSTATE);
	}
}
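
/*
 * A minimal sketch of what guest_owns_fp_regs() is assumed to check,
 * given the fp_owner bookkeeping used throughout this file. This is an
 * illustration of its shape, not a copy of the real helper.
 */
#if 0 /* illustrative only */
static inline bool example_guest_owns_fp_regs(void)
{
	return *host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED;
}
#endif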

/*
 * Write back the vcpu FPSIMD regs if they are dirty, and invalidate the
 * cpu FPSIMD regs so that they can't be spuriously reused if this vcpu
 * disappears and another task or vcpu appears that recycles the same
 * struct fpsimd_state.
 */
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
{
	unsigned long flags;

	local_irq_save(flags);

	if (guest_owns_fp_regs()) {
		/*
		 * Flush (save and invalidate) the fpsimd/sve state so that if
		 * the host tries to use fpsimd/sve, it's not using stale data
		 * from the guest.
		 *
		 * Flushing the state sets the TIF_FOREIGN_FPSTATE bit for the
		 * context unconditionally, in both nVHE and VHE. This allows
		 * the kernel to restore the fpsimd/sve state, including ZCR_EL1
		 * when needed.
		 */
		fpsimd_save_and_flush_cpu_state();
	}

	local_irq_restore(flags);
}
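
/*
 * For illustration: how the four hooks in this file are ordered around a
 * single guest run by the arch-generic vcpu code. The function below is
 * a hypothetical condensation, not kernel code.
 */
#if 0 /* illustrative only */
static void example_vcpu_run_once(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_load_fp(vcpu);	/* preemptible: host state saved, regs freed */

	local_irq_disable();
	kvm_arch_vcpu_ctxflush_fp(vcpu);	/* drop ownership if FP was used meanwhile */
	/* ... enter the guest; hyp loads guest FP state lazily on trap ... */
	kvm_arch_vcpu_ctxsync_fp(vcpu);	/* record guest ownership on exit */
	local_irq_enable();

	kvm_arch_vcpu_put_fp(vcpu);	/* write back guest regs, invalidate CPU state */
}
#endif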