// SPDX-License-Identifier: GPL-2.0
/*
 * arch/arm64/kvm/fpsimd.c: Guest/host FPSIMD context coordination helpers
 *
 * Copyright 2018 Arm Limited
 * Author: Dave Martin <Dave.Martin@arm.com>
 */
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/kvm_host.h>
#include <asm/fpsimd.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>

/*
 * Called on entry to KVM_RUN unless this vcpu previously ran at least
 * once and the most recent prior KVM_RUN for this vcpu was called from
 * the same task as current (highly likely).
 *
 * This is guaranteed to execute before kvm_arch_vcpu_load_fp(vcpu),
 * such that on entering hyp the relevant parts of current are already
 * mapped.
 */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
{
        struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;
        int ret;

        /* pKVM has its own tracking of the host fpsimd state. */
        if (is_protected_kvm_enabled())
                return 0;

        /* Make sure the host task fpsimd state is visible to hyp: */
        ret = kvm_share_hyp(fpsimd, fpsimd + 1);
        if (ret)
                return ret;

        return 0;
}

/*
 * Prepare vcpu for saving the host's FPSIMD state and loading the guest's.
 * The actual loading is done by the FPSIMD access trap taken to hyp.
 *
 * Here, we just set the correct metadata to indicate that the FPSIMD
 * state in the cpu regs (if any) belongs to current on the host.
 */
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
{
        BUG_ON(!current->mm);

        if (!system_supports_fpsimd())
                return;

        fpsimd_kvm_prepare();

        /*
         * We will check TIF_FOREIGN_FPSTATE just before entering the
         * guest in kvm_arch_vcpu_ctxflush_fp() and override this to
         * FP_STATE_FREE if the flag is set.
         */
        *host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
        *host_data_ptr(fpsimd_state) = kern_hyp_va(&current->thread.uw.fpsimd_state);
        *host_data_ptr(fpmr_ptr) = kern_hyp_va(&current->thread.uw.fpmr);

        vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
        if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
                vcpu_set_flag(vcpu, HOST_SVE_ENABLED);

        if (system_supports_sme()) {
                vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
                if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
                        vcpu_set_flag(vcpu, HOST_SME_ENABLED);

                /*
                 * If PSTATE.SM is enabled then save any pending FP
                 * state and disable PSTATE.SM. If we leave PSTATE.SM
                 * enabled and the guest does not enable SME via
                 * CPACR_EL1.SMEN then operations that should be valid
                 * may generate SME traps from EL1 to EL1 which we
                 * can't intercept and which would confuse the guest.
                 *
                 * Do the same for PSTATE.ZA in the case where there
                 * is state in the registers which has not already
                 * been saved; this is very unlikely to happen.
                 */
                if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
                        *host_data_ptr(fp_owner) = FP_STATE_FREE;
                        fpsimd_save_and_flush_cpu_state();
                }
        }

        /*
         * If normal guests gain SME support, maintain this behavior for pKVM
         * guests, which don't support SME.
         */
        WARN_ON(is_protected_kvm_enabled() && system_supports_sme() &&
                read_sysreg_s(SYS_SVCR));
}
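
/*
 * Illustrative sketch only (guarded out with #if 0, not part of this
 * file): the userspace side of the KVM_RUN path that drives the two
 * hooks above. Each KVM_RUN ioctl may trigger
 * kvm_arch_vcpu_run_map_fp() (on the first run, or when run from a
 * different task than before) and then kvm_arch_vcpu_load_fp() before
 * guest entry. The run_vcpu() helper name is made up for illustration;
 * VM and vcpu creation are elided.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int run_vcpu(int vcpu_fd)
{
        for (;;) {
                /*
                 * Entering KVM_RUN is where the host task's
                 * fpsimd_state is shared with hyp (kvm_share_hyp())
                 * and fp_owner is set to FP_STATE_HOST_OWNED.
                 */
                if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                        return -1;

                /* ... dispatch on the mmap'ed struct kvm_run exit_reason ... */
        }
}
#endif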

/*
 * Called just before entering the guest once we are no longer preemptible
 * and interrupts are disabled. If we have managed to run anything using
 * FP while we were preemptible (such as off the back of an interrupt),
 * then neither the host nor the guest owns the FP hardware (and it was the
 * responsibility of the code that used FP to save the existing state).
 */
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu)
{
        if (test_thread_flag(TIF_FOREIGN_FPSTATE))
                *host_data_ptr(fp_owner) = FP_STATE_FREE;
}

/*
 * Called just after exiting the guest. If the guest FPSIMD state
 * was loaded, update the host's context tracking data to mark the CPU
 * FPSIMD regs as dirty and belonging to vcpu, so that they will be
 * written back if the kernel clobbers them due to kernel-mode NEON
 * before re-entry into the guest.
 */
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
{
        struct cpu_fp_state fp_state;

        WARN_ON_ONCE(!irqs_disabled());

        if (guest_owns_fp_regs()) {
                /*
                 * Currently we do not support SME guests, so SVCR is
                 * always 0 and we just need a variable to point to.
                 */
                fp_state.st = &vcpu->arch.ctxt.fp_regs;
                fp_state.sve_state = vcpu->arch.sve_state;
                fp_state.sve_vl = vcpu->arch.sve_max_vl;
                fp_state.sme_state = NULL;
                fp_state.svcr = &__vcpu_sys_reg(vcpu, SVCR);
                fp_state.fpmr = &__vcpu_sys_reg(vcpu, FPMR);
                fp_state.fp_type = &vcpu->arch.fp_type;

                if (vcpu_has_sve(vcpu))
                        fp_state.to_save = FP_STATE_SVE;
                else
                        fp_state.to_save = FP_STATE_FPSIMD;

                fpsimd_bind_state_to_cpu(&fp_state);

                clear_thread_flag(TIF_FOREIGN_FPSTATE);
        }
}
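
/*
 * Illustrative sketch only (guarded out with #if 0, not part of this
 * file): the kernel-mode NEON pattern that the binding above defends
 * against. kernel_neon_begin() saves whatever FPSIMD/SVE state is live
 * in the CPU regs off to its owner (the vcpu, once
 * fpsimd_bind_state_to_cpu() has run) and sets TIF_FOREIGN_FPSTATE, so
 * the guest state is reloaded before the next guest entry. The
 * example_neon_user() name is made up for illustration.
 */
#if 0
#include <asm/neon.h>

static void example_neon_user(void)
{
        kernel_neon_begin();    /* saves the current owner's regs, takes the unit */

        /* ... NEON/FPSIMD-using work, e.g. crypto or RAID syndrome ... */

        kernel_neon_end();      /* regs stay invalidated; owner reloads lazily */
}
#endif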

/*
 * Write back the vcpu FPSIMD regs if they are dirty, and invalidate the
 * cpu FPSIMD regs so that they can't be spuriously reused if this vcpu
 * disappears and another task or vcpu appears that recycles the same
 * struct fpsimd_state.
 */
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
{
        unsigned long flags;

        local_irq_save(flags);

        /*
         * If we have VHE then the Hyp code will reset CPACR_EL1 to
         * the default value and we need to re-enable SME.
         */
        if (has_vhe() && system_supports_sme()) {
                /* Also restore EL0 state seen on entry */
                if (vcpu_get_flag(vcpu, HOST_SME_ENABLED))
                        sysreg_clear_set(CPACR_EL1, 0, CPACR_ELx_SMEN);
                else
                        sysreg_clear_set(CPACR_EL1,
                                         CPACR_EL1_SMEN_EL0EN,
                                         CPACR_EL1_SMEN_EL1EN);
                isb();
        }

        if (guest_owns_fp_regs()) {
                if (vcpu_has_sve(vcpu)) {
                        u64 zcr = read_sysreg_el1(SYS_ZCR);

                        /*
                         * If the vCPU is in the hyp context then ZCR_EL1 is
                         * loaded with its vEL2 counterpart.
                         */
                        __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr;

                        /*
                         * Restore the VL that was saved when bound to the CPU,
                         * which is the maximum VL for the guest. Because the
                         * layout of the data when saving the sve state depends
                         * on the VL, we need to use a consistent (i.e., the
                         * maximum) VL.
                         * Note that this means that at guest exit ZCR_EL1 is
                         * not necessarily the same as on guest entry.
                         *
                         * ZCR_EL2 holds the guest hypervisor's VL when running
                         * a nested guest, which could be smaller than the
                         * max for the vCPU. Similar to above, we first need to
                         * switch to a VL consistent with the layout of the
                         * vCPU's SVE state. KVM support for NV implies VHE, so
                         * using the ZCR_EL1 alias is safe.
                         */
                        if (!has_vhe() || (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)))
                                sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
                                                       SYS_ZCR_EL1);
                }

                /*
                 * Flush (save and invalidate) the fpsimd/sve state so that if
                 * the host tries to use fpsimd/sve, it's not using stale data
                 * from the guest.
                 *
                 * Flushing the state sets the TIF_FOREIGN_FPSTATE bit for the
                 * context unconditionally, in both nVHE and VHE. This allows
                 * the kernel to restore the fpsimd/sve state, including
                 * ZCR_EL1, when needed.
                 */
                fpsimd_save_and_flush_cpu_state();
        } else if (has_vhe() && system_supports_sve()) {
                /*
                 * The FPSIMD/SVE state in the CPU has not been touched, and we
                 * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
                 * reset by kvm_reset_cptr_el2() in the Hyp code, disabling SVE
                 * for EL0. To avoid spurious traps, restore the trap state
                 * seen by kvm_arch_vcpu_load_fp():
                 */
                if (vcpu_get_flag(vcpu, HOST_SVE_ENABLED))
                        sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
                else
                        sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
        }

        local_irq_restore(flags);
}
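
/*
 * Illustrative sketch only (guarded out with #if 0, not part of this
 * file): what the sysreg_clear_set(CPACR_EL1, clear, set) calls above
 * boil down to, assuming the usual read-modify-write form (the real
 * macro also skips the write when the value would be unchanged).
 * Callers that need the new trap configuration to take effect
 * immediately follow up with isb(), as the SME path in
 * kvm_arch_vcpu_put_fp() does. The helper name is made up for
 * illustration; read_sysreg()/write_sysreg() come from <asm/sysreg.h>,
 * already included above.
 */
#if 0
static void example_cpacr_clear_set(u64 clear, u64 set)
{
        u64 val = read_sysreg(cpacr_el1);
        u64 new_val = (val & ~clear) | set;

        if (new_val != val)
                write_sysreg(new_val, cpacr_el1);
}
#endif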