// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/sysreg-sr.h>

#include <linux/compiler.h>
#include <linux/kvm_host.h>

#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_nested.h>

/*
 * Save the guest's virtual EL2 (vEL2) system register state into the
 * vcpu's in-memory sys_regs[] shadow copies.
 *
 * A nested guest's "EL2" actually executes on the CPU's EL1, which is
 * why most *_EL2 shadow registers are read back from EL1 hardware
 * registers (read_sysreg_el1()) here.
 */
static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
{
	/* These registers are common with EL1 */
	__vcpu_sys_reg(vcpu, PAR_EL1) = read_sysreg(par_el1);
	__vcpu_sys_reg(vcpu, TPIDR_EL1) = read_sysreg(tpidr_el1);

	__vcpu_sys_reg(vcpu, ESR_EL2) = read_sysreg_el1(SYS_ESR);
	__vcpu_sys_reg(vcpu, AFSR0_EL2) = read_sysreg_el1(SYS_AFSR0);
	__vcpu_sys_reg(vcpu, AFSR1_EL2) = read_sysreg_el1(SYS_AFSR1);
	__vcpu_sys_reg(vcpu, FAR_EL2) = read_sysreg_el1(SYS_FAR);
	__vcpu_sys_reg(vcpu, MAIR_EL2) = read_sysreg_el1(SYS_MAIR);
	__vcpu_sys_reg(vcpu, VBAR_EL2) = read_sysreg_el1(SYS_VBAR);
	__vcpu_sys_reg(vcpu, CONTEXTIDR_EL2) = read_sysreg_el1(SYS_CONTEXTIDR);
	__vcpu_sys_reg(vcpu, AMAIR_EL2) = read_sysreg_el1(SYS_AMAIR);

	/*
	 * In VHE mode those registers are compatible between EL1 and EL2,
	 * and the guest uses the _EL1 versions on the CPU naturally.
	 * So we save them into their _EL2 versions here.
	 * For nVHE mode we trap accesses to those registers, so our
	 * _EL2 copy in sys_regs[] is always up-to-date and we don't need
	 * to save anything here.
	 */
	if (vcpu_el2_e2h_is_set(vcpu)) {
		u64 val;

		/*
		 * We don't save CPTR_EL2, as accesses to CPACR_EL1
		 * are always trapped, ensuring that the in-memory
		 * copy is always up-to-date. A small blessing...
		 */
		__vcpu_sys_reg(vcpu, SCTLR_EL2) = read_sysreg_el1(SYS_SCTLR);
		__vcpu_sys_reg(vcpu, TTBR0_EL2) = read_sysreg_el1(SYS_TTBR0);
		__vcpu_sys_reg(vcpu, TTBR1_EL2) = read_sysreg_el1(SYS_TTBR1);
		__vcpu_sys_reg(vcpu, TCR_EL2) = read_sysreg_el1(SYS_TCR);

		/* Only touch the optional-feature shadows when the feature exists */
		if (ctxt_has_tcrx(&vcpu->arch.ctxt)) {
			__vcpu_sys_reg(vcpu, TCR2_EL2) = read_sysreg_el1(SYS_TCR2);

			if (ctxt_has_s1pie(&vcpu->arch.ctxt)) {
				__vcpu_sys_reg(vcpu, PIRE0_EL2) = read_sysreg_el1(SYS_PIRE0);
				__vcpu_sys_reg(vcpu, PIR_EL2) = read_sysreg_el1(SYS_PIR);
			}

			if (ctxt_has_s1poe(&vcpu->arch.ctxt))
				__vcpu_sys_reg(vcpu, POR_EL2) = read_sysreg_el1(SYS_POR);
		}

		/*
		 * The EL1 view of CNTKCTL_EL1 has a bunch of RES0 bits where
		 * the interesting CNTHCTL_EL2 bits live. So preserve these
		 * bits when reading back the guest-visible value.
		 */
		val = read_sysreg_el1(SYS_CNTKCTL);
		val &= CNTKCTL_VALID_BITS;
		/* Merge: keep the shadow's non-CNTKCTL bits, take the live CNTKCTL bits */
		__vcpu_sys_reg(vcpu, CNTHCTL_EL2) &= ~CNTKCTL_VALID_BITS;
		__vcpu_sys_reg(vcpu, CNTHCTL_EL2) |= val;
	}

	__vcpu_sys_reg(vcpu, SP_EL2) = read_sysreg(sp_el1);
	__vcpu_sys_reg(vcpu, ELR_EL2) = read_sysreg_el1(SYS_ELR);
	__vcpu_sys_reg(vcpu, SPSR_EL2) = read_sysreg_el1(SYS_SPSR);
}

/*
 * Restore the guest's virtual EL2 (vEL2) system register state from the
 * vcpu's sys_regs[] shadows into the hardware EL1 registers.
 *
 * When the guest hypervisor sets HCR_EL2.E2H (vE2H), the _EL2 shadow
 * values are written verbatim; otherwise some registers have a different
 * layout at EL2 vs EL1 and are converted via the
 * translate_*_el2_to_*_el1() helpers first.
 */
static void __sysreg_restore_vel2_state(struct kvm_vcpu *vcpu)
{
	u64 val;

	/* These registers are common with EL1 */
	write_sysreg(__vcpu_sys_reg(vcpu, PAR_EL1), par_el1);
	write_sysreg(__vcpu_sys_reg(vcpu, TPIDR_EL1), tpidr_el1);

	/* vEL2's MPIDR_EL1 view is provided through VMPIDR_EL2 */
	write_sysreg(__vcpu_sys_reg(vcpu, MPIDR_EL1), vmpidr_el2);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, MAIR_EL2), SYS_MAIR);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, VBAR_EL2), SYS_VBAR);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, CONTEXTIDR_EL2), SYS_CONTEXTIDR);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, AMAIR_EL2), SYS_AMAIR);

	if (vcpu_el2_e2h_is_set(vcpu)) {
		/*
		 * In VHE mode those registers are compatible between
		 * EL1 and EL2.
		 */
		write_sysreg_el1(__vcpu_sys_reg(vcpu, SCTLR_EL2), SYS_SCTLR);
		write_sysreg_el1(__vcpu_sys_reg(vcpu, CPTR_EL2), SYS_CPACR);
		write_sysreg_el1(__vcpu_sys_reg(vcpu, TTBR0_EL2), SYS_TTBR0);
		write_sysreg_el1(__vcpu_sys_reg(vcpu, TTBR1_EL2), SYS_TTBR1);
		write_sysreg_el1(__vcpu_sys_reg(vcpu, TCR_EL2), SYS_TCR);
		write_sysreg_el1(__vcpu_sys_reg(vcpu, CNTHCTL_EL2), SYS_CNTKCTL);
	} else {
		/*
		 * CNTHCTL_EL2 only affects EL1 when running nVHE, so
		 * no need to restore it.
		 */
		val = translate_sctlr_el2_to_sctlr_el1(__vcpu_sys_reg(vcpu, SCTLR_EL2));
		write_sysreg_el1(val, SYS_SCTLR);
		val = translate_cptr_el2_to_cpacr_el1(__vcpu_sys_reg(vcpu, CPTR_EL2));
		write_sysreg_el1(val, SYS_CPACR);
		val = translate_ttbr0_el2_to_ttbr0_el1(__vcpu_sys_reg(vcpu, TTBR0_EL2));
		write_sysreg_el1(val, SYS_TTBR0);
		val = translate_tcr_el2_to_tcr_el1(__vcpu_sys_reg(vcpu, TCR_EL2));
		write_sysreg_el1(val, SYS_TCR);
	}

	/* Only touch the optional-feature shadows when the feature exists */
	if (ctxt_has_tcrx(&vcpu->arch.ctxt)) {
		write_sysreg_el1(__vcpu_sys_reg(vcpu, TCR2_EL2), SYS_TCR2);

		if (ctxt_has_s1pie(&vcpu->arch.ctxt)) {
			write_sysreg_el1(__vcpu_sys_reg(vcpu, PIR_EL2), SYS_PIR);
			write_sysreg_el1(__vcpu_sys_reg(vcpu, PIRE0_EL2), SYS_PIRE0);
		}

		if (ctxt_has_s1poe(&vcpu->arch.ctxt))
			write_sysreg_el1(__vcpu_sys_reg(vcpu, POR_EL2), SYS_POR);
	}

	write_sysreg_el1(__vcpu_sys_reg(vcpu, ESR_EL2), SYS_ESR);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, AFSR0_EL2), SYS_AFSR0);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, AFSR1_EL2), SYS_AFSR1);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, FAR_EL2), SYS_FAR);
	write_sysreg(__vcpu_sys_reg(vcpu, SP_EL2), sp_el1);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, ELR_EL2), SYS_ELR);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, SPSR_EL2), SYS_SPSR);
}

/*
 * VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and
 * pstate, which are handled as part of the el2 return state) on every
 * switch
 * (sp_el0 is being dealt with in the assembly code).
 * tpidr_el0 and tpidrro_el0 only need to be switched when going
 * to host userspace or a different VCPU. EL1 registers only need to be
 * switched when potentially going to run a different VCPU. The latter two
 * classes are handled as part of kvm_arch_vcpu_load and kvm_arch_vcpu_put.
 */

/* Save the host's per-switch ("common") state before entering the guest. */
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_save_common_state(ctxt);
}
/* World-switch path: exclude from kprobes. */
NOKPROBE_SYMBOL(sysreg_save_host_state_vhe);

/*
 * Save the guest's common state plus the EL2 return state (PC/pstate,
 * per the comment above) on exit from the guest.
 */
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_save_common_state(ctxt);
	__sysreg_save_el2_return_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);

/* Restore the host's per-switch state after leaving the guest. */
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_restore_common_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe);

/*
 * Restore the guest's common state plus the EL2 return state before
 * entering the guest.
 */
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_restore_common_state(ctxt);
	__sysreg_restore_el2_return_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);

/**
 * __vcpu_load_switch_sysregs - Load guest system registers to the physical CPU
 *
 * @vcpu: The VCPU pointer
 *
 * Load system registers that do not affect the host's execution, for
 * example EL1 system registers on a VHE system where the host kernel
 * runs at EL2. This function is called from KVM's vcpu_load() function
 * and loading system register state early avoids having to load them on
 * every entry to the VM.
 */
void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
	struct kvm_cpu_context *host_ctxt;
	u64 mpidr;

	host_ctxt = host_data_ptr(host_ctxt);
	/* Stash host user state first; it is restored on vcpu_put() */
	__sysreg_save_user_state(host_ctxt);

	/*
	 * When running a normal EL1 guest, we only load a new vcpu
	 * after a context switch, which involves a DSB, so all
	 * speculative EL1&0 walks will have already completed.
	 * If running NV, the vcpu may transition between vEL1 and
	 * vEL2 without a context switch, so make sure we complete
	 * those walks before loading a new context.
	 */
	if (vcpu_has_nv(vcpu))
		dsb(nsh);

	/*
	 * Load guest EL1 and user state
	 *
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_user_state(guest_ctxt);

	if (unlikely(__is_hyp_ctxt(guest_ctxt))) {
		/* The vcpu is currently in a vEL2 context: load the EL2 shadows */
		__sysreg_restore_vel2_state(vcpu);
	} else {
		if (vcpu_has_nv(vcpu)) {
			/*
			 * Use the guest hypervisor's VPIDR_EL2 when in a
			 * nested state. The hardware value of MIDR_EL1 gets
			 * restored on put.
			 */
			write_sysreg(ctxt_sys_reg(guest_ctxt, VPIDR_EL2), vpidr_el2);

			/*
			 * As we're restoring a nested guest, set the value
			 * provided by the guest hypervisor.
			 */
			mpidr = ctxt_sys_reg(guest_ctxt, VMPIDR_EL2);
		} else {
			mpidr = ctxt_sys_reg(guest_ctxt, MPIDR_EL1);
		}

		__sysreg_restore_el1_state(guest_ctxt, mpidr);
	}

	/* From here on, the guest's sysregs live on the CPU, not in memory */
	vcpu_set_flag(vcpu, SYSREGS_ON_CPU);
}

/**
 * __vcpu_put_switch_sysregs - Restore host system registers to the physical CPU
 *
 * @vcpu: The VCPU pointer
 *
 * Save guest system registers that do not affect the host's execution, for
 * example EL1 system registers on a VHE system where the host kernel
 * runs at EL2.
 * This function is called from KVM's vcpu_put() function
 * and deferring saving system register state until we're no longer running the
 * VCPU avoids having to save them on every exit from the VM.
 */
void __vcpu_put_switch_sysregs(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
	struct kvm_cpu_context *host_ctxt;

	host_ctxt = host_data_ptr(host_ctxt);

	/* Mirror of the load path: a vEL2 context saves the EL2 shadows */
	if (unlikely(__is_hyp_ctxt(guest_ctxt)))
		__sysreg_save_vel2_state(vcpu);
	else
		__sysreg_save_el1_state(guest_ctxt);

	__sysreg_save_user_state(guest_ctxt);
	__sysreg32_save_state(vcpu);

	/* Restore host user state */
	__sysreg_restore_user_state(host_ctxt);

	/* If leaving a nesting guest, restore MIDR_EL1 default view */
	if (vcpu_has_nv(vcpu))
		write_sysreg(read_cpuid_id(), vpidr_el2);

	/* Guest sysregs are back in memory; clear the tracking flag */
	vcpu_clear_flag(vcpu, SYSREGS_ON_CPU);
}