// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_SWITCH_H__
#define __ARM64_KVM_HYP_SWITCH_H__

#include <hyp/adjust_pc.h>
#include <hyp/fault.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/extable.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/traps.h>

struct kvm_exception_table_entry {
	int insn, fixup;
};

extern struct kvm_exception_table_entry __start___kvm_ex_table;
extern struct kvm_exception_table_entry __stop___kvm_ex_table;

/* Save the 32-bit only FPSIMD system register state */
static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	__vcpu_assign_sys_reg(vcpu, FPEXC32_EL2, read_sysreg(fpexc32_el2));
}

static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd())
		write_sysreg(1 << 30, fpexc32_el2);
}

static inline void __activate_cptr_traps_nvhe(struct kvm_vcpu *vcpu)
{
	u64 val = CPTR_NVHE_EL2_RES1 | CPTR_EL2_TAM | CPTR_EL2_TTA;

	/*
	 * Always trap SME since it's not supported in KVM.
	 * TSM is RES1 if SME isn't implemented.
	 */
	val |= CPTR_EL2_TSM;

	if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
		val |= CPTR_EL2_TZ;

	if (!guest_owns_fp_regs())
		val |= CPTR_EL2_TFP;

	write_sysreg(val, cptr_el2);
}
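
/*
 * Configure CPTR_EL2 traps for a VHE/hVHE host. Under VHE the register is
 * accessed through CPACR_EL1; when running a nested guest, the guest
 * hypervisor's own trap configuration is layered on top of ours.
 */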
static inline void __activate_cptr_traps_vhe(struct kvm_vcpu *vcpu)
{
	/*
	 * With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
	 * CPTR_EL2. In general, CPACR_EL1 has the same layout as CPTR_EL2,
	 * except for some missing controls, such as TAM.
	 * In this case, CPTR_EL2.TAM has the same position with or without
	 * VHE (HCR.E2H == 1) which allows us to use here the CPTR_EL2.TAM
	 * shift value for trapping the AMU accesses.
	 */
	u64 val = CPTR_EL2_TAM | CPACR_EL1_TTA;
	u64 cptr;

	if (guest_owns_fp_regs()) {
		val |= CPACR_EL1_FPEN;
		if (vcpu_has_sve(vcpu))
			val |= CPACR_EL1_ZEN;
	}

	if (!vcpu_has_nv(vcpu))
		goto write;

	/*
	 * The architecture is a bit crap (what a surprise): an EL2 guest
	 * writing to CPTR_EL2 via CPACR_EL1 can't set any of TCPAC or TTA,
	 * as they are RES0 in the guest's view. To work around it, trap the
	 * sucker using the very same bit it can't set...
	 */
	if (vcpu_el2_e2h_is_set(vcpu) && is_hyp_ctxt(vcpu))
		val |= CPTR_EL2_TCPAC;

	/*
	 * Layer the guest hypervisor's trap configuration on top of our own if
	 * we're in a nested context.
	 */
	if (is_hyp_ctxt(vcpu))
		goto write;

	cptr = vcpu_sanitised_cptr_el2(vcpu);

	/*
	 * Pay attention, there's some interesting detail here.
	 *
	 * The CPTR_EL2.xEN fields are 2 bits wide, although there are only two
	 * meaningful trap states when HCR_EL2.TGE = 0 (running a nested guest):
	 *
	 * - CPTR_EL2.xEN = x0, traps are enabled
	 * - CPTR_EL2.xEN = x1, traps are disabled
	 *
	 * In other words, bit[0] determines if guest accesses trap or not. In
	 * the interest of simplicity, clear the entire field if the guest
	 * hypervisor has traps enabled to dispel any illusion of something more
	 * complicated taking place.
	 */
	if (!(SYS_FIELD_GET(CPACR_EL1, FPEN, cptr) & BIT(0)))
		val &= ~CPACR_EL1_FPEN;
	if (!(SYS_FIELD_GET(CPACR_EL1, ZEN, cptr) & BIT(0)))
		val &= ~CPACR_EL1_ZEN;

	if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, S2POE, IMP))
		val |= cptr & CPACR_EL1_E0POE;

	val |= cptr & CPTR_EL2_TCPAC;

write:
	write_sysreg(val, cpacr_el1);
}

static inline void __activate_cptr_traps(struct kvm_vcpu *vcpu)
{
	if (!guest_owns_fp_regs())
		__activate_traps_fpsimd32(vcpu);

	if (has_vhe() || has_hvhe())
		__activate_cptr_traps_vhe(vcpu);
	else
		__activate_cptr_traps_nvhe(vcpu);
}

static inline void __deactivate_cptr_traps_nvhe(struct kvm_vcpu *vcpu)
{
	u64 val = CPTR_NVHE_EL2_RES1;

	if (!cpus_have_final_cap(ARM64_SVE))
		val |= CPTR_EL2_TZ;
	if (!cpus_have_final_cap(ARM64_SME))
		val |= CPTR_EL2_TSM;

	write_sysreg(val, cptr_el2);
}

static inline void __deactivate_cptr_traps_vhe(struct kvm_vcpu *vcpu)
{
	u64 val = CPACR_EL1_FPEN;

	if (cpus_have_final_cap(ARM64_SVE))
		val |= CPACR_EL1_ZEN;
	if (cpus_have_final_cap(ARM64_SME))
		val |= CPACR_EL1_SMEN;

	write_sysreg(val, cpacr_el1);
}

static inline void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
{
	if (has_vhe() || has_hvhe())
		__deactivate_cptr_traps_vhe(vcpu);
	else
		__deactivate_cptr_traps_nvhe(vcpu);
}

static inline bool cpu_has_amu(void)
{
	u64 pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);

	return cpuid_feature_extract_unsigned_field(pfr0,
		ID_AA64PFR0_EL1_AMU_SHIFT);
}
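
/* Save the host's value of an FGT register and install the vcpu's value */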
#define __activate_fgt(hctxt, vcpu, reg)				\
	do {								\
		ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg);	\
		write_sysreg_s(*vcpu_fgt(vcpu, reg), SYS_ ## reg);	\
	} while (0)

static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);

	if (!cpus_have_final_cap(ARM64_HAS_FGT))
		return;

	__activate_fgt(hctxt, vcpu, HFGRTR_EL2);
	__activate_fgt(hctxt, vcpu, HFGWTR_EL2);
	__activate_fgt(hctxt, vcpu, HFGITR_EL2);
	__activate_fgt(hctxt, vcpu, HDFGRTR_EL2);
	__activate_fgt(hctxt, vcpu, HDFGWTR_EL2);

	if (cpu_has_amu())
		__activate_fgt(hctxt, vcpu, HAFGRTR_EL2);

	if (!cpus_have_final_cap(ARM64_HAS_FGT2))
		return;

	__activate_fgt(hctxt, vcpu, HFGRTR2_EL2);
	__activate_fgt(hctxt, vcpu, HFGWTR2_EL2);
	__activate_fgt(hctxt, vcpu, HFGITR2_EL2);
	__activate_fgt(hctxt, vcpu, HDFGRTR2_EL2);
	__activate_fgt(hctxt, vcpu, HDFGWTR2_EL2);
}

#define __deactivate_fgt(hctxt, vcpu, reg)				\
	do {								\
		write_sysreg_s(ctxt_sys_reg(hctxt, reg),		\
			       SYS_ ## reg);				\
	} while (0)

static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);

	if (!cpus_have_final_cap(ARM64_HAS_FGT))
		return;

	__deactivate_fgt(hctxt, vcpu, HFGRTR_EL2);
	__deactivate_fgt(hctxt, vcpu, HFGWTR_EL2);
	__deactivate_fgt(hctxt, vcpu, HFGITR_EL2);
	__deactivate_fgt(hctxt, vcpu, HDFGRTR_EL2);
	__deactivate_fgt(hctxt, vcpu, HDFGWTR_EL2);

	if (cpu_has_amu())
		__deactivate_fgt(hctxt, vcpu, HAFGRTR_EL2);

	if (!cpus_have_final_cap(ARM64_HAS_FGT2))
		return;

	__deactivate_fgt(hctxt, vcpu, HFGRTR2_EL2);
	__deactivate_fgt(hctxt, vcpu, HFGWTR2_EL2);
	__deactivate_fgt(hctxt, vcpu, HFGITR2_EL2);
	__deactivate_fgt(hctxt, vcpu, HDFGRTR2_EL2);
	__deactivate_fgt(hctxt, vcpu, HDFGWTR2_EL2);
}

static inline void __activate_traps_mpam(struct kvm_vcpu *vcpu)
{
	u64 r = MPAM2_EL2_TRAPMPAM0EL1 | MPAM2_EL2_TRAPMPAM1EL1;

	if (!system_supports_mpam())
		return;

	/* trap guest access to MPAMIDR_EL1 */
	if (system_supports_mpam_hcr()) {
		write_sysreg_s(MPAMHCR_EL2_TRAP_MPAMIDR_EL1, SYS_MPAMHCR_EL2);
	} else {
		/* From v1.1 TIDR can trap MPAMIDR, set it unconditionally */
		r |= MPAM2_EL2_TIDR;
	}

	write_sysreg_s(r, SYS_MPAM2_EL2);
}

static inline void __deactivate_traps_mpam(void)
{
	if (!system_supports_mpam())
		return;

	write_sysreg_s(0, SYS_MPAM2_EL2);

	if (system_supports_mpam_hcr())
		write_sysreg_s(MPAMHCR_HOST_FLAGS, SYS_MPAMHCR_EL2);
}
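
/*
 * Activate the traps shared by the VHE and nVHE worlds: AArch32 cp15 c15
 * accesses, EL0 PMU accesses, HCRX_EL2, the fine-grained traps and MPAM.
 */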
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);

	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);

	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	if (system_supports_pmuv3()) {
		write_sysreg(0, pmselr_el0);

		ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0);
		write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
		vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
	}

	if (cpus_have_final_cap(ARM64_HAS_HCX)) {
		u64 hcrx = vcpu->arch.hcrx_el2;
		if (is_nested_ctxt(vcpu)) {
			u64 val = __vcpu_sys_reg(vcpu, HCRX_EL2);
			hcrx |= val & __HCRX_EL2_MASK;
			hcrx &= ~(~val & __HCRX_EL2_nMASK);
		}

		ctxt_sys_reg(hctxt, HCRX_EL2) = read_sysreg_s(SYS_HCRX_EL2);
		write_sysreg_s(hcrx, SYS_HCRX_EL2);
	}

	__activate_traps_hfgxtr(vcpu);
	__activate_traps_mpam(vcpu);
}

static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);

	write_sysreg(0, hstr_el2);
	if (system_supports_pmuv3()) {
		write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);
		vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
	}

	if (cpus_have_final_cap(ARM64_HAS_HCX))
		write_sysreg_s(ctxt_sys_reg(hctxt, HCRX_EL2), SYS_HCRX_EL2);

	__deactivate_traps_hfgxtr(vcpu);
	__deactivate_traps_mpam();
}

static inline void ___activate_traps(struct kvm_vcpu *vcpu, u64 hcr)
{
	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
		hcr |= HCR_TVM;

	write_sysreg_hcr(hcr);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE)) {
		u64 vsesr;

		/*
		 * When HCR_EL2.AMO is set, physical SErrors are taken to EL2
		 * and vSError injection is enabled for EL1. Conveniently, for
		 * NV this means that it is never the case where a 'physical'
		 * SError (injected by KVM or userspace) and vSError are
		 * deliverable to the same context.
		 *
		 * As such, we can trivially select between the host or guest's
		 * VSESR_EL2. Except for the case that FEAT_RAS hasn't been
		 * exposed to the guest, where ESR propagation in hardware
		 * occurs unconditionally.
		 *
		 * Paper over the architectural wart and use an IMPLEMENTATION
		 * DEFINED ESR value in case FEAT_RAS is hidden from the guest.
		 */
		if (!vserror_state_is_nested(vcpu))
			vsesr = vcpu->arch.vsesr_el2;
		else if (kvm_has_ras(kern_hyp_va(vcpu->kvm)))
			vsesr = __vcpu_sys_reg(vcpu, VSESR_EL2);
		else
			vsesr = ESR_ELx_ISV;

		write_sysreg_s(vsesr, SYS_VSESR_EL2);
	}
}
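
/*
 * Preserve a pending virtual SError: HCR_EL2.VSE is cleared by hardware when
 * the vSError is taken, so fold the live value back into the vcpu's (or, in
 * a nested context, the guest hypervisor's) HCR_EL2 copy.
 */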
static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
{
	u64 *hcr;

	if (vserror_state_is_nested(vcpu))
		hcr = __ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2);
	else
		hcr = &vcpu->arch.hcr_el2;

	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 *
	 * Additionally, when in a nested context we need to propagate the
	 * updated state to the guest hypervisor's HCR_EL2.
	 */
	if (*hcr & HCR_VSE) {
		*hcr &= ~HCR_VSE;
		*hcr |= read_sysreg(hcr_el2) & HCR_VSE;
	}
}

static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
{
	return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
}

static inline bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
	arm64_mops_reset_regs(vcpu_gp_regs(vcpu), vcpu->arch.fault.esr_el2);
	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);

	/*
	 * Finish potential single step before executing the prologue
	 * instruction.
	 */
	*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
	write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);

	return true;
}

static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
{
	/*
	 * The vCPU's saved SVE state layout always matches the max VL of the
	 * vCPU. Start off with the max VL so we can load the SVE state.
	 */
	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
	__sve_restore_state(vcpu_sve_pffr(vcpu),
			    &vcpu->arch.ctxt.fp_regs.fpsr,
			    true);

	/*
	 * The effective VL for a VM could differ from the max VL when running a
	 * nested guest, as the guest hypervisor could select a smaller VL. Slap
	 * that into hardware before wrapping up.
	 */
	if (is_nested_ctxt(vcpu))
		sve_cond_update_zcr_vq(__vcpu_sys_reg(vcpu, ZCR_EL2), SYS_ZCR_EL2);

	write_sysreg_el1(__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)), SYS_ZCR);
}

static inline void __hyp_sve_save_host(void)
{
	struct cpu_sve_state *sve_state = *host_data_ptr(sve_state);

	sve_state->zcr_el1 = read_sysreg_el1(SYS_ZCR);
	write_sysreg_s(sve_vq_from_vl(kvm_host_sve_max_vl) - 1, SYS_ZCR_EL2);
	__sve_save_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
			 &sve_state->fpsr,
			 true);
}
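
/*
 * Install the guest's SVE vector lengths (ZCR_EL1/ZCR_EL2) on the way into
 * the guest, but only once the guest owns the FP/SVE register state.
 */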
static inline void fpsimd_lazy_switch_to_guest(struct kvm_vcpu *vcpu)
{
	u64 zcr_el1, zcr_el2;

	if (!guest_owns_fp_regs())
		return;

	if (vcpu_has_sve(vcpu)) {
		/* A guest hypervisor may restrict the effective max VL. */
		if (is_nested_ctxt(vcpu))
			zcr_el2 = __vcpu_sys_reg(vcpu, ZCR_EL2);
		else
			zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;

		write_sysreg_el2(zcr_el2, SYS_ZCR);

		zcr_el1 = __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu));
		write_sysreg_el1(zcr_el1, SYS_ZCR);
	}
}

static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
{
	u64 zcr_el1, zcr_el2;

	if (!guest_owns_fp_regs())
		return;

	/*
	 * When the guest owns the FP regs, we know that guest+hyp traps for
	 * any FPSIMD/SVE/SME features exposed to the guest have been disabled
	 * by either __activate_cptr_traps() or kvm_hyp_handle_fpsimd()
	 * prior to __guest_entry(). As __guest_entry() guarantees a context
	 * synchronization event, we don't need an ISB here to avoid taking
	 * traps for anything that was exposed to the guest.
	 */
	if (vcpu_has_sve(vcpu)) {
		zcr_el1 = read_sysreg_el1(SYS_ZCR);
		__vcpu_assign_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu), zcr_el1);

		/*
		 * The guest's state is always saved using the guest's max VL.
		 * Ensure that the host has the guest's max VL active such that
		 * the host can save the guest's state lazily, but don't
		 * artificially restrict the host to the guest's max VL.
		 */
		if (has_vhe()) {
			zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
			write_sysreg_el2(zcr_el2, SYS_ZCR);
		} else {
			zcr_el2 = sve_vq_from_vl(kvm_host_sve_max_vl) - 1;
			write_sysreg_el2(zcr_el2, SYS_ZCR);

			zcr_el1 = vcpu_sve_max_vq(vcpu) - 1;
			write_sysreg_el1(zcr_el1, SYS_ZCR);
		}
	}
}

static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
{
	/*
	 * Non-protected kvm relies on the host restoring its sve state.
	 * Protected kvm restores the host's sve state so as not to reveal
	 * that fpsimd was used by a guest nor leak upper sve bits.
	 */
	if (system_supports_sve()) {
		__hyp_sve_save_host();
	} else {
		__fpsimd_save_state(host_data_ptr(host_ctxt.fp_regs));
	}

	if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))
		*host_data_ptr(fpmr) = read_sysreg_s(SYS_FPMR);
}

/*
 * We trap the first access to the FP/SIMD to save the host context and
 * restore the guest context lazily.
 * If FP/SIMD is not implemented, handle the trap and inject an undefined
 * instruction exception to the guest. Similarly for trapped SVE accesses.
 */
static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	bool sve_guest;
	u8 esr_ec;

	if (!system_supports_fpsimd())
		return false;

	sve_guest = vcpu_has_sve(vcpu);
	esr_ec = kvm_vcpu_trap_get_class(vcpu);

	/* Only handle traps the vCPU can support here: */
	switch (esr_ec) {
	case ESR_ELx_EC_FP_ASIMD:
		/* Forward traps to the guest hypervisor as required */
		if (guest_hyp_fpsimd_traps_enabled(vcpu))
			return false;
		break;
	case ESR_ELx_EC_SYS64:
		if (WARN_ON_ONCE(!is_hyp_ctxt(vcpu)))
			return false;
		fallthrough;
	case ESR_ELx_EC_SVE:
		if (!sve_guest)
			return false;
		if (guest_hyp_sve_traps_enabled(vcpu))
			return false;
		break;
	default:
		return false;
	}

	/* Valid trap. Switch the context: */

	/* First disable enough traps to allow us to update the registers */
	__deactivate_cptr_traps(vcpu);
	isb();

	/* Write out the host state if it's in the registers */
	if (is_protected_kvm_enabled() && host_owns_fp_regs())
		kvm_hyp_save_fpsimd_host(vcpu);

	/* Restore the guest state */
	if (sve_guest)
		__hyp_sve_restore_guest(vcpu);
	else
		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);

	if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))
		write_sysreg_s(__vcpu_sys_reg(vcpu, FPMR), SYS_FPMR);

	/* Skip restoring fpexc32 for AArch64 guests */
	if (!(read_sysreg(hcr_el2) & HCR_RW))
		write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);

	*host_data_ptr(fp_owner) = FP_STATE_GUEST_OWNED;

	/*
	 * Re-enable traps necessary for the current state of the guest, e.g.
	 * those enabled by a guest hypervisor. The ERET to the guest will
	 * provide the necessary context synchronization.
	 */
	__activate_cptr_traps(vcpu);

	return true;
}
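
/*
 * Emulate guest writes to the EL1 VM control registers that are trapped by
 * HCR_EL2.TVM as part of the Cavium TX2 erratum #219 workaround.
 */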
static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
	int rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 val = vcpu_get_reg(vcpu, rt);

	/*
	 * The normal sysreg handling code expects to see the traps,
	 * let's not do anything here.
	 */
	if (vcpu->arch.hcr_el2 & HCR_TVM)
		return false;

	switch (sysreg) {
	case SYS_SCTLR_EL1:
		write_sysreg_el1(val, SYS_SCTLR);
		break;
	case SYS_TTBR0_EL1:
		write_sysreg_el1(val, SYS_TTBR0);
		break;
	case SYS_TTBR1_EL1:
		write_sysreg_el1(val, SYS_TTBR1);
		break;
	case SYS_TCR_EL1:
		write_sysreg_el1(val, SYS_TCR);
		break;
	case SYS_ESR_EL1:
		write_sysreg_el1(val, SYS_ESR);
		break;
	case SYS_FAR_EL1:
		write_sysreg_el1(val, SYS_FAR);
		break;
	case SYS_AFSR0_EL1:
		write_sysreg_el1(val, SYS_AFSR0);
		break;
	case SYS_AFSR1_EL1:
		write_sysreg_el1(val, SYS_AFSR1);
		break;
	case SYS_MAIR_EL1:
		write_sysreg_el1(val, SYS_MAIR);
		break;
	case SYS_AMAIR_EL1:
		write_sysreg_el1(val, SYS_AMAIR);
		break;
	case SYS_CONTEXTIDR_EL1:
		write_sysreg_el1(val, SYS_CONTEXTIDR);
		break;
	default:
		return false;
	}

	__kvm_skip_instr(vcpu);
	return true;
}

/* Open-coded version of timer_get_offset() to allow for kern_hyp_va() */
static inline u64 hyp_timer_get_offset(struct arch_timer_context *ctxt)
{
	u64 offset = 0;

	if (ctxt->offset.vm_offset)
		offset += *kern_hyp_va(ctxt->offset.vm_offset);
	if (ctxt->offset.vcpu_offset)
		offset += *kern_hyp_va(ctxt->offset.vcpu_offset);

	return offset;
}

static inline u64 compute_counter_value(struct arch_timer_context *ctxt)
{
	return arch_timer_read_cntpct_el0() - hyp_timer_get_offset(ctxt);
}
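
/*
 * Emulate trapped reads of CNTPCT_EL0/CNTVCT_EL0 (and their self-synchronised
 * variants) by applying the VM/vCPU counter offsets, unless the trap must be
 * forwarded to a guest hypervisor.
 */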
static bool kvm_handle_cntxct(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *ctxt;
	u32 sysreg;
	u64 val;

	/*
	 * We only get here for 64bit guests, 32bit guests will hit
	 * the long and winding road all the way to the standard
	 * handling. Yes, it sucks to be irrelevant.
	 *
	 * Also, we only deal with non-hypervisor context here (either
	 * an EL1 guest, or a non-HYP context of an EL2 guest).
	 */
	if (is_hyp_ctxt(vcpu))
		return false;

	sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));

	switch (sysreg) {
	case SYS_CNTPCT_EL0:
	case SYS_CNTPCTSS_EL0:
		if (vcpu_has_nv(vcpu)) {
			/* Check for guest hypervisor trapping */
			val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
			if (!vcpu_el2_e2h_is_set(vcpu))
				val = (val & CNTHCTL_EL1PCTEN) << 10;

			if (!(val & (CNTHCTL_EL1PCTEN << 10)))
				return false;
		}

		ctxt = vcpu_ptimer(vcpu);
		break;
	case SYS_CNTVCT_EL0:
	case SYS_CNTVCTSS_EL0:
		if (vcpu_has_nv(vcpu)) {
			/* Check for guest hypervisor trapping */
			val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);

			if (val & CNTHCTL_EL1TVCT)
				return false;
		}

		ctxt = vcpu_vtimer(vcpu);
		break;
	default:
		return false;
	}

	val = compute_counter_value(ctxt);

	vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val);
	__kvm_skip_instr(vcpu);
	return true;
}

static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
	int rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 val = vcpu_get_reg(vcpu, rt);

	if (sysreg != SYS_TCR_EL1)
		return false;

	/*
	 * Affected parts do not advertise support for hardware Access Flag /
	 * Dirty state management in ID_AA64MMFR1_EL1.HAFDBS, but the underlying
	 * control bits are still functional. The architecture requires these be
	 * RES0 on systems that do not implement FEAT_HAFDBS.
	 *
	 * Uphold the requirements of the architecture by masking guest writes
	 * to TCR_EL1.{HA,HD} here.
	 */
	val &= ~(TCR_HD | TCR_HA);
	write_sysreg_el1(val, SYS_TCR);
	__kvm_skip_instr(vcpu);
	return true;
}
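
/*
 * Handle system register traps that can be resolved directly at EL2:
 * errata workarounds, GICv3 CPU interface accesses and counter reads.
 */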
static inline bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
	    handle_tx2_tvm(vcpu))
		return true;

	if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) &&
	    handle_ampere1_tcr(vcpu))
		return true;

	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
		return true;

	if (kvm_handle_cntxct(vcpu))
		return true;

	return false;
}

static inline bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
		return true;

	return false;
}

static inline bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu,
					       u64 *exit_code)
{
	if (!__populate_fault_info(vcpu))
		return true;

	return false;
}
#define kvm_hyp_handle_iabt_low		kvm_hyp_handle_memory_fault
#define kvm_hyp_handle_watchpt_low	kvm_hyp_handle_memory_fault

static inline bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
		return true;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		bool valid;

		valid = kvm_vcpu_trap_is_translation_fault(vcpu) &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_abt_issea(vcpu) &&
			!kvm_vcpu_abt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1)
				return true;

			/* Promote an illegal access to an SError. */
			if (ret == -1)
				*exit_code = ARM_EXCEPTION_EL1_SERROR;
		}
	}

	return false;
}

typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);

/*
 * Allow the hypervisor to handle the exit with an exit handler if it has one.
 *
 * Returns true if the hypervisor handled the exit, and control should go back
 * to the guest, or false if it hasn't.
 */
static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
				       const exit_handler_fn *handlers)
{
	exit_handler_fn fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
	if (fn)
		return fn(vcpu, exit_code);

	return false;
}

static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu)
{
	/*
	 * Check for the conditions of Cortex-A510's #2077057. When these occur
	 * SPSR_EL2 can't be trusted, but isn't needed either as it is
	 * unchanged from the value in vcpu_gp_regs(vcpu)->pstate.
	 * Are we single-stepping the guest, and took a PAC exception from the
	 * active-not-pending state?
	 */
	if (cpus_have_final_cap(ARM64_WORKAROUND_2077057) &&
	    vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    *vcpu_cpsr(vcpu) & DBG_SPSR_SS &&
	    ESR_ELx_EC(read_sysreg_el2(SYS_ESR)) == ESR_ELx_EC_PAC)
		write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);

	vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
}

/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the
 * main run loop.
 */
static inline bool __fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
				      const exit_handler_fn *handlers)
{
	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);

	if (ARM_SERROR_PENDING(*exit_code) &&
	    ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) {
		u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);

		/*
		 * HVC already has an adjusted PC, which we need to
		 * correct in order to return to after having injected
		 * the SError.
		 *
		 * SMC, on the other hand, is *trapped*, meaning its
		 * preferred return address is the SMC itself.
		 */
		if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64)
			write_sysreg_el2(read_sysreg_el2(SYS_ELR) - 4, SYS_ELR);
	}

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (*exit_code != ARM_EXCEPTION_TRAP)
		goto exit;

	/* Check if there's an exit handler and allow it to handle the exit. */
	if (kvm_hyp_handle_exit(vcpu, exit_code, handlers))
		goto guest;
exit:
	/* Return to the host kernel and handle the exit */
	return false;

guest:
	/* Re-enter the guest */
	asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412));
	return true;
}

static inline void __kvm_unexpected_el2_exception(void)
{
	extern char __guest_exit_restore_elr_and_panic[];
	unsigned long addr, fixup;
	struct kvm_exception_table_entry *entry, *end;
	unsigned long elr_el2 = read_sysreg(elr_el2);

	entry = &__start___kvm_ex_table;
	end = &__stop___kvm_ex_table;

	while (entry < end) {
		addr = (unsigned long)&entry->insn + entry->insn;
		fixup = (unsigned long)&entry->fixup + entry->fixup;

		if (addr != elr_el2) {
			entry++;
			continue;
		}

		write_sysreg(fixup, elr_el2);
		return;
	}

	/* Trigger a panic after restoring the hyp context. */
	this_cpu_ptr(&kvm_hyp_ctxt)->sys_regs[ELR_EL2] = elr_el2;
	write_sysreg(__guest_exit_restore_elr_and_panic, elr_el2);
}

#endif /* __ARM64_KVM_HYP_SWITCH_H__ */