// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/reset.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/hw_breakpoint.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include <kvm/arm_arch_timer.h>

#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/virt.h>

/* Maximum phys_shift supported for any VM on this host */
static u32 kvm_ipa_limit;

/*
 * ARMv8 Reset Values
 */
#define VCPU_RESET_PSTATE_EL1	(PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT | \
				 PSR_F_BIT | PSR_D_BIT)

#define VCPU_RESET_PSTATE_SVC	(PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \
				 PSR_AA32_I_BIT | PSR_AA32_F_BIT)

unsigned int kvm_sve_max_vl;

int kvm_arm_init_sve(void)
{
	if (system_supports_sve()) {
		kvm_sve_max_vl = sve_max_virtualisable_vl;

		/*
		 * The get_sve_reg()/set_sve_reg() ioctl interface will need
		 * to be extended with multiple register slice support in
		 * order to support vector lengths greater than
		 * SVE_VL_ARCH_MAX:
		 */
		if (WARN_ON(kvm_sve_max_vl > SVE_VL_ARCH_MAX))
			kvm_sve_max_vl = SVE_VL_ARCH_MAX;

		/*
		 * Don't even try to make use of vector lengths that
		 * aren't available on all CPUs, for now:
		 */
		if (kvm_sve_max_vl < sve_max_vl)
			pr_warn("KVM: SVE vector length for guests limited to %u bytes\n",
				kvm_sve_max_vl);
	}

	return 0;
}

static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
{
	if (!system_supports_sve())
		return -EINVAL;

	vcpu->arch.sve_max_vl = kvm_sve_max_vl;

	/*
	 * Userspace can still customize the vector lengths by writing
	 * KVM_REG_ARM64_SVE_VLS. Allocation is deferred until
	 * kvm_arm_vcpu_finalize(), which freezes the configuration.
	 */
	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE;

	return 0;
}

/*
 * Finalize vcpu's maximum SVE vector length, allocating
 * vcpu->arch.sve_state as necessary.
 */
static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
{
	void *buf;
	unsigned int vl;

	vl = vcpu->arch.sve_max_vl;

	/*
	 * Responsibility for these properties is shared between
	 * kvm_arm_init_sve(), kvm_vcpu_enable_sve() and
	 * set_sve_vls(). Double-check here just to be sure:
	 */
	if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl ||
		    vl > SVE_VL_ARCH_MAX))
		return -EIO;

	buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vcpu->arch.sve_state = buf;
	vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
	return 0;
}

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
{
	switch (feature) {
	case KVM_ARM_VCPU_SVE:
		if (!vcpu_has_sve(vcpu))
			return -EINVAL;

		if (kvm_arm_vcpu_sve_finalized(vcpu))
			return -EPERM;

		return kvm_vcpu_finalize_sve(vcpu);
	}

	return -EINVAL;
}
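
/*
 * For reference, a minimal userspace sketch of the enable/finalize
 * ordering enforced above (error handling elided; "vcpu_fd" and the
 * kvm_vcpu_init target value are placeholders):
 *
 *	struct kvm_vcpu_init init = { ... };
 *
 *	init.features[0] |= 1 << KVM_ARM_VCPU_SVE;
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
 *
 *	// Optionally restrict the vector length set by writing
 *	// KVM_REG_ARM64_SVE_VLS, then freeze the configuration:
 *	int feature = KVM_ARM_VCPU_SVE;
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);
 *
 * Until KVM_ARM_VCPU_FINALIZE succeeds, the SVE registers are not
 * accessible and the vcpu cannot run.
 */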

bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
		return false;

	return true;
}

void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kfree(vcpu->arch.sve_state);
}

static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu))
		memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
}

static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
{
	/*
	 * For now make sure that both address/generic pointer authentication
	 * features are requested by the userspace together and the system
	 * supports these capabilities.
	 */
	if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features) ||
	    !system_has_full_ptr_auth())
		return -EINVAL;

	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH;
	return 0;
}

static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *tmp;
	bool is32bit;
	int i;

	is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
	if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
		return false;

	/* MTE is incompatible with AArch32 */
	if (kvm_has_mte(vcpu->kvm) && is32bit)
		return false;

	/* Check that the vcpus are either all 32bit or all 64bit */
	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
		if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
			return false;
	}

	return true;
}
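
/*
 * An illustrative consequence of the check above, from the VMM's point of
 * view (hypothetical file descriptors, on a host with 32-bit EL1 support):
 * requesting AArch32 state on only one of two vcpus makes the second
 * KVM_ARM_VCPU_INIT fail with -EINVAL, since all vcpus must agree on
 * their register width.
 *
 *	init.features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
 *	ioctl(vcpu0_fd, KVM_ARM_VCPU_INIT, &init);	// 32-bit EL1
 *
 *	init.features[0] &= ~(1 << KVM_ARM_VCPU_EL1_32BIT);
 *	ioctl(vcpu1_fd, KVM_ARM_VCPU_INIT, &init);	// fails with EINVAL
 */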

/**
 * kvm_reset_vcpu - sets core registers and sys_regs to reset value
 * @vcpu: The VCPU pointer
 *
 * This function sets the registers on the virtual CPU struct to their
 * architecturally defined reset values, except for registers whose reset
 * is deferred until kvm_arm_vcpu_finalize().
 *
 * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
 * ioctl or as part of handling a request issued by another VCPU in the PSCI
 * handling code. In the first case, the VCPU will not be loaded, and in the
 * second case the VCPU will be loaded. Because this function operates purely
 * on the memory-backed values of system registers, we want to do a full put if
 * we were loaded (handling a request) and load the values back at the end of
 * the function. Otherwise we leave the state alone. In both cases, we
 * disable preemption around the vcpu reset as we would otherwise race with
 * preempt notifiers which also call put/load.
 */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	int ret;
	bool loaded;
	u32 pstate;

	/* Reset PMU outside of the non-preemptible section */
	kvm_pmu_vcpu_reset(vcpu);

	preempt_disable();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
		if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) {
			ret = kvm_vcpu_enable_sve(vcpu);
			if (ret)
				goto out;
		}
	} else {
		kvm_vcpu_reset_sve(vcpu);
	}

	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
		if (kvm_vcpu_enable_ptrauth(vcpu)) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (!vcpu_allowed_register_width(vcpu)) {
		ret = -EINVAL;
		goto out;
	}

	switch (vcpu->arch.target) {
	default:
		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
			pstate = VCPU_RESET_PSTATE_SVC;
		} else {
			pstate = VCPU_RESET_PSTATE_EL1;
		}

		if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) {
			ret = -EINVAL;
			goto out;
		}
		break;
	}

	/* Reset core registers */
	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
	memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
	vcpu->arch.ctxt.spsr_abt = 0;
	vcpu->arch.ctxt.spsr_und = 0;
	vcpu->arch.ctxt.spsr_irq = 0;
	vcpu->arch.ctxt.spsr_fiq = 0;
	vcpu_gp_regs(vcpu)->pstate = pstate;

	/* Reset system registers */
	kvm_reset_sys_regs(vcpu);

	/*
	 * Additional reset state handling that PSCI may have imposed on us.
	 * Must be done after all the sys_reg reset.
	 */
	if (vcpu->arch.reset_state.reset) {
		unsigned long target_pc = vcpu->arch.reset_state.pc;

		/* Gracefully handle Thumb2 entry point */
		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
			target_pc &= ~1UL;
			vcpu_set_thumb(vcpu);
		}

		/* Propagate caller endianness */
		if (vcpu->arch.reset_state.be)
			kvm_vcpu_set_be(vcpu);

		*vcpu_pc(vcpu) = target_pc;
		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);

		vcpu->arch.reset_state.reset = false;
	}

	/* Reset timer */
	ret = kvm_timer_vcpu_reset(vcpu);
out:
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();
	return ret;
}
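
/*
 * For reference, the reset_state consumed above is typically populated by
 * the in-kernel PSCI CPU_ON handler when a booted vcpu brings up a
 * secondary. A guest-side sketch of that call (SMCCC register usage,
 * assuming the PSCI 0.2+ function IDs from include/uapi/linux/psci.h):
 *
 *	x0 = PSCI_0_2_FN64_CPU_ON;
 *	x1 = target_mpidr;	// which vcpu to power on
 *	x2 = entry_point;	// becomes reset_state.pc
 *	x3 = context_id;	// becomes reset_state.r0
 *	hvc #0;
 *
 * Bit 0 of the entry point selects Thumb on an AArch32 vcpu, which is why
 * the Thumb2 handling above clears it before writing the PC.
 */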

u32 get_kvm_ipa_limit(void)
{
	return kvm_ipa_limit;
}

int kvm_set_ipa_limit(void)
{
	unsigned int parange, tgran_2;
	u64 mmfr0;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_PARANGE_SHIFT);

	/*
	 * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
	 * Stage-2. If not, things will stop very quickly.
	 */
	switch (PAGE_SIZE) {
	default:
	case SZ_4K:
		tgran_2 = ID_AA64MMFR0_TGRAN4_2_SHIFT;
		break;
	case SZ_16K:
		tgran_2 = ID_AA64MMFR0_TGRAN16_2_SHIFT;
		break;
	case SZ_64K:
		tgran_2 = ID_AA64MMFR0_TGRAN64_2_SHIFT;
		break;
	}

	switch (cpuid_feature_extract_unsigned_field(mmfr0, tgran_2)) {
	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
		kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
		return -EINVAL;
	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT:
		kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
		break;
	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX:
		kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
		break;
	default:
		kvm_err("Unsupported value for TGRAN_2, giving up\n");
		return -EINVAL;
	}

	kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
	kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit,
		 ((kvm_ipa_limit < KVM_PHYS_SHIFT) ?
		  " (Reduced IPA size, limited VM/VMM compatibility)" : ""));

	return 0;
}

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
{
	u64 mmfr0, mmfr1;
	u32 phys_shift;

	if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
		return -EINVAL;

	phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
	if (phys_shift) {
		if (phys_shift > kvm_ipa_limit ||
		    phys_shift < 32)
			return -EINVAL;
	} else {
		phys_shift = KVM_PHYS_SHIFT;
		if (phys_shift > kvm_ipa_limit) {
			pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
				     current->comm);
			return -EINVAL;
		}
	}

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);

	return 0;
}
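
/*
 * An illustrative userspace sketch of how the "type" argument above is
 * produced (hypothetical descriptors). A VMM would first query
 * KVM_CAP_ARM_VM_IPA_SIZE, which reports the limit computed by
 * kvm_set_ipa_limit(), before asking for a non-default IPA size:
 *
 *	int ipa_bits = ioctl(kvm_fd, KVM_CHECK_EXTENSION,
 *			     KVM_CAP_ARM_VM_IPA_SIZE);
 *	if (ipa_bits >= 40)
 *		vm_fd = ioctl(kvm_fd, KVM_CREATE_VM,
 *			      KVM_VM_TYPE_ARM_IPA_SIZE(40));
 *	else
 *		vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0); // default KVM_PHYS_SHIFT
 */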