// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bug.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <linux/psci.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace_arm.h"

#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/kvm_pkvm.h>
#include <asm/kvm_ptrauth.h>
#include <asm/sections.h>

#include <kvm/arm_hypercalls.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_psci.h>

#include "sys_regs.h"

static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;

enum kvm_wfx_trap_policy {
	KVM_WFX_NOTRAP_SINGLE_TASK, /* Default option */
	KVM_WFX_NOTRAP,
	KVM_WFX_TRAP,
};

static enum kvm_wfx_trap_policy kvm_wfi_trap_policy __read_mostly = KVM_WFX_NOTRAP_SINGLE_TASK;
static enum kvm_wfx_trap_policy kvm_wfe_trap_policy __read_mostly = KVM_WFX_NOTRAP_SINGLE_TASK;

DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);

DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);
DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);

DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);

static bool vgic_present, kvm_arm_initialised;

static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized);

bool is_kvm_arm_initialised(void)
{
	return kvm_arm_initialised;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r = -EINVAL;

	if (cap->flags)
		return -EINVAL;

	if (kvm_vm_is_protected(kvm) && !kvm_pvm_ext_allowed(cap->cap))
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_ARM_NISV_TO_USER:
		r = 0;
		set_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
			&kvm->arch.flags);
		break;
	case KVM_CAP_ARM_MTE:
		mutex_lock(&kvm->lock);
		if (system_supports_mte() && !kvm->created_vcpus) {
			r = 0;
			set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_CAP_ARM_SYSTEM_SUSPEND:
		r = 0;
		set_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags);
		break;
	case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
		mutex_lock(&kvm->slots_lock);
		/*
		 * To keep things simple, allow changing the chunk
		 * size only when no memory slots have been created.
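		 *
		 * Illustrative userspace usage (a hedged sketch, not part of
		 * this file): the VMM would pick one of the sizes advertised
		 * by KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES and enable the cap on
		 * the VM fd before creating any memslot:
		 *
		 *   struct kvm_enable_cap cap = {
		 *           .cap     = KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE,
		 *           .args[0] = 2UL * 1024 * 1024,
		 *   };
		 *   ioctl(vm_fd, KVM_ENABLE_CAP, &cap);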
		 */
		if (kvm_are_all_memslots_empty(kvm)) {
			u64 new_cap = cap->args[0];

			if (!new_cap || kvm_is_block_size_supported(new_cap)) {
				r = 0;
				kvm->arch.mmu.split_page_chunk_size = new_cap;
			}
		}
		mutex_unlock(&kvm->slots_lock);
		break;
	case KVM_CAP_ARM_WRITABLE_IMP_ID_REGS:
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			r = 0;
			set_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags);
		}
		mutex_unlock(&kvm->lock);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arm_default_max_vcpus(void)
{
	return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
}

/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm:	pointer to the KVM struct
 * @type:	kvm device type
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret;

	mutex_init(&kvm->arch.config_lock);

#ifdef CONFIG_LOCKDEP
	/* Clue in lockdep that the config_lock must be taken inside kvm->lock */
	mutex_lock(&kvm->lock);
	mutex_lock(&kvm->arch.config_lock);
	mutex_unlock(&kvm->arch.config_lock);
	mutex_unlock(&kvm->lock);
#endif

	kvm_init_nested(kvm);

	ret = kvm_share_hyp(kvm, kvm + 1);
	if (ret)
		return ret;

	if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL_ACCOUNT)) {
		ret = -ENOMEM;
		goto err_unshare_kvm;
	}
	cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask);

	ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu, type);
	if (ret)
		goto err_free_cpumask;

	if (is_protected_kvm_enabled()) {
		/*
		 * If any failures occur after this is successful, make sure to
		 * call __pkvm_unreserve_vm to unreserve the VM in hyp.
		 */
		ret = pkvm_init_host_vm(kvm);
		if (ret)
			goto err_free_cpumask;
	}

	kvm_vgic_early_init(kvm);

	kvm_timer_init_vm(kvm);

	/* The maximum number of VCPUs is limited by the host's GIC model */
	kvm->max_vcpus = kvm_arm_default_max_vcpus();

	kvm_arm_init_hypercalls(kvm);

	bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES);

	return 0;

err_free_cpumask:
	free_cpumask_var(kvm->arch.supported_cpus);
err_unshare_kvm:
	kvm_unshare_hyp(kvm, kvm + 1);
	return ret;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

void kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
	kvm_sys_regs_create_debugfs(kvm);
	kvm_s2_ptdump_create_debugfs(kvm);
}

static void kvm_destroy_mpidr_data(struct kvm *kvm)
{
	struct kvm_mpidr_data *data;

	mutex_lock(&kvm->arch.config_lock);

	data = rcu_dereference_protected(kvm->arch.mpidr_data,
					 lockdep_is_held(&kvm->arch.config_lock));
	if (data) {
		rcu_assign_pointer(kvm->arch.mpidr_data, NULL);
		synchronize_rcu();
		kfree(data);
	}

	mutex_unlock(&kvm->arch.config_lock);
}

/**
 * kvm_arch_destroy_vm - destroy the VM data structure
 * @kvm:	pointer to the KVM struct
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	bitmap_free(kvm->arch.pmu_filter);
	free_cpumask_var(kvm->arch.supported_cpus);

	kvm_vgic_destroy(kvm);

	if (is_protected_kvm_enabled())
		pkvm_destroy_hyp_vm(kvm);

	kvm_destroy_mpidr_data(kvm);

	kfree(kvm->arch.sysreg_masks);
	kvm_destroy_vcpus(kvm);

	kvm_unshare_hyp(kvm, kvm + 1);

	kvm_arm_teardown_hypercalls(kvm);
}

static bool kvm_has_full_ptr_auth(void)
{
	bool apa, gpa, api, gpi, apa3, gpa3;
	u64 isar1, isar2, val;

	/*
	 * Check that:
	 *
	 * - both Address and Generic auth are implemented for a given
	 *   algorithm (Q5, IMPDEF or Q3)
	 * - only a single algorithm is implemented.
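	 *
	 * (Explanatory note, an editorial reading of the code below: the
	 * apa/api/apa3 flags are C booleans, so the final
	 * (apa + api + apa3) == 1 test counts the implemented algorithms
	 * and accepts exactly one of them.)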
	 */
	if (!system_has_full_ptr_auth())
		return false;

	isar1 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
	isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);

	apa = !!FIELD_GET(ID_AA64ISAR1_EL1_APA_MASK, isar1);
	val = FIELD_GET(ID_AA64ISAR1_EL1_GPA_MASK, isar1);
	gpa = (val == ID_AA64ISAR1_EL1_GPA_IMP);

	api = !!FIELD_GET(ID_AA64ISAR1_EL1_API_MASK, isar1);
	val = FIELD_GET(ID_AA64ISAR1_EL1_GPI_MASK, isar1);
	gpi = (val == ID_AA64ISAR1_EL1_GPI_IMP);

	apa3 = !!FIELD_GET(ID_AA64ISAR2_EL1_APA3_MASK, isar2);
	val = FIELD_GET(ID_AA64ISAR2_EL1_GPA3_MASK, isar2);
	gpa3 = (val == ID_AA64ISAR2_EL1_GPA3_IMP);

	return (apa == gpa && api == gpi && apa3 == gpa3 &&
		(apa + api + apa3) == 1);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	if (kvm && kvm_vm_is_protected(kvm) && !kvm_pvm_ext_allowed(ext))
		return 0;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
		r = vgic_present;
		break;
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ARM_PSCI:
	case KVM_CAP_ARM_PSCI_0_2:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_VCPU_EVENTS:
	case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
	case KVM_CAP_ARM_NISV_TO_USER:
	case KVM_CAP_ARM_INJECT_EXT_DABT:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_VCPU_ATTRIBUTES:
	case KVM_CAP_PTP_KVM:
	case KVM_CAP_ARM_SYSTEM_SUSPEND:
	case KVM_CAP_IRQFD_RESAMPLE:
	case KVM_CAP_COUNTER_OFFSET:
	case KVM_CAP_ARM_WRITABLE_IMP_ID_REGS:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		return KVM_GUESTDBG_VALID_MASK;
	case KVM_CAP_ARM_SET_DEVICE_ADDR:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		/*
		 * ARM64 treats KVM_CAP_NR_VCPUS differently from all other
		 * architectures, as it does not always bound it to
		 * KVM_CAP_MAX_VCPUS. It should not matter much because
		 * this is just an advisory value.
		 */
		r = min_t(unsigned int, num_online_cpus(),
			  kvm_arm_default_max_vcpus());
		break;
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		if (kvm)
			r = kvm->max_vcpus;
		else
			r = kvm_arm_default_max_vcpus();
		break;
	case KVM_CAP_MSI_DEVID:
		if (!kvm)
			r = -EINVAL;
		else
			r = kvm->arch.vgic.msis_require_devid;
		break;
	case KVM_CAP_ARM_USER_IRQ:
		/*
		 * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
		 * (bump this number if adding more devices)
		 */
		r = 1;
		break;
	case KVM_CAP_ARM_MTE:
		r = system_supports_mte();
		break;
	case KVM_CAP_STEAL_TIME:
		r = kvm_arm_pvtime_supported();
		break;
	case KVM_CAP_ARM_EL1_32BIT:
		r = cpus_have_final_cap(ARM64_HAS_32BIT_EL1);
		break;
	case KVM_CAP_ARM_EL2:
		r = cpus_have_final_cap(ARM64_HAS_NESTED_VIRT);
		break;
	case KVM_CAP_ARM_EL2_E2H0:
		r = cpus_have_final_cap(ARM64_HAS_HCR_NV1);
		break;
	case KVM_CAP_GUEST_DEBUG_HW_BPS:
		r = get_num_brps();
		break;
	case KVM_CAP_GUEST_DEBUG_HW_WPS:
		r = get_num_wrps();
		break;
	case KVM_CAP_ARM_PMU_V3:
		r = kvm_supports_guest_pmuv3();
		break;
	case KVM_CAP_ARM_INJECT_SERROR_ESR:
		r = cpus_have_final_cap(ARM64_HAS_RAS_EXTN);
		break;
	case KVM_CAP_ARM_VM_IPA_SIZE:
		r = get_kvm_ipa_limit();
		break;
	case KVM_CAP_ARM_SVE:
		r = system_supports_sve();
		break;
	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
	case KVM_CAP_ARM_PTRAUTH_GENERIC:
		r = kvm_has_full_ptr_auth();
		break;
	case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
		if (kvm)
			r = kvm->arch.mmu.split_page_chunk_size;
		else
			r = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
		break;
	case KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES:
		r = kvm_supported_block_sizes();
		break;
	case KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES:
		r = BIT(0);
		break;
	case KVM_CAP_ARM_CACHEABLE_PFNMAP_SUPPORTED:
		if (!kvm)
			r = -EINVAL;
		else
			r = kvm_supports_cacheable_pfnmap();
		break;

	default:
		r = 0;
	}

	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

struct kvm *kvm_arch_alloc_vm(void)
{
	size_t sz = sizeof(struct kvm);

	if (!has_vhe())
		return kzalloc(sz, GFP_KERNEL_ACCOUNT);

	return __vmalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_HIGHMEM | __GFP_ZERO);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
		return -EBUSY;

	if (id >= kvm->max_vcpus)
		return -EINVAL;

	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	spin_lock_init(&vcpu->arch.mp_state_lock);

#ifdef CONFIG_LOCKDEP
	/* Inform lockdep that the config_lock is acquired after vcpu->mutex */
	mutex_lock(&vcpu->mutex);
	mutex_lock(&vcpu->kvm->arch.config_lock);
	mutex_unlock(&vcpu->kvm->arch.config_lock);
	mutex_unlock(&vcpu->mutex);
#endif

	/* Force users to call KVM_ARM_VCPU_INIT */
	vcpu_clear_flag(vcpu, VCPU_INITIALIZED);

	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;

	/* Set up the timer */
	kvm_timer_vcpu_init(vcpu);

	kvm_pmu_vcpu_init(vcpu);

	kvm_arm_pvtime_vcpu_init(&vcpu->arch);

	vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;

	/*
	 * This vCPU may have been created after mpidr_data was initialized.
	 * Throw out the pre-computed mappings if that is the case which forces
	 * KVM to fall back to iteratively searching the vCPUs.
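	 *
	 * (Editorial note: the table is rebuilt lazily; kvm_init_mpidr_data()
	 * runs again the first time any vCPU of this VM enters KVM_RUN, see
	 * kvm_arch_vcpu_run_pid_change().)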
	 */
	kvm_destroy_mpidr_data(vcpu->kvm);

	err = kvm_vgic_vcpu_init(vcpu);
	if (err)
		return err;

	err = kvm_share_hyp(vcpu, vcpu + 1);
	if (err)
		kvm_vgic_vcpu_destroy(vcpu);

	return err;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	if (!is_protected_kvm_enabled())
		kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
	else
		free_hyp_memcache(&vcpu->arch.pkvm_memcache);
	kvm_timer_vcpu_terminate(vcpu);
	kvm_pmu_vcpu_destroy(vcpu);
	kvm_vgic_vcpu_destroy(vcpu);
	kvm_arm_vcpu_destroy(vcpu);
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{

}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{

}

static void vcpu_set_pauth_traps(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_ptrauth(vcpu) && !is_protected_kvm_enabled()) {
		/*
		 * Either we're running an L2 guest, and the API/APK bits come
		 * from L1's HCR_EL2, or API/APK are both set.
		 */
		if (unlikely(is_nested_ctxt(vcpu))) {
			u64 val;

			val = __vcpu_sys_reg(vcpu, HCR_EL2);
			val &= (HCR_API | HCR_APK);
			vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
			vcpu->arch.hcr_el2 |= val;
		} else {
			vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
		}

		/*
		 * Save the host keys if there is any chance for the guest
		 * to use pauth, as the entry code will reload the guest
		 * keys in that case.
		 */
		if (vcpu->arch.hcr_el2 & (HCR_API | HCR_APK)) {
			struct kvm_cpu_context *ctxt;

			ctxt = this_cpu_ptr_hyp_sym(kvm_hyp_ctxt);
			ptrauth_save_keys(ctxt);
		}
	}
}

static bool kvm_vcpu_should_clear_twi(struct kvm_vcpu *vcpu)
{
	if (unlikely(kvm_wfi_trap_policy != KVM_WFX_NOTRAP_SINGLE_TASK))
		return kvm_wfi_trap_policy == KVM_WFX_NOTRAP;

	return single_task_running() &&
	       (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
		vcpu->kvm->arch.vgic.nassgireq);
}

static bool kvm_vcpu_should_clear_twe(struct kvm_vcpu *vcpu)
{
	if (unlikely(kvm_wfe_trap_policy != KVM_WFX_NOTRAP_SINGLE_TASK))
		return kvm_wfe_trap_policy == KVM_WFX_NOTRAP;

	return single_task_running();
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_s2_mmu *mmu;
	int *last_ran;

	if (is_protected_kvm_enabled())
		goto nommu;

	if (vcpu_has_nv(vcpu))
		kvm_vcpu_load_hw_mmu(vcpu);

	mmu = vcpu->arch.hw_mmu;
	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);

	/*
	 * Ensure a VMID is allocated for the MMU before programming VTTBR_EL2,
	 * which happens eagerly in VHE.
	 *
	 * Also, the VMID allocator only preserves VMIDs that are active at the
	 * time of rollover, so KVM might need to grab a new VMID for the MMU if
	 * this is called from kvm_sched_in().
	 */
	kvm_arm_vmid_update(&mmu->vmid);

	/*
	 * We guarantee that both TLBs and I-cache are private to each
	 * vcpu. If detecting that a vcpu from the same VM has
	 * previously run on the same physical CPU, call into the
	 * hypervisor code to nuke the relevant contexts.
	 *
	 * We might get preempted before the vCPU actually runs, but
	 * over-invalidation doesn't affect correctness.
	 */
	if (*last_ran != vcpu->vcpu_idx) {
		kvm_call_hyp(__kvm_flush_cpu_context, mmu);
		*last_ran = vcpu->vcpu_idx;
	}

nommu:
	vcpu->cpu = cpu;

	/*
	 * The timer must be loaded before the vgic to correctly set up physical
	 * interrupt deactivation in nested state (e.g. timer interrupt).
	 */
	kvm_timer_vcpu_load(vcpu);
	kvm_vgic_load(vcpu);
	kvm_vcpu_load_debug(vcpu);
	if (has_vhe())
		kvm_vcpu_load_vhe(vcpu);
	kvm_arch_vcpu_load_fp(vcpu);
	kvm_vcpu_pmu_restore_guest(vcpu);
	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
		kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);

	if (kvm_vcpu_should_clear_twe(vcpu))
		vcpu->arch.hcr_el2 &= ~HCR_TWE;
	else
		vcpu->arch.hcr_el2 |= HCR_TWE;

	if (kvm_vcpu_should_clear_twi(vcpu))
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;

	vcpu_set_pauth_traps(vcpu);
	kvm_vcpu_load_fgt(vcpu);

	if (is_protected_kvm_enabled()) {
		kvm_call_hyp_nvhe(__pkvm_vcpu_load,
				  vcpu->kvm->arch.pkvm.handle,
				  vcpu->vcpu_idx, vcpu->arch.hcr_el2);
		kvm_call_hyp(__vgic_v3_restore_vmcr_aprs,
			     &vcpu->arch.vgic_cpu.vgic_v3);
	}

	if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus))
		vcpu_set_on_unsupported_cpu(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	if (is_protected_kvm_enabled()) {
		kvm_call_hyp(__vgic_v3_save_vmcr_aprs,
			     &vcpu->arch.vgic_cpu.vgic_v3);
		kvm_call_hyp_nvhe(__pkvm_vcpu_put);
	}

	kvm_vcpu_put_debug(vcpu);
	kvm_arch_vcpu_put_fp(vcpu);
	if (has_vhe())
		kvm_vcpu_put_vhe(vcpu);
	kvm_timer_vcpu_put(vcpu);
	kvm_vgic_put(vcpu);
	kvm_vcpu_pmu_restore_host(vcpu);
	if (vcpu_has_nv(vcpu))
		kvm_vcpu_put_hw_mmu(vcpu);
	kvm_arm_vmid_clear_active();

	vcpu_clear_on_unsupported_cpu(vcpu);
	vcpu->cpu = -1;
}

static void __kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.mp_state_lock);
	__kvm_arm_vcpu_power_off(vcpu);
	spin_unlock(&vcpu->arch.mp_state_lock);
}

bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
}

static void kvm_arm_vcpu_suspend(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED);
	kvm_make_request(KVM_REQ_SUSPEND, vcpu);
	kvm_vcpu_kick(vcpu);
}

static bool kvm_arm_vcpu_suspended(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	*mp_state = READ_ONCE(vcpu->arch.mp_state);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	spin_lock(&vcpu->arch.mp_state_lock);

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
		break;
	case KVM_MP_STATE_STOPPED:
		__kvm_arm_vcpu_power_off(vcpu);
		break;
	case KVM_MP_STATE_SUSPENDED:
		kvm_arm_vcpu_suspend(vcpu);
		break;
	default:
		ret = -EINVAL;
	}

	spin_unlock(&vcpu->arch.mp_state_lock);

	return ret;
}

/**
 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
 * @v:		The VCPU pointer
 *
 * If the guest CPU is not waiting for interrupts or an interrupt line is
 * asserted, the CPU is by definition runnable.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF | HCR_VSE);

	return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
		&& !kvm_arm_vcpu_stopped(v) && !v->arch.pause);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return vcpu_mode_priv(vcpu);
}

#ifdef CONFIG_GUEST_PERF_EVENTS
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
{
	return *vcpu_pc(vcpu);
}
#endif

static void kvm_init_mpidr_data(struct kvm *kvm)
{
	struct kvm_mpidr_data *data = NULL;
	unsigned long c, mask, nr_entries;
	u64 aff_set = 0, aff_clr = ~0UL;
	struct kvm_vcpu *vcpu;

	mutex_lock(&kvm->arch.config_lock);

	if (rcu_access_pointer(kvm->arch.mpidr_data) ||
	    atomic_read(&kvm->online_vcpus) == 1)
		goto out;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);
		aff_set |= aff;
		aff_clr &= aff;
	}

	/*
	 * A significant bit can be either 0 or 1, and will only appear in
	 * aff_set. Use aff_clr to weed out the useless stuff.
	 */
	mask = aff_set ^ aff_clr;
	nr_entries = BIT_ULL(hweight_long(mask));

	/*
	 * Don't let userspace fool us. If we need more than a single page
	 * to describe the compressed MPIDR array, just fall back to the
	 * iterative method. Single vcpu VMs do not need this either.
	 */
	if (struct_size(data, cmpidr_to_idx, nr_entries) <= PAGE_SIZE)
		data = kzalloc(struct_size(data, cmpidr_to_idx, nr_entries),
			       GFP_KERNEL_ACCOUNT);

	if (!data)
		goto out;

	data->mpidr_mask = mask;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);
		u16 index = kvm_mpidr_index(data, aff);

		data->cmpidr_to_idx[index] = c;
	}

	rcu_assign_pointer(kvm->arch.mpidr_data, data);
out:
	mutex_unlock(&kvm->arch.config_lock);
}

/*
 * Handle both the initialisation that is being done when the vcpu is
 * run for the first time, as well as the updates that must be
 * performed each time we get a new thread dealing with this vcpu.
 */
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	int ret;

	if (!kvm_vcpu_initialized(vcpu))
		return -ENOEXEC;

	if (!kvm_arm_vcpu_is_finalized(vcpu))
		return -EPERM;

	if (likely(vcpu_has_run_once(vcpu)))
		return 0;

	kvm_init_mpidr_data(kvm);

	if (likely(irqchip_in_kernel(kvm))) {
		/*
		 * Map the VGIC hardware resources before running a vcpu the
		 * first time on this VM.
		 */
		ret = kvm_vgic_map_resources(kvm);
		if (ret)
			return ret;
	}

	ret = kvm_finalize_sys_regs(vcpu);
	if (ret)
		return ret;

	if (vcpu_has_nv(vcpu)) {
		ret = kvm_vcpu_allocate_vncr_tlb(vcpu);
		if (ret)
			return ret;

		ret = kvm_vgic_vcpu_nv_init(vcpu);
		if (ret)
			return ret;
	}

	/*
	 * This needs to happen after any restriction has been applied
	 * to the feature set.
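	 *
	 * (A reading of the code below, not a guarantee: kvm_calculate_traps()
	 * derives the trap configuration from the now-frozen feature set, so
	 * running it earlier could bake in stale feature bits.)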
	 */
	kvm_calculate_traps(vcpu);

	ret = kvm_timer_enable(vcpu);
	if (ret)
		return ret;

	if (kvm_vcpu_has_pmu(vcpu)) {
		ret = kvm_arm_pmu_v3_enable(vcpu);
		if (ret)
			return ret;
	}

	if (is_protected_kvm_enabled()) {
		ret = pkvm_create_hyp_vm(kvm);
		if (ret)
			return ret;

		ret = pkvm_create_hyp_vcpu(vcpu);
		if (ret)
			return ret;
	}

	mutex_lock(&kvm->arch.config_lock);
	set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
	mutex_unlock(&kvm->arch.config_lock);

	return ret;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return vgic_initialized(kvm);
}

void kvm_arm_halt_guest(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.pause = true;
	kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
}

void kvm_arm_resume_guest(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.pause = false;
		__kvm_vcpu_wake_up(vcpu);
	}
}

static void kvm_vcpu_sleep(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	rcuwait_wait_event(wait,
			   (!kvm_arm_vcpu_stopped(vcpu)) && (!vcpu->arch.pause),
			   TASK_INTERRUPTIBLE);

	if (kvm_arm_vcpu_stopped(vcpu) || vcpu->arch.pause) {
		/* Awaken to handle a signal, request we sleep again later. */
		kvm_make_request(KVM_REQ_SLEEP, vcpu);
	}

	/*
	 * Make sure we will observe a potential reset request if we've
	 * observed a change to the power state. Pairs with the smp_wmb() in
	 * kvm_psci_vcpu_on().
	 */
	smp_rmb();
}

/**
 * kvm_vcpu_wfi - emulate Wait-For-Interrupt behavior
 * @vcpu:	The VCPU pointer
 *
 * Suspend execution of a vCPU until a valid wake event is detected, i.e. until
 * the vCPU is runnable. The vCPU may or may not be scheduled out, depending
 * on when a wake event arrives, e.g. there may already be a pending wake event.
 */
void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
{
	/*
	 * Sync back the state of the GIC CPU interface so that we have
	 * the latest PMR and group enables. This ensures that
	 * kvm_arch_vcpu_runnable has up-to-date data to decide whether
	 * we have pending interrupts, e.g. when determining if the
	 * vCPU should block.
	 *
	 * For the same reason, we want to tell GICv4 that we need
	 * doorbells to be signalled, should an interrupt become pending.
	 */
	preempt_disable();
	vcpu_set_flag(vcpu, IN_WFI);
	kvm_vgic_put(vcpu);
	preempt_enable();

	kvm_vcpu_halt(vcpu);
	vcpu_clear_flag(vcpu, IN_WFIT);

	preempt_disable();
	vcpu_clear_flag(vcpu, IN_WFI);
	kvm_vgic_load(vcpu);
	preempt_enable();
}

static int kvm_vcpu_suspend(struct kvm_vcpu *vcpu)
{
	if (!kvm_arm_vcpu_suspended(vcpu))
		return 1;

	kvm_vcpu_wfi(vcpu);

	/*
	 * The suspend state is sticky; we do not leave it until userspace
	 * explicitly marks the vCPU as runnable. Request that we suspend again
	 * later.
	 */
	kvm_make_request(KVM_REQ_SUSPEND, vcpu);

	/*
	 * Check to make sure the vCPU is actually runnable. If so, exit to
	 * userspace informing it of the wakeup condition.
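	 *
	 * (Usage sketch, a userspace convention rather than anything this
	 * file enforces: a VMM would typically answer KVM_SYSTEM_EVENT_WAKEUP
	 * by moving the vCPU back to KVM_MP_STATE_RUNNABLE via
	 * KVM_SET_MP_STATE and calling KVM_RUN again.)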
	 */
	if (kvm_arch_vcpu_runnable(vcpu)) {
		memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
		vcpu->run->system_event.type = KVM_SYSTEM_EVENT_WAKEUP;
		vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
		return 0;
	}

	/*
	 * Otherwise, we were unblocked to process a different event, such as a
	 * pending signal. Return 1 and allow kvm_arch_vcpu_ioctl_run() to
	 * process the event.
	 */
	return 1;
}

/**
 * check_vcpu_requests - check and handle pending vCPU requests
 * @vcpu:	the VCPU pointer
 *
 * Return: 1 if we should enter the guest
 *	   0 if we should exit to userspace
 *	   < 0 if we should exit to userspace, where the return value indicates
 *	   an error
 */
static int check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu))
			return -EIO;

		if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
			kvm_vcpu_sleep(vcpu);

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_reset_vcpu(vcpu);

		/*
		 * Clear IRQ_PENDING requests that were made to guarantee
		 * that a VCPU sees new virtual interrupts.
		 */
		kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);

		if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
			kvm_update_stolen_time(vcpu);

		if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
			/* The distributor enable bits were changed */
			preempt_disable();
			vgic_v4_put(vcpu);
			vgic_v4_load(vcpu);
			preempt_enable();
		}

		if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
			kvm_vcpu_reload_pmu(vcpu);

		if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu))
			kvm_vcpu_pmu_restore_guest(vcpu);

		if (kvm_check_request(KVM_REQ_SUSPEND, vcpu))
			return kvm_vcpu_suspend(vcpu);

		if (kvm_dirty_ring_check_request(vcpu))
			return 0;

		check_nested_vcpu_requests(vcpu);
	}

	return 1;
}

static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
{
	if (likely(!vcpu_mode_is_32bit(vcpu)))
		return false;

	if (vcpu_has_nv(vcpu))
		return true;

	return !kvm_supports_32bit_el0();
}

/**
 * kvm_vcpu_exit_request - returns true if the VCPU should *not* enter the guest
 * @vcpu:	The VCPU pointer
 * @ret:	Pointer to write optional return code
 *
 * Returns: true if the VCPU needs to return to a preemptible + interruptible
 *	    kernel context and skip guest entry.
 *
 * This function disambiguates between two different types of exits: exits to a
 * preemptible + interruptible kernel context and exits to userspace. For an
 * exit to userspace, this function will write the return code to ret and return
 * true. For an exit to a preemptible + interruptible kernel context (i.e. check
 * for pending work and re-enter), return true without writing to ret.
 */
static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
{
	struct kvm_run *run = vcpu->run;

	/*
	 * If we're using a userspace irqchip, then check if we need
	 * to tell a userspace irqchip about timer or PMU level
	 * changes and if so, exit to userspace (the actual level
	 * state gets updated in kvm_timer_update_run and
	 * kvm_pmu_update_run below).
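	 *
	 * (Editorial pointer: both helpers run near the tail of
	 * kvm_arch_vcpu_ioctl_run(), right before the ioctl returns to
	 * userspace.)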
	 */
	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
		if (kvm_timer_should_notify_user(vcpu) ||
		    kvm_pmu_should_notify_user(vcpu)) {
			*ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
			return true;
		}
	}

	if (unlikely(vcpu_on_unsupported_cpu(vcpu))) {
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		run->fail_entry.hardware_entry_failure_reason = KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED;
		run->fail_entry.cpu = smp_processor_id();
		*ret = 0;
		return true;
	}

	return kvm_request_pending(vcpu) ||
	       xfer_to_guest_mode_work_pending();
}

/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static int noinstr kvm_arm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
	int ret;

	guest_state_enter_irqoff();
	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
	guest_state_exit_irqoff();

	return ret;
}

/**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu:	The VCPU pointer
 *
 * This function is called through the VCPU_RUN ioctl called from user space. It
 * will execute VM code in a loop until the time slice for the process is used
 * or some emulation is needed from user space in which case the function will
 * return with return value 0 and with the kvm_run structure filled in with the
 * required data for the requested emulation.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int ret;

	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_handle_mmio_return(vcpu);
		if (ret <= 0)
			return ret;
	}

	vcpu_load(vcpu);

	if (!vcpu->wants_to_run) {
		ret = -EINTR;
		goto out;
	}

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->flags = 0;
	while (ret > 0) {
		/*
		 * Check conditions before entering the guest
		 */
		ret = kvm_xfer_to_guest_mode_handle_work(vcpu);
		if (!ret)
			ret = 1;

		if (ret > 0)
			ret = check_vcpu_requests(vcpu);

		/*
		 * Preparing the interrupts to be injected also
		 * involves poking the GIC, which must be done in a
		 * non-preemptible context.
		 */
		preempt_disable();

		kvm_nested_flush_hwstate(vcpu);

		if (kvm_vcpu_has_pmu(vcpu))
			kvm_pmu_flush_hwstate(vcpu);

		local_irq_disable();

		kvm_vgic_flush_hwstate(vcpu);

		kvm_pmu_update_vcpu_events(vcpu);

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		if (ret <= 0 || kvm_vcpu_exit_request(vcpu, &ret)) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			isb(); /* Ensure work in x_flush_hwstate is committed */
			if (kvm_vcpu_has_pmu(vcpu))
				kvm_pmu_sync_hwstate(vcpu);
			if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
				kvm_timer_sync_user(vcpu);
			kvm_vgic_sync_hwstate(vcpu);
			local_irq_enable();
			preempt_enable();
			continue;
		}

		kvm_arch_vcpu_ctxflush_fp(vcpu);

		/**************************************************************
		 * Enter the guest
		 */
		trace_kvm_entry(*vcpu_pc(vcpu));
		guest_timing_enter_irqoff();

		ret = kvm_arm_vcpu_enter_exit(vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;
		/*
		 * Back from guest
		 *************************************************************/

		/*
		 * We must sync the PMU state before the vgic state so
		 * that the vgic can properly sample the updated state of the
		 * interrupt line.
		 */
		if (kvm_vcpu_has_pmu(vcpu))
			kvm_pmu_sync_hwstate(vcpu);

		/*
		 * Sync the vgic state before syncing the timer state because
		 * the timer code needs to know if the virtual timer
		 * interrupts are active.
		 */
		kvm_vgic_sync_hwstate(vcpu);

		/*
		 * Sync the timer hardware state before enabling interrupts as
		 * we don't want vtimer interrupts to race with syncing the
		 * timer virtual interrupt state.
		 */
		if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
			kvm_timer_sync_user(vcpu);

		if (is_hyp_ctxt(vcpu))
			kvm_timer_sync_nested(vcpu);

		kvm_arch_vcpu_ctxsync_fp(vcpu);

		/*
		 * We must ensure that any pending interrupts are taken before
		 * we exit guest timing so that timer ticks are accounted as
		 * guest time. Transiently unmask interrupts so that any
		 * pending interrupts are taken.
		 *
		 * Per ARM DDI 0487G.b section D1.13.4, an ISB (or other
		 * context synchronization event) is necessary to ensure that
		 * pending interrupts are taken.
		 */
		if (ARM_EXCEPTION_CODE(ret) == ARM_EXCEPTION_IRQ) {
			local_irq_enable();
			isb();
			local_irq_disable();
		}

		guest_timing_exit_irqoff();

		local_irq_enable();

		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));

		/* Exit types that need handling before we can be preempted */
		handle_exit_early(vcpu, ret);

		kvm_nested_sync_hwstate(vcpu);

		preempt_enable();

		/*
		 * The ARMv8 architecture doesn't give the hypervisor
		 * a mechanism to prevent a guest from dropping to AArch32 EL0
		 * if implemented by the CPU. If we spot the guest in such
		 * state and that we decided it wasn't supposed to do so (like
		 * with the asymmetric AArch32 case), return to userspace with
		 * a fatal error.
		 */
		if (vcpu_mode_is_bad_32bit(vcpu)) {
			/*
			 * As we have caught the guest red-handed, decide that
			 * it isn't fit for purpose anymore by making the vcpu
			 * invalid. The VMM can try and fix it by issuing a
			 * KVM_ARM_VCPU_INIT if it really wants to.
			 */
			vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
			ret = ARM_EXCEPTION_IL;
		}

		ret = handle_exit(vcpu, ret);
	}

	/* Tell userspace about in-kernel device output levels */
	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
		kvm_timer_update_run(vcpu);
		kvm_pmu_update_run(vcpu);
	}

	kvm_sigset_deactivate(vcpu);

out:
	/*
	 * In the unlikely event that we are returning to userspace
	 * with pending exceptions or PC adjustment, commit these
	 * adjustments in order to give userspace a consistent view of
	 * the vcpu state. Note that this relies on __kvm_adjust_pc()
	 * being preempt-safe on VHE.
	 */
	if (unlikely(vcpu_get_flag(vcpu, PENDING_EXCEPTION) ||
		     vcpu_get_flag(vcpu, INCREMENT_PC)))
		kvm_call_hyp(__kvm_adjust_pc, vcpu);

	vcpu_put(vcpu);
	return ret;
}

static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
	int bit_index;
	bool set;
	unsigned long *hcr;

	if (number == KVM_ARM_IRQ_CPU_IRQ)
		bit_index = __ffs(HCR_VI);
	else /* KVM_ARM_IRQ_CPU_FIQ */
		bit_index = __ffs(HCR_VF);

	hcr = vcpu_hcr(vcpu);
	if (level)
		set = test_and_set_bit(bit_index, hcr);
	else
		set = test_and_clear_bit(bit_index, hcr);

	/*
	 * If we didn't change anything, no need to wake up or kick other CPUs
	 */
	if (set == level)
		return 0;

	/*
	 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
	 * trigger a world-switch round on the running physical CPU to set the
	 * virtual IRQ/FIQ fields in the HCR appropriately.
	 */
	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status)
{
	u32 irq = irq_level->irq;
	unsigned int irq_type, vcpu_id, irq_num;
	struct kvm_vcpu *vcpu = NULL;
	bool level = irq_level->level;

	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
	vcpu_id = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
	vcpu_id += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;

	trace_kvm_irq_line(irq_type, vcpu_id, irq_num, irq_level->level);

	switch (irq_type) {
	case KVM_ARM_IRQ_TYPE_CPU:
		if (irqchip_in_kernel(kvm))
			return -ENXIO;

		vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
		if (!vcpu)
			return -EINVAL;

		if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
			return -EINVAL;

		return vcpu_interrupt_line(vcpu, irq_num, level);
	case KVM_ARM_IRQ_TYPE_PPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
		if (!vcpu)
			return -EINVAL;

		if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, vcpu, irq_num, level, NULL);
	case KVM_ARM_IRQ_TYPE_SPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (irq_num < VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, NULL, irq_num, level, NULL);
	}

	return -EINVAL;
}

static unsigned long system_supported_vcpu_features(void)
{
	unsigned long features = KVM_VCPU_VALID_FEATURES;

	if (!cpus_have_final_cap(ARM64_HAS_32BIT_EL1))
		clear_bit(KVM_ARM_VCPU_EL1_32BIT, &features);

	if (!kvm_supports_guest_pmuv3())
		clear_bit(KVM_ARM_VCPU_PMU_V3, &features);

	if (!system_supports_sve())
		clear_bit(KVM_ARM_VCPU_SVE, &features);

	if (!kvm_has_full_ptr_auth()) {
		clear_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features);
		clear_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features);
	}

	if (!cpus_have_final_cap(ARM64_HAS_NESTED_VIRT))
		clear_bit(KVM_ARM_VCPU_HAS_EL2, &features);

	return features;
}

static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
					const struct kvm_vcpu_init *init)
{
	unsigned long features = init->features[0];
	int i;

	if (features & ~KVM_VCPU_VALID_FEATURES)
		return -ENOENT;

	for (i = 1; i < ARRAY_SIZE(init->features); i++) {
		if (init->features[i])
			return -ENOENT;
	}

	if (features & ~system_supported_vcpu_features())
		return -EINVAL;

	/*
	 * For now make sure that both address/generic pointer authentication
	 * features are requested by the userspace together.
	 */
	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features) !=
	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features))
		return -EINVAL;

	if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features))
		return 0;

	/* MTE is incompatible with AArch32 */
	if (kvm_has_mte(vcpu->kvm))
		return -EINVAL;

	/* NV is incompatible with AArch32 */
	if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features))
		return -EINVAL;

	return 0;
}

static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
				  const struct kvm_vcpu_init *init)
{
	unsigned long features = init->features[0];

	return !bitmap_equal(vcpu->kvm->arch.vcpu_features, &features,
			     KVM_VCPU_MAX_FEATURES);
}

static int kvm_setup_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	int ret = 0;

	/*
	 * When the vCPU has a PMU, but no PMU is set for the guest
	 * yet, set the default one.
	 */
	if (kvm_vcpu_has_pmu(vcpu) && !kvm->arch.arm_pmu)
		ret = kvm_arm_set_default_pmu(kvm);

	/* Prepare for nested if required */
	if (!ret && vcpu_has_nv(vcpu))
		ret = kvm_vcpu_init_nested(vcpu);

	return ret;
}

static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
				 const struct kvm_vcpu_init *init)
{
	unsigned long features = init->features[0];
	struct kvm *kvm = vcpu->kvm;
	int ret = -EINVAL;

	mutex_lock(&kvm->arch.config_lock);

	if (test_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags) &&
	    kvm_vcpu_init_changed(vcpu, init))
		goto out_unlock;

	bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);

	ret = kvm_setup_vcpu(vcpu);
	if (ret)
		goto out_unlock;

	/* Now we know what it is, we can reset it. */
	kvm_reset_vcpu(vcpu);

	set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags);
	vcpu_set_flag(vcpu, VCPU_INITIALIZED);
	ret = 0;
out_unlock:
	mutex_unlock(&kvm->arch.config_lock);
	return ret;
}

static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
			       const struct kvm_vcpu_init *init)
{
	int ret;

	if (init->target != KVM_ARM_TARGET_GENERIC_V8 &&
	    init->target != kvm_target_cpu())
		return -EINVAL;

	ret = kvm_vcpu_init_check_features(vcpu, init);
	if (ret)
		return ret;

	if (!kvm_vcpu_initialized(vcpu))
		return __kvm_vcpu_set_target(vcpu, init);

	if (kvm_vcpu_init_changed(vcpu, init))
		return -EINVAL;

	kvm_reset_vcpu(vcpu);
	return 0;
}

static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
					 struct kvm_vcpu_init *init)
{
	bool power_off = false;
	int ret;

	/*
	 * Treat the power-off vCPU feature as ephemeral. Clear the bit to avoid
	 * reflecting it in the finalized feature set, thus limiting its scope
	 * to a single KVM_ARM_VCPU_INIT call.
	 */
	if (init->features[0] & BIT(KVM_ARM_VCPU_POWER_OFF)) {
		init->features[0] &= ~BIT(KVM_ARM_VCPU_POWER_OFF);
		power_off = true;
	}

	ret = kvm_vcpu_set_target(vcpu, init);
	if (ret)
		return ret;

	/*
	 * Ensure a rebooted VM will fault in RAM pages and detect if the
	 * guest MMU is turned off and flush the caches as needed.
	 *
	 * S2FWB enforces all memory accesses to RAM being cacheable,
	 * ensuring that the data side is always coherent. We still
	 * need to invalidate the I-cache though, as FWB does *not*
	 * imply CTR_EL0.DIC.
	 */
	if (vcpu_has_run_once(vcpu)) {
		if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
			stage2_unmap_vm(vcpu->kvm);
		else
			icache_inval_all_pou();
	}

	vcpu_reset_hcr(vcpu);

	/*
	 * Handle the "start in power-off" case.
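	 *
	 * (Editorial note: userspace opts in by setting
	 * KVM_ARM_VCPU_POWER_OFF in init->features[0]; that bit was latched
	 * into 'power_off' and cleared above, so it never reaches the
	 * finalized feature set.)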
	 */
	spin_lock(&vcpu->arch.mp_state_lock);

	if (power_off)
		__kvm_arm_vcpu_power_off(vcpu);
	else
		WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);

	spin_unlock(&vcpu->arch.mp_state_lock);

	return 0;
}

static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	memset(events, 0, sizeof(*events));

	return __kvm_arm_vcpu_get_events(vcpu, events);
}

static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	int i;

	/* check whether the reserved field is zero */
	for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
		if (events->reserved[i])
			return -EINVAL;

	/* check whether the pad field is zero */
	for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
		if (events->exception.pad[i])
			return -EINVAL;

	return __kvm_arm_vcpu_set_events(vcpu, events);
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	long r;

	switch (ioctl) {
	case KVM_ARM_VCPU_INIT: {
		struct kvm_vcpu_init init;

		r = -EFAULT;
		if (copy_from_user(&init, argp, sizeof(init)))
			break;

		r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
		break;
	}
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -ENOEXEC;
		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			break;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;

		/*
		 * We could owe a reset due to PSCI. Handle the pending reset
		 * here to ensure userspace register accesses are ordered after
		 * the reset.
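		 *
		 * (Editorial pointer: the pending reset is raised by the PSCI
		 * CPU_ON path on the target vCPU; see kvm_psci_vcpu_on().)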
		 */
		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_reset_vcpu(vcpu);

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arm_set_reg(vcpu, &reg);
		else
			r = kvm_arm_get_reg(vcpu, &reg);
		break;
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		r = -ENOEXEC;
		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			break;

		r = -EPERM;
		if (!kvm_arm_vcpu_is_finalized(vcpu))
			break;

		r = -EFAULT;
		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			break;
		n = reg_list.n;
		reg_list.n = kvm_arm_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			break;
		r = -E2BIG;
		if (n < reg_list.n)
			break;
		r = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_set_attr(vcpu, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_get_attr(vcpu, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_has_attr(vcpu, &attr);
		break;
	}
	case KVM_GET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		if (!kvm_vcpu_initialized(vcpu))
			return -ENOEXEC;

		if (kvm_arm_vcpu_get_events(vcpu, &events))
			return -EINVAL;

		if (copy_to_user(argp, &events, sizeof(events)))
			return -EFAULT;

		return 0;
	}
	case KVM_SET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		if (!kvm_vcpu_initialized(vcpu))
			return -ENOEXEC;

		if (copy_from_user(&events, argp, sizeof(events)))
			return -EFAULT;

		return kvm_arm_vcpu_set_events(vcpu, &events);
	}
	case KVM_ARM_VCPU_FINALIZE: {
		int what;

		if (!kvm_vcpu_initialized(vcpu))
			return -ENOEXEC;

		if (get_user(what, (const int __user *)argp))
			return -EFAULT;

		return kvm_arm_vcpu_finalize(vcpu, what);
	}
	default:
		r = -EINVAL;
	}

	return r;
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{

}

static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
					struct kvm_arm_device_addr *dev_addr)
{
	switch (FIELD_GET(KVM_ARM_DEVICE_ID_MASK, dev_addr->id)) {
	case KVM_ARM_DEVICE_VGIC_V2:
		if (!vgic_present)
			return -ENXIO;
		return kvm_set_legacy_vgic_v2_addr(kvm, dev_addr);
	default:
		return -ENODEV;
	}
}

static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_ARM_VM_SMCCC_CTRL:
		return kvm_vm_smccc_has_attr(kvm, attr);
	default:
		return -ENXIO;
	}
}

static int kvm_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_ARM_VM_SMCCC_CTRL:
		return kvm_vm_smccc_set_attr(kvm, attr);
	default:
		return -ENXIO;
	}
}

int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;

	switch (ioctl) {
	case KVM_CREATE_IRQCHIP: {
		int ret;
		if (!vgic_present)
			return -ENXIO;
		mutex_lock(&kvm->lock);
		ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		mutex_unlock(&kvm->lock);
		return ret;
	}
	case KVM_ARM_SET_DEVICE_ADDR: {
		struct kvm_arm_device_addr dev_addr;

		if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
			return -EFAULT;
		return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
	}
	case KVM_ARM_PREFERRED_TARGET: {
		struct kvm_vcpu_init init = {
			.target = KVM_ARM_TARGET_GENERIC_V8,
		};

		if (copy_to_user(argp, &init, sizeof(init)))
			return -EFAULT;

		return 0;
	}
	case KVM_ARM_MTE_COPY_TAGS: {
		struct kvm_arm_copy_mte_tags copy_tags;

		if (copy_from_user(&copy_tags, argp, sizeof(copy_tags)))
			return -EFAULT;
		return kvm_vm_ioctl_mte_copy_tags(kvm, &copy_tags);
	}
	case KVM_ARM_SET_COUNTER_OFFSET: {
		struct kvm_arm_counter_offset offset;

		if (copy_from_user(&offset, argp, sizeof(offset)))
			return -EFAULT;
		return kvm_vm_ioctl_set_counter_offset(kvm, &offset);
	}
	case KVM_HAS_DEVICE_ATTR: {
		if (copy_from_user(&attr, argp, sizeof(attr)))
			return -EFAULT;

		return kvm_vm_has_attr(kvm, &attr);
	}
	case KVM_SET_DEVICE_ATTR: {
		if (copy_from_user(&attr, argp, sizeof(attr)))
			return -EFAULT;

		return kvm_vm_set_attr(kvm, &attr);
	}
	case KVM_ARM_GET_REG_WRITABLE_MASKS: {
		struct reg_mask_range range;

		if (copy_from_user(&range, argp, sizeof(range)))
			return -EFAULT;
		return kvm_vm_ioctl_get_reg_writable_masks(kvm, &range);
	}
	default:
		return -EINVAL;
	}
}

static unsigned long nvhe_percpu_size(void)
{
	return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
	       (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start);
}

static unsigned long nvhe_percpu_order(void)
{
	unsigned long size = nvhe_percpu_size();

	return size ? get_order(size) : 0;
}

static size_t pkvm_host_sve_state_order(void)
{
	return get_order(pkvm_host_sve_state_size());
}

/* A lookup table holding the hypervisor VA for each vector slot */
static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS];

static void kvm_init_vector_slot(void *base, enum arm64_hyp_spectre_vector slot)
{
	hyp_spectre_vector_selector[slot] = __kvm_vector_slot2addr(base, slot);
}

static int kvm_init_vector_slots(void)
{
	int err;
	void *base;

	base = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
	kvm_init_vector_slot(base, HYP_VECTOR_DIRECT);

	base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
	kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);

	if (kvm_system_needs_idmapped_vectors() &&
	    !is_protected_kvm_enabled()) {
		err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
					       __BP_HARDEN_HYP_VECS_SZ, &base);
		if (err)
			return err;
	}

	kvm_init_vector_slot(base, HYP_VECTOR_INDIRECT);
	kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_INDIRECT);
	return 0;
}

static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
{
	struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
	unsigned long tcr;

	/*
	 * Calculate the raw per-cpu offset without a translation from the
	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
	 * so that we can use adr_l to access per-cpu variables in EL2.
	 * Also drop the KASAN tag which gets in the way...
	 */
	params->tpidr_el2 = (unsigned long)kasan_reset_tag(per_cpu_ptr_nvhe_sym(__per_cpu_start, cpu)) -
			    (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));

	params->mair_el2 = read_sysreg(mair_el1);

	tcr = read_sysreg(tcr_el1);
	if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
		tcr &= ~(TCR_HD | TCR_HA | TCR_A1 | TCR_T0SZ_MASK);
		tcr |= TCR_EPD1_MASK;
	} else {
		unsigned long ips = FIELD_GET(TCR_IPS_MASK, tcr);

		tcr &= TCR_EL2_MASK;
		tcr |= TCR_EL2_RES1 | FIELD_PREP(TCR_EL2_PS_MASK, ips);
		if (lpa2_is_enabled())
			tcr |= TCR_EL2_DS;
	}
	tcr |= TCR_T0SZ(hyp_va_bits);
	params->tcr_el2 = tcr;

	params->pgd_pa = kvm_mmu_get_httbr();
	if (is_protected_kvm_enabled())
		params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS;
	else
		params->hcr_el2 = HCR_HOST_NVHE_FLAGS;
	if (cpus_have_final_cap(ARM64_KVM_HVHE))
		params->hcr_el2 |= HCR_E2H;
	params->vttbr = params->vtcr = 0;

	/*
	 * Flush the init params from the data cache because the struct will
	 * be read while the MMU is off.
	 */
	kvm_flush_dcache_to_poc(params, sizeof(*params));
}

static void hyp_install_host_vector(void)
{
	struct kvm_nvhe_init_params *params;
	struct arm_smccc_res res;

	/* Switch from the HYP stub to our own HYP init vector */
	__hyp_set_vectors(kvm_get_idmap_vector());

	/*
	 * Call initialization code, and switch to the full blown HYP code.
	 * If the cpucaps haven't been finalized yet, something has gone very
	 * wrong, and hyp will crash and burn when it uses any
	 * cpus_have_*_cap() wrapper.
	 */
	BUG_ON(!system_capabilities_finalized());
	params = this_cpu_ptr_nvhe_sym(kvm_init_params);
	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init), virt_to_phys(params), &res);
	WARN_ON(res.a0 != SMCCC_RET_SUCCESS);
}

static void cpu_init_hyp_mode(void)
{
	hyp_install_host_vector();

	/*
	 * Disabling SSBD on a non-VHE system requires us to enable SSBS
	 * at EL2.
	 */
	if (this_cpu_has_cap(ARM64_SSBS) &&
	    arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
		kvm_call_hyp_nvhe(__kvm_enable_ssbs);
	}
}

static void cpu_hyp_reset(void)
{
	if (!is_kernel_in_hyp_mode())
		__hyp_reset_vectors();
}

/*
 * EL2 vectors can be mapped and rerouted in a number of ways,
 * depending on the kernel configuration and CPU present:
 *
 * - If the CPU is affected by Spectre-v2, the hardening sequence is
 *   placed in one of the vector slots, which is executed before jumping
 *   to the real vectors.
 *
 * - If the CPU also has the ARM64_SPECTRE_V3A cap, the slot
 *   containing the hardening sequence is mapped next to the idmap page,
 *   and executed before jumping to the real vectors.
 *
 * - If the CPU only has the ARM64_SPECTRE_V3A cap, then an
 *   empty slot is selected, mapped next to the idmap page, and
 *   executed before jumping to the real vectors.
 *
 * Note that ARM64_SPECTRE_V3A is somewhat incompatible with
 * VHE, as we don't have hypervisor-specific mappings. If the system
 * is VHE and yet selects this capability, it will be ignored.
 */
static void cpu_set_hyp_vector(void)
{
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
	void *vector = hyp_spectre_vector_selector[data->slot];

	if (!is_protected_kvm_enabled())
		*this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector;
	else
		kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot);
}

static void cpu_hyp_init_context(void)
{
	kvm_init_host_cpu_context(host_data_ptr(host_ctxt));
	kvm_init_host_debug_data();

	if (!is_kernel_in_hyp_mode())
		cpu_init_hyp_mode();
}

static void cpu_hyp_init_features(void)
{
	cpu_set_hyp_vector();

	if (is_kernel_in_hyp_mode()) {
		kvm_timer_init_vhe();
		kvm_debug_init_vhe();
	}

	if (vgic_present)
		kvm_vgic_init_cpu_hardware();
}

static void cpu_hyp_reinit(void)
{
	cpu_hyp_reset();
	cpu_hyp_init_context();
	cpu_hyp_init_features();
}

static void cpu_hyp_init(void *discard)
{
	if (!__this_cpu_read(kvm_hyp_initialized)) {
		cpu_hyp_reinit();
		__this_cpu_write(kvm_hyp_initialized, 1);
	}
}

static void cpu_hyp_uninit(void *discard)
{
	if (!is_protected_kvm_enabled() && __this_cpu_read(kvm_hyp_initialized)) {
		cpu_hyp_reset();
		__this_cpu_write(kvm_hyp_initialized, 0);
	}
}

int kvm_arch_enable_virtualization_cpu(void)
{
	/*
	 * Most calls to this function are made with migration
	 * disabled, but not with preemption disabled. The former is
	 * enough to ensure correctness, but most of the helpers
	 * expect the latter and will throw a tantrum otherwise.
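	 *
	 * (Hence the explicit preempt_disable()/preempt_enable() bracket
	 * around the helpers below.)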
int kvm_arch_enable_virtualization_cpu(void)
{
	/*
	 * Most calls to this function are made with migration
	 * disabled, but not with preemption disabled. The former is
	 * enough to ensure correctness, but most of the helpers
	 * expect the latter and will throw a tantrum otherwise.
	 */
	preempt_disable();

	cpu_hyp_init(NULL);

	kvm_vgic_cpu_up();
	kvm_timer_cpu_up();

	preempt_enable();

	return 0;
}

void kvm_arch_disable_virtualization_cpu(void)
{
	kvm_timer_cpu_down();
	kvm_vgic_cpu_down();

	if (!is_protected_kvm_enabled())
		cpu_hyp_uninit(NULL);
}

#ifdef CONFIG_CPU_PM
static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
				    unsigned long cmd,
				    void *v)
{
	/*
	 * kvm_hyp_initialized is left with its old value over
	 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
	 * re-enable hyp.
	 */
	switch (cmd) {
	case CPU_PM_ENTER:
		if (__this_cpu_read(kvm_hyp_initialized))
			/*
			 * don't update kvm_hyp_initialized here
			 * so that the hyp will be re-enabled
			 * when we resume. See below.
			 */
			cpu_hyp_reset();

		return NOTIFY_OK;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		if (__this_cpu_read(kvm_hyp_initialized))
			/* The hyp was enabled before suspend. */
			cpu_hyp_reinit();

		return NOTIFY_OK;

	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block hyp_init_cpu_pm_nb = {
	.notifier_call = hyp_init_cpu_pm_notifier,
};

static void __init hyp_cpu_pm_init(void)
{
	if (!is_protected_kvm_enabled())
		cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
}

static void __init hyp_cpu_pm_exit(void)
{
	if (!is_protected_kvm_enabled())
		cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
}
#else
static inline void __init hyp_cpu_pm_init(void)
{
}
static inline void __init hyp_cpu_pm_exit(void)
{
}
#endif

static void __init init_cpu_logical_map(void)
{
	unsigned int cpu;

	/*
	 * Copy the MPIDR <-> logical CPU ID mapping to hyp.
	 * Only copy the set of online CPUs whose features have been checked
	 * against the finalized system capabilities. The hypervisor will not
	 * allow any other CPUs from the `possible` set to boot.
	 */
	for_each_online_cpu(cpu)
		hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu);
}

#define init_psci_0_1_impl_state(config, what)	\
	config.psci_0_1_ ## what ## _implemented = psci_ops.what
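/*
 * For illustration, init_psci_0_1_impl_state(kvm_host_psci_config,
 * cpu_suspend) expands to:
 *
 *	kvm_host_psci_config.psci_0_1_cpu_suspend_implemented =
 *		psci_ops.cpu_suspend;
 */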
static bool __init init_psci_relay(void)
{
	/*
	 * If PSCI has not been initialized, protected KVM cannot install
	 * itself on newly booted CPUs.
	 */
	if (!psci_ops.get_version) {
		kvm_err("Cannot initialize protected mode without PSCI\n");
		return false;
	}

	kvm_host_psci_config.version = psci_ops.get_version();
	kvm_host_psci_config.smccc_version = arm_smccc_get_version();

	if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
		kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
		init_psci_0_1_impl_state(kvm_host_psci_config, cpu_suspend);
		init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on);
		init_psci_0_1_impl_state(kvm_host_psci_config, cpu_off);
		init_psci_0_1_impl_state(kvm_host_psci_config, migrate);
	}
	return true;
}

static int __init init_subsystems(void)
{
	int err = 0;

	/*
	 * Enable hardware so that subsystem initialisation can access EL2.
	 */
	on_each_cpu(cpu_hyp_init, NULL, 1);

	/*
	 * Register CPU low-power notifier
	 */
	hyp_cpu_pm_init();

	/*
	 * Init HYP view of VGIC
	 */
	err = kvm_vgic_hyp_init();
	switch (err) {
	case 0:
		vgic_present = true;
		break;
	case -ENODEV:
	case -ENXIO:
		/*
		 * No VGIC? No pKVM for you.
		 *
		 * Protected mode assumes that VGICv3 is present, so no point
		 * in trying to hobble along if vgic initialization fails.
		 */
		if (is_protected_kvm_enabled())
			goto out;

		/*
		 * Otherwise, userspace could choose to implement a GIC for its
		 * guest on non-cooperative hardware.
		 */
		vgic_present = false;
		err = 0;
		break;
	default:
		goto out;
	}

	if (kvm_mode == KVM_MODE_NV &&
	    !(vgic_present && (kvm_vgic_global_state.type == VGIC_V3 ||
			       kvm_vgic_global_state.has_gcie_v3_compat))) {
		kvm_err("NV support requires GICv3 or GICv5 with legacy support, giving up\n");
		err = -EINVAL;
		goto out;
	}

	/*
	 * Init HYP architected timer support
	 */
	err = kvm_timer_hyp_init(vgic_present);
	if (err)
		goto out;

	kvm_register_perf_callbacks(NULL);

out:
	if (err)
		hyp_cpu_pm_exit();

	if (err || !is_protected_kvm_enabled())
		on_each_cpu(cpu_hyp_uninit, NULL, 1);

	return err;
}

static void __init teardown_subsystems(void)
{
	kvm_unregister_perf_callbacks();
	hyp_cpu_pm_exit();
}

static void __init teardown_hyp_mode(void)
{
	bool free_sve = system_supports_sve() && is_protected_kvm_enabled();
	int cpu;

	free_hyp_pgds();
	for_each_possible_cpu(cpu) {
		if (per_cpu(kvm_hyp_initialized, cpu))
			continue;

		free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT);

		if (!kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu])
			continue;

		if (free_sve) {
			struct cpu_sve_state *sve_state;

			sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
			free_pages((unsigned long) sve_state, pkvm_host_sve_state_order());
		}

		free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
	}
}

static int __init do_pkvm_init(u32 hyp_va_bits)
{
	void *per_cpu_base = kvm_ksym_ref(kvm_nvhe_sym(kvm_arm_hyp_percpu_base));
	int ret;

	preempt_disable();
	cpu_hyp_init_context();
	ret = kvm_call_hyp_nvhe(__pkvm_init, hyp_mem_base, hyp_mem_size,
				num_possible_cpus(), kern_hyp_va(per_cpu_base),
				hyp_va_bits);
	cpu_hyp_init_features();

	/*
	 * The stub hypercalls are now disabled, so set our local flag to
	 * prevent a later re-init attempt in kvm_arch_enable_virtualization_cpu().
	 */
	__this_cpu_write(kvm_hyp_initialized, 1);
	preempt_enable();

	return ret;
}

static u64 get_hyp_id_aa64pfr0_el1(void)
{
	/*
	 * Track whether the system isn't affected by spectre/meltdown in the
	 * hypervisor's view of id_aa64pfr0_el1, used for protected VMs.
	 * Although this is per-CPU, we make it global for simplicity, e.g., not
	 * to have to worry about vcpu migration.
	 *
	 * Unlike for non-protected VMs, userspace cannot override this for
	 * protected VMs.
	 */
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	val &= ~(ID_AA64PFR0_EL1_CSV2 |
		 ID_AA64PFR0_EL1_CSV3);

	val |= FIELD_PREP(ID_AA64PFR0_EL1_CSV2,
			  arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
	val |= FIELD_PREP(ID_AA64PFR0_EL1_CSV3,
			  arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);

	return val;
}

static void kvm_hyp_init_symbols(void)
{
	kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = get_hyp_id_aa64pfr0_el1();
	kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
	kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);
	kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
	kvm_nvhe_sym(id_aa64isar2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
	kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
	kvm_nvhe_sym(id_aa64smfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64SMFR0_EL1);
	kvm_nvhe_sym(__icache_flags) = __icache_flags;
	kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;

	/* Propagate the FGT state to the nVHE side */
	kvm_nvhe_sym(hfgrtr_masks) = hfgrtr_masks;
	kvm_nvhe_sym(hfgwtr_masks) = hfgwtr_masks;
	kvm_nvhe_sym(hfgitr_masks) = hfgitr_masks;
	kvm_nvhe_sym(hdfgrtr_masks) = hdfgrtr_masks;
	kvm_nvhe_sym(hdfgwtr_masks) = hdfgwtr_masks;
	kvm_nvhe_sym(hafgrtr_masks) = hafgrtr_masks;
	kvm_nvhe_sym(hfgrtr2_masks) = hfgrtr2_masks;
	kvm_nvhe_sym(hfgwtr2_masks) = hfgwtr2_masks;
	kvm_nvhe_sym(hfgitr2_masks) = hfgitr2_masks;
	kvm_nvhe_sym(hdfgrtr2_masks) = hdfgrtr2_masks;
	kvm_nvhe_sym(hdfgwtr2_masks) = hdfgwtr2_masks;

	/*
	 * Flush entire BSS since part of its data containing init symbols is read
	 * while the MMU is off.
	 */
	kvm_flush_dcache_to_poc(kvm_ksym_ref(__hyp_bss_start),
				kvm_ksym_ref(__hyp_bss_end) - kvm_ksym_ref(__hyp_bss_start));
}

static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
{
	void *addr = phys_to_virt(hyp_mem_base);
	int ret;

	ret = create_hyp_mappings(addr, addr + hyp_mem_size, PAGE_HYP);
	if (ret)
		return ret;

	ret = do_pkvm_init(hyp_va_bits);
	if (ret)
		return ret;

	free_hyp_pgds();

	return 0;
}

static int init_pkvm_host_sve_state(void)
{
	int cpu;

	if (!system_supports_sve())
		return 0;

	/* Allocate pages for host sve state in protected mode. */
	for_each_possible_cpu(cpu) {
		struct page *page = alloc_pages(GFP_KERNEL, pkvm_host_sve_state_order());

		if (!page)
			return -ENOMEM;

		per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = page_address(page);
	}

	/*
	 * Don't map the pages in hyp since these are only used in protected
	 * mode, which will (re)create its own mapping when initialized.
	 */

	return 0;
}

/*
 * Finalizes the initialization of hyp mode, once everything else is initialized
 * and the initialization process cannot fail.
 */
static void finalize_init_hyp_mode(void)
{
	int cpu;

	if (system_supports_sve() && is_protected_kvm_enabled()) {
		for_each_possible_cpu(cpu) {
			struct cpu_sve_state *sve_state;

			sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
			per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state =
				kern_hyp_va(sve_state);
		}
	}
}

static void pkvm_hyp_init_ptrauth(void)
{
	struct kvm_cpu_context *hyp_ctxt;
	int cpu;

	for_each_possible_cpu(cpu) {
		hyp_ctxt = per_cpu_ptr_nvhe_sym(kvm_hyp_ctxt, cpu);
		hyp_ctxt->sys_regs[APIAKEYLO_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APIAKEYHI_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APIBKEYLO_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APIBKEYHI_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APDAKEYLO_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APDAKEYHI_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APDBKEYLO_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APDBKEYHI_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APGAKEYLO_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APGAKEYHI_EL1] = get_random_long();
	}
}

/* Inits Hyp-mode on all online CPUs */
static int __init init_hyp_mode(void)
{
	u32 hyp_va_bits;
	int cpu;
	int err = -ENOMEM;

	/*
	 * The protected Hyp-mode cannot be initialized if the memory pool
	 * allocation has failed.
	 */
	if (is_protected_kvm_enabled() && !hyp_mem_base)
		goto out_err;

	/*
	 * Allocate Hyp PGD and setup Hyp identity mapping
	 */
	err = kvm_mmu_init(&hyp_va_bits);
	if (err)
		goto out_err;

	/*
	 * Allocate stack pages for Hypervisor-mode
	 */
	for_each_possible_cpu(cpu) {
		unsigned long stack_base;

		stack_base = __get_free_pages(GFP_KERNEL, NVHE_STACK_SHIFT - PAGE_SHIFT);
		if (!stack_base) {
			err = -ENOMEM;
			goto out_err;
		}

		per_cpu(kvm_arm_hyp_stack_base, cpu) = stack_base;
	}

	/*
	 * Allocate and initialize pages for Hypervisor-mode percpu regions.
	 */
	for_each_possible_cpu(cpu) {
		struct page *page;
		void *page_addr;

		page = alloc_pages(GFP_KERNEL, nvhe_percpu_order());
		if (!page) {
			err = -ENOMEM;
			goto out_err;
		}

		page_addr = page_address(page);
		memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size());
		kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu] = (unsigned long)page_addr;
	}

	/*
	 * Map the Hyp-code called directly from the host
	 */
	err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
				  kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
	if (err) {
		kvm_err("Cannot map world-switch code\n");
		goto out_err;
	}

	err = create_hyp_mappings(kvm_ksym_ref(__hyp_data_start),
				  kvm_ksym_ref(__hyp_data_end), PAGE_HYP);
	if (err) {
		kvm_err("Cannot map .hyp.data section\n");
		goto out_err;
	}

	err = create_hyp_mappings(kvm_ksym_ref(__hyp_rodata_start),
				  kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO);
	if (err) {
		kvm_err("Cannot map .hyp.rodata section\n");
		goto out_err;
	}

	err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
				  kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
	if (err) {
		kvm_err("Cannot map rodata section\n");
		goto out_err;
	}

	/*
	 * .hyp.bss is guaranteed to be placed at the beginning of the .bss
	 * section thanks to an assertion in the linker script. Map it RW and
	 * the rest of .bss RO.
	 */
	err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_start),
				  kvm_ksym_ref(__hyp_bss_end), PAGE_HYP);
	if (err) {
		kvm_err("Cannot map hyp bss section: %d\n", err);
		goto out_err;
	}

	err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_end),
				  kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
	if (err) {
		kvm_err("Cannot map bss section\n");
		goto out_err;
	}
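	/*
	 * At this point the static hyp mappings are in place: .hyp.text is
	 * executable, .hyp.data and .hyp.bss are writable, and the rodata
	 * and remaining .bss ranges are read-only. What follows maps the
	 * per-CPU stacks and per-CPU regions on top of that.
	 */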
	/*
	 * Map the Hyp stack pages
	 */
	for_each_possible_cpu(cpu) {
		struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
		char *stack_base = (char *)per_cpu(kvm_arm_hyp_stack_base, cpu);

		err = create_hyp_stack(__pa(stack_base), &params->stack_hyp_va);
		if (err) {
			kvm_err("Cannot map hyp stack\n");
			goto out_err;
		}

		/*
		 * Save the stack PA in nvhe_init_params. This will be needed
		 * to recreate the stack mapping in protected nVHE mode.
		 * __hyp_pa() won't do the right thing there, since the stack
		 * has been mapped in the flexible private VA space.
		 */
		params->stack_pa = __pa(stack_base);
	}

	for_each_possible_cpu(cpu) {
		char *percpu_begin = (char *)kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];
		char *percpu_end = percpu_begin + nvhe_percpu_size();

		/* Map Hyp percpu pages */
		err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP);
		if (err) {
			kvm_err("Cannot map hyp percpu region\n");
			goto out_err;
		}

		/* Prepare the CPU initialization parameters */
		cpu_prepare_hyp_mode(cpu, hyp_va_bits);
	}

	kvm_hyp_init_symbols();

	if (is_protected_kvm_enabled()) {
		if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) &&
		    cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH))
			pkvm_hyp_init_ptrauth();

		init_cpu_logical_map();

		if (!init_psci_relay()) {
			err = -ENODEV;
			goto out_err;
		}

		err = init_pkvm_host_sve_state();
		if (err)
			goto out_err;

		err = kvm_hyp_init_protection(hyp_va_bits);
		if (err) {
			kvm_err("Failed to init hyp memory protection\n");
			goto out_err;
		}
	}

	return 0;

out_err:
	teardown_hyp_mode();
	kvm_err("error initializing Hyp mode: %d\n", err);
	return err;
}

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
	struct kvm_vcpu *vcpu = NULL;
	struct kvm_mpidr_data *data;
	unsigned long i;

	mpidr &= MPIDR_HWID_BITMASK;

	rcu_read_lock();
	data = rcu_dereference(kvm->arch.mpidr_data);

	if (data) {
		u16 idx = kvm_mpidr_index(data, mpidr);

		vcpu = kvm_get_vcpu(kvm, data->cmpidr_to_idx[idx]);
		if (mpidr != kvm_vcpu_get_mpidr_aff(vcpu))
			vcpu = NULL;
	}

	rcu_read_unlock();

	if (vcpu)
		return vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
			return vcpu;
	}
	return NULL;
}

bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
{
	return irqchip_in_kernel(kvm);
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm_kernel_irq_routing_entry *irq_entry = &irqfd->irq_entry;

	/*
	 * The only thing we have a chance of directly-injecting is LPIs. Maybe
	 * one day...
	 */
	if (irq_entry->type != KVM_IRQ_ROUTING_MSI)
		return 0;

	return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
					  &irqfd->irq_entry);
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm_kernel_irq_routing_entry *irq_entry = &irqfd->irq_entry;

	if (irq_entry->type != KVM_IRQ_ROUTING_MSI)
		return;

	kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq);
}
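/*
 * Sketch of the bypass flow, assuming a VFIO-style producer: pairing an
 * irqfd with a producer lands in kvm_arch_irq_bypass_add_producer(),
 * which hands the MSI route to the vgic for direct vLPI delivery, and
 * deleting the producer undoes that forwarding in
 * kvm_arch_irq_bypass_del_producer().
 */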
void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd,
				   struct kvm_kernel_irq_routing_entry *old,
				   struct kvm_kernel_irq_routing_entry *new)
{
	if (old->type == KVM_IRQ_ROUTING_MSI &&
	    new->type == KVM_IRQ_ROUTING_MSI &&
	    !memcmp(&old->msi, &new->msi, sizeof(new->msi)))
		return;

	/*
	 * Remapping the vLPI requires taking the its_lock mutex to resolve
	 * the new translation. We're in spinlock land at this point, so no
	 * chance of resolving the translation.
	 *
	 * Unmap the vLPI and fall back to software LPI injection.
	 */
	return kvm_vgic_v4_unset_forwarding(irqfd->kvm, irqfd->producer->irq);
}

void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_arm_halt_guest(irqfd->kvm);
}

void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_arm_resume_guest(irqfd->kvm);
}

/* Initialize Hyp-mode and memory mappings on all CPUs */
static __init int kvm_arm_init(void)
{
	int err;
	bool in_hyp_mode;

	if (!is_hyp_mode_available()) {
		kvm_info("HYP mode not available\n");
		return -ENODEV;
	}

	if (kvm_get_mode() == KVM_MODE_NONE) {
		kvm_info("KVM disabled from command line\n");
		return -ENODEV;
	}

	err = kvm_sys_reg_table_init();
	if (err) {
		kvm_info("Error initializing system register tables");
		return err;
	}

	in_hyp_mode = is_kernel_in_hyp_mode();

	if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
	    cpus_have_final_cap(ARM64_WORKAROUND_1508412))
		kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
			 "Only trusted guests should be used on this system.\n");

	err = kvm_set_ipa_limit();
	if (err)
		return err;

	err = kvm_arm_init_sve();
	if (err)
		return err;

	err = kvm_arm_vmid_alloc_init();
	if (err) {
		kvm_err("Failed to initialize VMID allocator.\n");
		return err;
	}

	if (!in_hyp_mode) {
		err = init_hyp_mode();
		if (err)
			goto out_err;
	}

	err = kvm_init_vector_slots();
	if (err) {
		kvm_err("Cannot initialise vector slots\n");
		goto out_hyp;
	}

	err = init_subsystems();
	if (err)
		goto out_hyp;

	kvm_info("%s%sVHE%s mode initialized successfully\n",
		 in_hyp_mode ? "" : (is_protected_kvm_enabled() ?
				     "Protected " : "Hyp "),
		 in_hyp_mode ? "" : (cpus_have_final_cap(ARM64_KVM_HVHE) ?
				     "h" : "n"),
		 cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) ? "+NV2" : "");

	/*
	 * FIXME: Do something reasonable if kvm_init() fails after pKVM
	 * hypervisor protection is finalized.
	 */
	err = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (err)
		goto out_subs;

	/*
	 * This should be called after initialization is done and failure isn't
	 * possible anymore.
	 */
	if (!in_hyp_mode)
		finalize_init_hyp_mode();

	kvm_arm_initialised = true;

	return 0;

out_subs:
	teardown_subsystems();
out_hyp:
	if (!in_hyp_mode)
		teardown_hyp_mode();
out_err:
	kvm_arm_vmid_alloc_free();
	return err;
}
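/*
 * Example command-line usage for the early parameters parsed below:
 *
 *	kvm-arm.mode=none		- disable KVM entirely
 *	kvm-arm.mode=protected		- pKVM; not available with VHE
 *	kvm-arm.mode=nvhe		- default nVHE mode (non-VHE kernels only)
 *	kvm-arm.mode=nested		- enable NV support (VHE only)
 *	kvm-arm.wfi_trap_policy=trap	- always trap guest WFI
 *	kvm-arm.wfe_trap_policy=notrap	- never trap guest WFE
 */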
static int __init early_kvm_mode_cfg(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (strcmp(arg, "none") == 0) {
		kvm_mode = KVM_MODE_NONE;
		return 0;
	}

	if (!is_hyp_mode_available()) {
		pr_warn_once("KVM is not available. Ignoring kvm-arm.mode\n");
		return 0;
	}

	if (strcmp(arg, "protected") == 0) {
		if (!is_kernel_in_hyp_mode())
			kvm_mode = KVM_MODE_PROTECTED;
		else
			pr_warn_once("Protected KVM not available with VHE\n");

		return 0;
	}

	if (strcmp(arg, "nvhe") == 0 && !WARN_ON(is_kernel_in_hyp_mode())) {
		kvm_mode = KVM_MODE_DEFAULT;
		return 0;
	}

	if (strcmp(arg, "nested") == 0 && !WARN_ON(!is_kernel_in_hyp_mode())) {
		kvm_mode = KVM_MODE_NV;
		return 0;
	}

	return -EINVAL;
}
early_param("kvm-arm.mode", early_kvm_mode_cfg);

static int __init early_kvm_wfx_trap_policy_cfg(char *arg, enum kvm_wfx_trap_policy *p)
{
	if (!arg)
		return -EINVAL;

	if (strcmp(arg, "trap") == 0) {
		*p = KVM_WFX_TRAP;
		return 0;
	}

	if (strcmp(arg, "notrap") == 0) {
		*p = KVM_WFX_NOTRAP;
		return 0;
	}

	return -EINVAL;
}

static int __init early_kvm_wfi_trap_policy_cfg(char *arg)
{
	return early_kvm_wfx_trap_policy_cfg(arg, &kvm_wfi_trap_policy);
}
early_param("kvm-arm.wfi_trap_policy", early_kvm_wfi_trap_policy_cfg);

static int __init early_kvm_wfe_trap_policy_cfg(char *arg)
{
	return early_kvm_wfx_trap_policy_cfg(arg, &kvm_wfe_trap_policy);
}
early_param("kvm-arm.wfe_trap_policy", early_kvm_wfe_trap_policy_cfg);

enum kvm_mode kvm_get_mode(void)
{
	return kvm_mode;
}

module_init(kvm_arm_init);