// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bug.h>
#include <linux/cpu_pm.h>
#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <linux/psci.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace_arm.h"

#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pkvm.h>
#include <asm/kvm_emulate.h>
#include <asm/sections.h>

#include <kvm/arm_hypercalls.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_psci.h>

static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;

DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);

DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);

DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);

static bool vgic_present;

static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r;
	u64 new_cap;

	if (cap->flags)
		return -EINVAL;

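	/*
	 * Each cap below takes whatever lock its state requires:
	 * KVM_CAP_ARM_MTE can only be enabled before any vCPU exists,
	 * and the eager split chunk size can only change while no
	 * memslots exist, so both are checked under the relevant lock.
	 */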
	switch (cap->cap) {
	case KVM_CAP_ARM_NISV_TO_USER:
		r = 0;
		set_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
			&kvm->arch.flags);
		break;
	case KVM_CAP_ARM_MTE:
		mutex_lock(&kvm->lock);
		if (!system_supports_mte() || kvm->created_vcpus) {
			r = -EINVAL;
		} else {
			r = 0;
			set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_CAP_ARM_SYSTEM_SUSPEND:
		r = 0;
		set_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags);
		break;
	case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
		new_cap = cap->args[0];

		mutex_lock(&kvm->slots_lock);
		/*
		 * To keep things simple, allow changing the chunk
		 * size only when no memory slots have been created.
		 */
		if (!kvm_are_all_memslots_empty(kvm)) {
			r = -EINVAL;
		} else if (new_cap && !kvm_is_block_size_supported(new_cap)) {
			r = -EINVAL;
		} else {
			r = 0;
			kvm->arch.mmu.split_page_chunk_size = new_cap;
		}
		mutex_unlock(&kvm->slots_lock);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

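/*
 * The default vCPU limit follows the host's GIC model when a vGIC is
 * present; otherwise fall back to the architecture-wide KVM_MAX_VCPUS.
 */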
static int kvm_arm_default_max_vcpus(void)
{
	return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
}

/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm:	pointer to the KVM struct
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret;

	mutex_init(&kvm->arch.config_lock);

#ifdef CONFIG_LOCKDEP
	/* Clue in lockdep that the config_lock must be taken inside kvm->lock */
	mutex_lock(&kvm->lock);
	mutex_lock(&kvm->arch.config_lock);
	mutex_unlock(&kvm->arch.config_lock);
	mutex_unlock(&kvm->lock);
#endif

	ret = kvm_share_hyp(kvm, kvm + 1);
	if (ret)
		return ret;

	ret = pkvm_init_host_vm(kvm);
	if (ret)
		goto err_unshare_kvm;

	if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL_ACCOUNT)) {
		ret = -ENOMEM;
		goto err_unshare_kvm;
	}
	cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask);

	ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu, type);
	if (ret)
		goto err_free_cpumask;

	kvm_vgic_early_init(kvm);

	kvm_timer_init_vm(kvm);

	/* The maximum number of VCPUs is limited by the host's GIC model */
	kvm->max_vcpus = kvm_arm_default_max_vcpus();

	kvm_arm_init_hypercalls(kvm);

	bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES);

	return 0;

err_free_cpumask:
	free_cpumask_var(kvm->arch.supported_cpus);
err_unshare_kvm:
	kvm_unshare_hyp(kvm, kvm + 1);
	return ret;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}


/**
 * kvm_arch_destroy_vm - destroy the VM data structure
 * @kvm:	pointer to the KVM struct
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	bitmap_free(kvm->arch.pmu_filter);
	free_cpumask_var(kvm->arch.supported_cpus);

	kvm_vgic_destroy(kvm);

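	/* Protected VMs also carry hyp-side state that must be torn down. */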
	if (is_protected_kvm_enabled())
		pkvm_destroy_hyp_vm(kvm);

	kvm_destroy_vcpus(kvm);

	kvm_unshare_hyp(kvm, kvm + 1);

	kvm_arm_teardown_hypercalls(kvm);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	switch (ext) {
	case KVM_CAP_IRQCHIP:
		r = vgic_present;
		break;
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ARM_PSCI:
	case KVM_CAP_ARM_PSCI_0_2:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_VCPU_EVENTS:
	case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
	case KVM_CAP_ARM_NISV_TO_USER:
	case KVM_CAP_ARM_INJECT_EXT_DABT:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_VCPU_ATTRIBUTES:
	case KVM_CAP_PTP_KVM:
	case KVM_CAP_ARM_SYSTEM_SUSPEND:
	case KVM_CAP_IRQFD_RESAMPLE:
	case KVM_CAP_COUNTER_OFFSET:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		return KVM_GUESTDBG_VALID_MASK;
	case KVM_CAP_ARM_SET_DEVICE_ADDR:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		/*
		 * ARM64 treats KVM_CAP_NR_VCPUS differently from all other
		 * architectures, as it does not always bound it to
		 * KVM_CAP_MAX_VCPUS. It should not matter much because
		 * this is just an advisory value.
		 */
		r = min_t(unsigned int, num_online_cpus(),
			  kvm_arm_default_max_vcpus());
		break;
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		if (kvm)
			r = kvm->max_vcpus;
		else
			r = kvm_arm_default_max_vcpus();
		break;
	case KVM_CAP_MSI_DEVID:
		if (!kvm)
			r = -EINVAL;
		else
			r = kvm->arch.vgic.msis_require_devid;
		break;
	case KVM_CAP_ARM_USER_IRQ:
		/*
		 * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
		 * (bump this number if adding more devices)
		 */
		r = 1;
		break;
	case KVM_CAP_ARM_MTE:
		r = system_supports_mte();
		break;
	case KVM_CAP_STEAL_TIME:
		r = kvm_arm_pvtime_supported();
		break;
	case KVM_CAP_ARM_EL1_32BIT:
		r = cpus_have_const_cap(ARM64_HAS_32BIT_EL1);
		break;
	case KVM_CAP_GUEST_DEBUG_HW_BPS:
		r = get_num_brps();
		break;
	case KVM_CAP_GUEST_DEBUG_HW_WPS:
		r = get_num_wrps();
		break;
	case KVM_CAP_ARM_PMU_V3:
		r = kvm_arm_support_pmu_v3();
		break;
	case KVM_CAP_ARM_INJECT_SERROR_ESR:
		r = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
		break;
	case KVM_CAP_ARM_VM_IPA_SIZE:
		r = get_kvm_ipa_limit();
		break;
	case KVM_CAP_ARM_SVE:
		r = system_supports_sve();
		break;
	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
	case KVM_CAP_ARM_PTRAUTH_GENERIC:
		r = system_has_full_ptr_auth();
		break;
	case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
		if (kvm)
			r = kvm->arch.mmu.split_page_chunk_size;
		else
			r = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
		break;
	case KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES:
		r = kvm_supported_block_sizes();
		break;
	default:
		r = 0;
	}

	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

struct kvm *kvm_arch_alloc_vm(void)
{
	size_t sz = sizeof(struct kvm);

	if (!has_vhe())
		return kzalloc(sz, GFP_KERNEL_ACCOUNT);

	return __vmalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_HIGHMEM | __GFP_ZERO);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
		return -EBUSY;

	if (id >= kvm->max_vcpus)
		return -EINVAL;

	return 0;
}

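/*
 * First-stage vCPU setup; the vCPU remains unusable (target == -1) until
 * userspace completes its initialisation via KVM_ARM_VCPU_INIT.
 */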
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	spin_lock_init(&vcpu->arch.mp_state_lock);

#ifdef CONFIG_LOCKDEP
	/* Inform lockdep that the config_lock is acquired after vcpu->mutex */
	mutex_lock(&vcpu->mutex);
	mutex_lock(&vcpu->kvm->arch.config_lock);
	mutex_unlock(&vcpu->kvm->arch.config_lock);
	mutex_unlock(&vcpu->mutex);
#endif

	/* Force users to call KVM_ARM_VCPU_INIT */
	vcpu->arch.target = -1;
	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);

	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;

	/*
	 * Default value for the FP state, will be overloaded at load
	 * time if we support FP (pretty likely)
	 */
	vcpu->arch.fp_state = FP_STATE_FREE;

	/* Set up the timer */
	kvm_timer_vcpu_init(vcpu);

	kvm_pmu_vcpu_init(vcpu);

	kvm_arm_reset_debug_ptr(vcpu);

	kvm_arm_pvtime_vcpu_init(&vcpu->arch);

	vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;

	err = kvm_vgic_vcpu_init(vcpu);
	if (err)
		return err;

	return kvm_share_hyp(vcpu, vcpu + 1);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm)))
		static_branch_dec(&userspace_irqchip_in_use);

	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
	kvm_timer_vcpu_terminate(vcpu);
	kvm_pmu_vcpu_destroy(vcpu);

	kvm_arm_vcpu_destroy(vcpu);
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{

}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{

}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_s2_mmu *mmu;
	int *last_ran;

	mmu = vcpu->arch.hw_mmu;
	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);

	/*
	 * We guarantee that both TLBs and I-cache are private to each
	 * vcpu. If we detect that a vcpu from the same VM has previously
	 * run on the same physical CPU, call into the hypervisor code to
	 * nuke the relevant contexts.
	 *
	 * We might get preempted before the vCPU actually runs, but
	 * over-invalidation doesn't affect correctness.
	 */
	if (*last_ran != vcpu->vcpu_id) {
		kvm_call_hyp(__kvm_flush_cpu_context, mmu);
		*last_ran = vcpu->vcpu_id;
	}

	vcpu->cpu = cpu;

	kvm_vgic_load(vcpu);
	kvm_timer_vcpu_load(vcpu);
	if (has_vhe())
		kvm_vcpu_load_sysregs_vhe(vcpu);
	kvm_arch_vcpu_load_fp(vcpu);
	kvm_vcpu_pmu_restore_guest(vcpu);
	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
		kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);

	if (single_task_running())
		vcpu_clear_wfx_traps(vcpu);
	else
		vcpu_set_wfx_traps(vcpu);

	if (vcpu_has_ptrauth(vcpu))
		vcpu_ptrauth_disable(vcpu);
	kvm_arch_vcpu_load_debug_state_flags(vcpu);

	if (!cpumask_test_cpu(smp_processor_id(), vcpu->kvm->arch.supported_cpus))
		vcpu_set_on_unsupported_cpu(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_put_debug_state_flags(vcpu);
	kvm_arch_vcpu_put_fp(vcpu);
	if (has_vhe())
		kvm_vcpu_put_sysregs_vhe(vcpu);
	kvm_timer_vcpu_put(vcpu);
	kvm_vgic_put(vcpu);
	kvm_vcpu_pmu_restore_host(vcpu);
	kvm_arm_vmid_clear_active();

	vcpu_clear_on_unsupported_cpu(vcpu);
	vcpu->cpu = -1;
}

static void __kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.mp_state_lock);
	__kvm_arm_vcpu_power_off(vcpu);
	spin_unlock(&vcpu->arch.mp_state_lock);
}

bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
}

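/*
 * The suspend counterpart of __kvm_arm_vcpu_power_off(): record the new
 * MP state, post a KVM_REQ_SUSPEND request and kick the vCPU out of the
 * guest.
 */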
static void kvm_arm_vcpu_suspend(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED);
	kvm_make_request(KVM_REQ_SUSPEND, vcpu);
	kvm_vcpu_kick(vcpu);
}

static bool kvm_arm_vcpu_suspended(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	*mp_state = READ_ONCE(vcpu->arch.mp_state);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	spin_lock(&vcpu->arch.mp_state_lock);

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
		break;
	case KVM_MP_STATE_STOPPED:
		__kvm_arm_vcpu_power_off(vcpu);
		break;
	case KVM_MP_STATE_SUSPENDED:
		kvm_arm_vcpu_suspend(vcpu);
		break;
	default:
		ret = -EINVAL;
	}

	spin_unlock(&vcpu->arch.mp_state_lock);

	return ret;
}

/**
 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
 * @v:		The VCPU pointer
 *
 * If the guest CPU is not waiting for interrupts or an interrupt line is
 * asserted, the CPU is by definition runnable.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
	return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
		&& !kvm_arm_vcpu_stopped(v) && !v->arch.pause);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return vcpu_mode_priv(vcpu);
}

#ifdef CONFIG_GUEST_PERF_EVENTS
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
{
	return *vcpu_pc(vcpu);
}
#endif

static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.target >= 0;
}

/*
 * Handle both the initialisation that is being done when the vcpu is
 * run for the first time, as well as the updates that must be
 * performed each time we get a new thread dealing with this vcpu.
 */
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	int ret;

	if (!kvm_vcpu_initialized(vcpu))
		return -ENOEXEC;

	if (!kvm_arm_vcpu_is_finalized(vcpu))
		return -EPERM;

	ret = kvm_arch_vcpu_run_map_fp(vcpu);
	if (ret)
		return ret;

	if (likely(vcpu_has_run_once(vcpu)))
		return 0;

	kvm_arm_vcpu_init_debug(vcpu);

	if (likely(irqchip_in_kernel(kvm))) {
		/*
		 * Map the VGIC hardware resources before running a vcpu the
		 * first time on this VM.
		 */
		ret = kvm_vgic_map_resources(kvm);
		if (ret)
			return ret;
	}

	ret = kvm_timer_enable(vcpu);
	if (ret)
		return ret;

	ret = kvm_arm_pmu_v3_enable(vcpu);
	if (ret)
		return ret;

	if (is_protected_kvm_enabled()) {
		ret = pkvm_create_hyp_vm(kvm);
		if (ret)
			return ret;
	}

	if (!irqchip_in_kernel(kvm)) {
		/*
		 * Tell the rest of the code that there are userspace irqchip
		 * VMs in the wild.
		 */
		static_branch_inc(&userspace_irqchip_in_use);
	}

	/*
	 * Initialize traps for protected VMs.
	 * NOTE: Move to run in EL2 directly, rather than via a hypercall, once
	 * the code is in place for first run initialization at EL2.
	 */
	if (kvm_vm_is_protected(kvm))
		kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);

	mutex_lock(&kvm->arch.config_lock);
	set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
	mutex_unlock(&kvm->arch.config_lock);

	return ret;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return vgic_initialized(kvm);
}

void kvm_arm_halt_guest(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.pause = true;
	kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
}

void kvm_arm_resume_guest(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.pause = false;
		__kvm_vcpu_wake_up(vcpu);
	}
}

static void kvm_vcpu_sleep(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	rcuwait_wait_event(wait,
			   (!kvm_arm_vcpu_stopped(vcpu)) && (!vcpu->arch.pause),
			   TASK_INTERRUPTIBLE);

	if (kvm_arm_vcpu_stopped(vcpu) || vcpu->arch.pause) {
		/* Awaken to handle a signal, request we sleep again later. */
		kvm_make_request(KVM_REQ_SLEEP, vcpu);
	}

	/*
	 * Make sure we will observe a potential reset request if we've
	 * observed a change to the power state. Pairs with the smp_wmb() in
	 * kvm_psci_vcpu_on().
	 */
	smp_rmb();
}

/**
 * kvm_vcpu_wfi - emulate Wait-For-Interrupt behavior
 * @vcpu:	The VCPU pointer
 *
 * Suspend execution of a vCPU until a valid wake event is detected, i.e. until
 * the vCPU is runnable. The vCPU may or may not be scheduled out, depending
 * on when a wake event arrives, e.g. there may already be a pending wake event.
 */
void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
{
	/*
	 * Sync back the state of the GIC CPU interface so that we have
	 * the latest PMR and group enables. This ensures that
	 * kvm_arch_vcpu_runnable has up-to-date data to decide whether
	 * we have pending interrupts, e.g. when determining if the
	 * vCPU should block.
	 *
	 * For the same reason, we want to tell GICv4 that we need
	 * doorbells to be signalled, should an interrupt become pending.
	 */
	preempt_disable();
	kvm_vgic_vmcr_sync(vcpu);
	vgic_v4_put(vcpu, true);
	preempt_enable();

	kvm_vcpu_halt(vcpu);
	vcpu_clear_flag(vcpu, IN_WFIT);

	preempt_disable();
	vgic_v4_load(vcpu);
	preempt_enable();
}

static int kvm_vcpu_suspend(struct kvm_vcpu *vcpu)
{
	if (!kvm_arm_vcpu_suspended(vcpu))
		return 1;

	kvm_vcpu_wfi(vcpu);

	/*
	 * The suspend state is sticky; we do not leave it until userspace
	 * explicitly marks the vCPU as runnable. Request that we suspend again
	 * later.
	 */
	kvm_make_request(KVM_REQ_SUSPEND, vcpu);

	/*
	 * Check to make sure the vCPU is actually runnable. If so, exit to
	 * userspace informing it of the wakeup condition.
	 */
	if (kvm_arch_vcpu_runnable(vcpu)) {
		memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
		vcpu->run->system_event.type = KVM_SYSTEM_EVENT_WAKEUP;
		vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
		return 0;
	}

	/*
	 * Otherwise, we were unblocked to process a different event, such as a
	 * pending signal. Return 1 and allow kvm_arch_vcpu_ioctl_run() to
	 * process the event.
	 */
	return 1;
}

/**
 * check_vcpu_requests - check and handle pending vCPU requests
 * @vcpu:	the VCPU pointer
 *
 * Return: 1 if we should enter the guest
 *	   0 if we should exit to userspace
 *	   < 0 if we should exit to userspace, where the return value indicates
 *	   an error
 */
static int check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
			kvm_vcpu_sleep(vcpu);

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_reset_vcpu(vcpu);

		/*
		 * Clear IRQ_PENDING requests that were made to guarantee
		 * that a VCPU sees new virtual interrupts.
		 */
		kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);

		if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
			kvm_update_stolen_time(vcpu);

		if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
			/* The distributor enable bits were changed */
			preempt_disable();
			vgic_v4_put(vcpu, false);
			vgic_v4_load(vcpu);
			preempt_enable();
		}

		if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
			kvm_pmu_handle_pmcr(vcpu,
					    __vcpu_sys_reg(vcpu, PMCR_EL0));

		if (kvm_check_request(KVM_REQ_SUSPEND, vcpu))
			return kvm_vcpu_suspend(vcpu);

		if (kvm_dirty_ring_check_request(vcpu))
			return 0;
	}

	return 1;
}

static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
{
	if (likely(!vcpu_mode_is_32bit(vcpu)))
		return false;

	return !kvm_supports_32bit_el0();
}

/**
 * kvm_vcpu_exit_request - returns true if the VCPU should *not* enter the guest
 * @vcpu:	The VCPU pointer
 * @ret:	Pointer to write optional return code
 *
 * Returns: true if the VCPU needs to return to a preemptible + interruptible
 *	    kernel context and skip guest entry.
 *
 * This function disambiguates between two different types of exits: exits to a
 * preemptible + interruptible kernel context and exits to userspace. For an
 * exit to userspace, this function will write the return code to ret and return
 * true. For an exit to preemptible + interruptible kernel context (i.e. check
 * for pending work and re-enter), return true without writing to ret.
 */
static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
{
	struct kvm_run *run = vcpu->run;

	/*
	 * If we're using a userspace irqchip, then check if we need
	 * to tell a userspace irqchip about timer or PMU level
	 * changes and if so, exit to userspace (the actual level
	 * state gets updated in kvm_timer_update_run and
	 * kvm_pmu_update_run below).
	 */
	if (static_branch_unlikely(&userspace_irqchip_in_use)) {
		if (kvm_timer_should_notify_user(vcpu) ||
		    kvm_pmu_should_notify_user(vcpu)) {
			*ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
			return true;
		}
	}

	if (unlikely(vcpu_on_unsupported_cpu(vcpu))) {
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		run->fail_entry.hardware_entry_failure_reason = KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED;
		run->fail_entry.cpu = smp_processor_id();
		*ret = 0;
		return true;
	}

	return kvm_request_pending(vcpu) ||
			xfer_to_guest_mode_work_pending();
}

/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static int noinstr kvm_arm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
	int ret;

	guest_state_enter_irqoff();
	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
	guest_state_exit_irqoff();

	return ret;
}

/**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu:	The VCPU pointer
 *
 * This function is called through the VCPU_RUN ioctl called from user space. It
 * will execute VM code in a loop until the time slice for the process is used
 * up, or some emulation is needed from user space, in which case the function
 * will return with return value 0 and with the kvm_run structure filled in
 * with the required data for the requested emulation.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int ret;

	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_handle_mmio_return(vcpu);
		if (ret)
			return ret;
	}

	vcpu_load(vcpu);

	if (run->immediate_exit) {
		ret = -EINTR;
		goto out;
	}

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->flags = 0;
	while (ret > 0) {
		/*
		 * Check conditions before entering the guest
		 */
		ret = xfer_to_guest_mode_handle_work(vcpu);
		if (!ret)
			ret = 1;

		if (ret > 0)
			ret = check_vcpu_requests(vcpu);

		/*
		 * Preparing the interrupts to be injected also
		 * involves poking the GIC, which must be done in a
		 * non-preemptible context.
		 */
		preempt_disable();

		/*
		 * The VMID allocator only tracks active VMIDs per
		 * physical CPU, and therefore the VMID allocated may not be
		 * preserved on VMID roll-over if the task was preempted,
		 * making a thread's VMID inactive. So we need to call
		 * kvm_arm_vmid_update() in non-preemptible context.
		 */
		kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid);

		kvm_pmu_flush_hwstate(vcpu);

		local_irq_disable();

		kvm_vgic_flush_hwstate(vcpu);

		kvm_pmu_update_vcpu_events(vcpu);

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		if (ret <= 0 || kvm_vcpu_exit_request(vcpu, &ret)) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			isb(); /* Ensure work in x_flush_hwstate is committed */
			kvm_pmu_sync_hwstate(vcpu);
			if (static_branch_unlikely(&userspace_irqchip_in_use))
				kvm_timer_sync_user(vcpu);
			kvm_vgic_sync_hwstate(vcpu);
			local_irq_enable();
			preempt_enable();
			continue;
		}

		kvm_arm_setup_debug(vcpu);
		kvm_arch_vcpu_ctxflush_fp(vcpu);

		/**************************************************************
		 * Enter the guest
		 */
		trace_kvm_entry(*vcpu_pc(vcpu));
		guest_timing_enter_irqoff();

		ret = kvm_arm_vcpu_enter_exit(vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;
		/*
		 * Back from guest
		 *************************************************************/

		kvm_arm_clear_debug(vcpu);

		/*
		 * We must sync the PMU state before the vgic state so
		 * that the vgic can properly sample the updated state of the
		 * interrupt line.
		 */
		kvm_pmu_sync_hwstate(vcpu);

		/*
		 * Sync the vgic state before syncing the timer state because
		 * the timer code needs to know if the virtual timer
		 * interrupts are active.
		 */
		kvm_vgic_sync_hwstate(vcpu);

		/*
		 * Sync the timer hardware state before enabling interrupts as
		 * we don't want vtimer interrupts to race with syncing the
		 * timer virtual interrupt state.
		 */
		if (static_branch_unlikely(&userspace_irqchip_in_use))
			kvm_timer_sync_user(vcpu);

		kvm_arch_vcpu_ctxsync_fp(vcpu);

		/*
		 * We must ensure that any pending interrupts are taken before
		 * we exit guest timing so that timer ticks are accounted as
		 * guest time. Transiently unmask interrupts so that any
		 * pending interrupts are taken.
		 *
		 * Per ARM DDI 0487G.b section D1.13.4, an ISB (or other
		 * context synchronization event) is necessary to ensure that
		 * pending interrupts are taken.
		 */
		if (ARM_EXCEPTION_CODE(ret) == ARM_EXCEPTION_IRQ) {
			local_irq_enable();
			isb();
			local_irq_disable();
		}

		guest_timing_exit_irqoff();

		local_irq_enable();

		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));

		/* Exit types that need handling before we can be preempted */
		handle_exit_early(vcpu, ret);

		preempt_enable();

		/*
		 * The ARMv8 architecture doesn't give the hypervisor
		 * a mechanism to prevent a guest from dropping to AArch32 EL0
		 * if implemented by the CPU. If we spot the guest in such a
		 * state and decide it wasn't supposed to do so (as with the
		 * asymmetric AArch32 case), return to userspace with a fatal
		 * error.
		 */
		if (vcpu_mode_is_bad_32bit(vcpu)) {
			/*
			 * As we have caught the guest red-handed, decide that
			 * it isn't fit for purpose anymore by making the vcpu
			 * invalid. The VMM can try and fix it by issuing a
			 * KVM_ARM_VCPU_INIT if it really wants to.
			 */
			vcpu->arch.target = -1;
			ret = ARM_EXCEPTION_IL;
		}

		ret = handle_exit(vcpu, ret);
	}

	/* Tell userspace about in-kernel device output levels */
	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
		kvm_timer_update_run(vcpu);
		kvm_pmu_update_run(vcpu);
	}

	kvm_sigset_deactivate(vcpu);

out:
	/*
	 * In the unlikely event that we are returning to userspace
	 * with pending exceptions or PC adjustment, commit these
	 * adjustments in order to give userspace a consistent view of
	 * the vcpu state. Note that this relies on __kvm_adjust_pc()
	 * being preempt-safe on VHE.
	 */
	if (unlikely(vcpu_get_flag(vcpu, PENDING_EXCEPTION) ||
		     vcpu_get_flag(vcpu, INCREMENT_PC)))
		kvm_call_hyp(__kvm_adjust_pc, vcpu);

	vcpu_put(vcpu);
	return ret;
}

static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
	int bit_index;
	bool set;
	unsigned long *hcr;

	if (number == KVM_ARM_IRQ_CPU_IRQ)
		bit_index = __ffs(HCR_VI);
	else /* KVM_ARM_IRQ_CPU_FIQ */
		bit_index = __ffs(HCR_VF);

	hcr = vcpu_hcr(vcpu);
	if (level)
		set = test_and_set_bit(bit_index, hcr);
	else
		set = test_and_clear_bit(bit_index, hcr);

	/*
	 * If we didn't change anything, no need to wake up or kick other CPUs
	 */
	if (set == level)
		return 0;

	/*
	 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
	 * trigger a world-switch round on the running physical CPU to set the
	 * virtual IRQ/FIQ fields in the HCR appropriately.
	 */
	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status)
{
	u32 irq = irq_level->irq;
	unsigned int irq_type, vcpu_idx, irq_num;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	struct kvm_vcpu *vcpu = NULL;
	bool level = irq_level->level;

	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
	vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
	vcpu_idx += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;

	trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);

	switch (irq_type) {
	case KVM_ARM_IRQ_TYPE_CPU:
		if (irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
			return -EINVAL;

		return vcpu_interrupt_line(vcpu, irq_num, level);
	case KVM_ARM_IRQ_TYPE_PPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
	case KVM_ARM_IRQ_TYPE_SPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (irq_num < VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL);
	}

	return -EINVAL;
}

static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
					const struct kvm_vcpu_init *init)
{
	unsigned long features = init->features[0];
	int i;

	if (features & ~KVM_VCPU_VALID_FEATURES)
		return -ENOENT;

	for (i = 1; i < ARRAY_SIZE(init->features); i++) {
		if (init->features[i])
			return -ENOENT;
	}

	if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features))
		return 0;

	if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1))
		return -EINVAL;

	/* MTE is incompatible with AArch32 */
	if (kvm_has_mte(vcpu->kvm))
		return -EINVAL;

	/* NV is incompatible with AArch32 */
	if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features))
		return -EINVAL;

	return 0;
}

static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
				  const struct kvm_vcpu_init *init)
{
	unsigned long features = init->features[0];

	return !bitmap_equal(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES) ||
			vcpu->arch.target != init->target;
}

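/*
 * Runs on the first KVM_ARM_VCPU_INIT for a given vCPU: all vCPUs of a VM
 * must agree on a single feature set, which is recorded VM-wide (under
 * config_lock) once the first vCPU has been successfully reset.
 */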
1221a7a2c72aSOliver Upton { 1222a7a2c72aSOliver Upton unsigned long features = init->features[0]; 12232251e9ffSOliver Upton struct kvm *kvm = vcpu->kvm; 12242251e9ffSOliver Upton int ret = -EINVAL; 12252251e9ffSOliver Upton 12262251e9ffSOliver Upton mutex_lock(&kvm->arch.config_lock); 12272251e9ffSOliver Upton 12282251e9ffSOliver Upton if (test_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags) && 12292251e9ffSOliver Upton !bitmap_equal(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES)) 12302251e9ffSOliver Upton goto out_unlock; 1231a7a2c72aSOliver Upton 1232a7a2c72aSOliver Upton vcpu->arch.target = init->target; 1233a7a2c72aSOliver Upton bitmap_copy(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES); 12349ed24f4bSMarc Zyngier 12359ed24f4bSMarc Zyngier /* Now we know what it is, we can reset it. */ 12369ed24f4bSMarc Zyngier ret = kvm_reset_vcpu(vcpu); 12379ed24f4bSMarc Zyngier if (ret) { 12389ed24f4bSMarc Zyngier vcpu->arch.target = -1; 12399ed24f4bSMarc Zyngier bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); 12402251e9ffSOliver Upton goto out_unlock; 12419ed24f4bSMarc Zyngier } 12429ed24f4bSMarc Zyngier 12432251e9ffSOliver Upton bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES); 12442251e9ffSOliver Upton set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags); 12452251e9ffSOliver Upton 12462251e9ffSOliver Upton out_unlock: 12472251e9ffSOliver Upton mutex_unlock(&kvm->arch.config_lock); 12489ed24f4bSMarc Zyngier return ret; 12499ed24f4bSMarc Zyngier } 12509ed24f4bSMarc Zyngier 1251a7a2c72aSOliver Upton static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, 1252a7a2c72aSOliver Upton const struct kvm_vcpu_init *init) 1253a7a2c72aSOliver Upton { 1254a7a2c72aSOliver Upton int ret; 1255a7a2c72aSOliver Upton 1256a7a2c72aSOliver Upton if (init->target != kvm_target_cpu()) 1257a7a2c72aSOliver Upton return -EINVAL; 1258a7a2c72aSOliver Upton 1259a7a2c72aSOliver Upton ret = kvm_vcpu_init_check_features(vcpu, init); 1260a7a2c72aSOliver Upton if (ret) 1261a7a2c72aSOliver Upton return ret; 1262a7a2c72aSOliver Upton 1263a7a2c72aSOliver Upton if (vcpu->arch.target == -1) 1264a7a2c72aSOliver Upton return __kvm_vcpu_set_target(vcpu, init); 1265a7a2c72aSOliver Upton 1266a7a2c72aSOliver Upton if (kvm_vcpu_init_changed(vcpu, init)) 1267a7a2c72aSOliver Upton return -EINVAL; 1268a7a2c72aSOliver Upton 1269a7a2c72aSOliver Upton return kvm_reset_vcpu(vcpu); 1270a7a2c72aSOliver Upton } 1271a7a2c72aSOliver Upton 12729ed24f4bSMarc Zyngier static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, 12739ed24f4bSMarc Zyngier struct kvm_vcpu_init *init) 12749ed24f4bSMarc Zyngier { 1275e3c1c0caSOliver Upton bool power_off = false; 12769ed24f4bSMarc Zyngier int ret; 12779ed24f4bSMarc Zyngier 1278e3c1c0caSOliver Upton /* 1279e3c1c0caSOliver Upton * Treat the power-off vCPU feature as ephemeral. Clear the bit to avoid 1280e3c1c0caSOliver Upton * reflecting it in the finalized feature set, thus limiting its scope 1281e3c1c0caSOliver Upton * to a single KVM_ARM_VCPU_INIT call. 
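 *
 * A minimal userspace sketch of how this is typically used (vcpu_fd and
 * the chosen target are illustrative, not taken from this file):
 *
 *	struct kvm_vcpu_init init = { .target = KVM_ARM_TARGET_GENERIC_V8 };
 *	init.features[0] |= 1UL << KVM_ARM_VCPU_POWER_OFF;
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
 *
 * The vCPU then comes up powered off, and because the bit is cleared
 * before the feature set is recorded, a later KVM_ARM_VCPU_INIT without
 * it still matches the recorded features.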
1282e3c1c0caSOliver Upton */ 1283*192df2aaSOliver Upton if (init->features[0] & BIT(KVM_ARM_VCPU_POWER_OFF)) { 1284*192df2aaSOliver Upton init->features[0] &= ~BIT(KVM_ARM_VCPU_POWER_OFF); 1285e3c1c0caSOliver Upton power_off = true; 1286e3c1c0caSOliver Upton } 1287e3c1c0caSOliver Upton 12889ed24f4bSMarc Zyngier ret = kvm_vcpu_set_target(vcpu, init); 12899ed24f4bSMarc Zyngier if (ret) 12909ed24f4bSMarc Zyngier return ret; 12919ed24f4bSMarc Zyngier 12929ed24f4bSMarc Zyngier /* 12939ed24f4bSMarc Zyngier * Ensure a rebooted VM will fault in RAM pages and detect if the 12949ed24f4bSMarc Zyngier * guest MMU is turned off and flush the caches as needed. 1295892713e9SZenghui Yu * 12967ae2f3dbSMarc Zyngier * S2FWB enforces all memory accesses to RAM being cacheable, 12977ae2f3dbSMarc Zyngier * ensuring that the data side is always coherent. We still 12987ae2f3dbSMarc Zyngier * need to invalidate the I-cache though, as FWB does *not* 12997ae2f3dbSMarc Zyngier * imply CTR_EL0.DIC. 13009ed24f4bSMarc Zyngier */ 1301cc5705fbSMarc Zyngier if (vcpu_has_run_once(vcpu)) { 13027ae2f3dbSMarc Zyngier if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) 13039ed24f4bSMarc Zyngier stage2_unmap_vm(vcpu->kvm); 13047ae2f3dbSMarc Zyngier else 1305fade9c2cSFuad Tabba icache_inval_all_pou(); 13067ae2f3dbSMarc Zyngier } 13079ed24f4bSMarc Zyngier 13089ed24f4bSMarc Zyngier vcpu_reset_hcr(vcpu); 130975c76ab5SMarc Zyngier vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu); 13109ed24f4bSMarc Zyngier 13119ed24f4bSMarc Zyngier /* 13129ed24f4bSMarc Zyngier * Handle the "start in power-off" case. 13139ed24f4bSMarc Zyngier */ 13144ff910beSReiji Watanabe spin_lock(&vcpu->arch.mp_state_lock); 13154ff910beSReiji Watanabe 1316e3c1c0caSOliver Upton if (power_off) 13174ff910beSReiji Watanabe __kvm_arm_vcpu_power_off(vcpu); 13189ed24f4bSMarc Zyngier else 13190acc7239SOliver Upton WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE); 13209ed24f4bSMarc Zyngier 13214ff910beSReiji Watanabe spin_unlock(&vcpu->arch.mp_state_lock); 13229ed24f4bSMarc Zyngier 13239ed24f4bSMarc Zyngier return 0; 13249ed24f4bSMarc Zyngier } 13259ed24f4bSMarc Zyngier 13269ed24f4bSMarc Zyngier static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu, 13279ed24f4bSMarc Zyngier struct kvm_device_attr *attr) 13289ed24f4bSMarc Zyngier { 13299ed24f4bSMarc Zyngier int ret = -ENXIO; 13309ed24f4bSMarc Zyngier 13319ed24f4bSMarc Zyngier switch (attr->group) { 13329ed24f4bSMarc Zyngier default: 13339ed24f4bSMarc Zyngier ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr); 13349ed24f4bSMarc Zyngier break; 13359ed24f4bSMarc Zyngier } 13369ed24f4bSMarc Zyngier 13379ed24f4bSMarc Zyngier return ret; 13389ed24f4bSMarc Zyngier } 13399ed24f4bSMarc Zyngier 13409ed24f4bSMarc Zyngier static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu, 13419ed24f4bSMarc Zyngier struct kvm_device_attr *attr) 13429ed24f4bSMarc Zyngier { 13439ed24f4bSMarc Zyngier int ret = -ENXIO; 13449ed24f4bSMarc Zyngier 13459ed24f4bSMarc Zyngier switch (attr->group) { 13469ed24f4bSMarc Zyngier default: 13479ed24f4bSMarc Zyngier ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr); 13489ed24f4bSMarc Zyngier break; 13499ed24f4bSMarc Zyngier } 13509ed24f4bSMarc Zyngier 13519ed24f4bSMarc Zyngier return ret; 13529ed24f4bSMarc Zyngier } 13539ed24f4bSMarc Zyngier 13549ed24f4bSMarc Zyngier static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu, 13559ed24f4bSMarc Zyngier struct kvm_device_attr *attr) 13569ed24f4bSMarc Zyngier { 13579ed24f4bSMarc Zyngier int ret = -ENXIO; 13589ed24f4bSMarc Zyngier 13599ed24f4bSMarc Zyngier switch 
(attr->group) { 13609ed24f4bSMarc Zyngier default: 13619ed24f4bSMarc Zyngier ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr); 13629ed24f4bSMarc Zyngier break; 13639ed24f4bSMarc Zyngier } 13649ed24f4bSMarc Zyngier 13659ed24f4bSMarc Zyngier return ret; 13669ed24f4bSMarc Zyngier } 13679ed24f4bSMarc Zyngier 13689ed24f4bSMarc Zyngier static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu, 13699ed24f4bSMarc Zyngier struct kvm_vcpu_events *events) 13709ed24f4bSMarc Zyngier { 13719ed24f4bSMarc Zyngier memset(events, 0, sizeof(*events)); 13729ed24f4bSMarc Zyngier 13739ed24f4bSMarc Zyngier return __kvm_arm_vcpu_get_events(vcpu, events); 13749ed24f4bSMarc Zyngier } 13759ed24f4bSMarc Zyngier 13769ed24f4bSMarc Zyngier static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, 13779ed24f4bSMarc Zyngier struct kvm_vcpu_events *events) 13789ed24f4bSMarc Zyngier { 13799ed24f4bSMarc Zyngier int i; 13809ed24f4bSMarc Zyngier 13819ed24f4bSMarc Zyngier /* check whether the reserved field is zero */ 13829ed24f4bSMarc Zyngier for (i = 0; i < ARRAY_SIZE(events->reserved); i++) 13839ed24f4bSMarc Zyngier if (events->reserved[i]) 13849ed24f4bSMarc Zyngier return -EINVAL; 13859ed24f4bSMarc Zyngier 13869ed24f4bSMarc Zyngier /* check whether the pad field is zero */ 13879ed24f4bSMarc Zyngier for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++) 13889ed24f4bSMarc Zyngier if (events->exception.pad[i]) 13899ed24f4bSMarc Zyngier return -EINVAL; 13909ed24f4bSMarc Zyngier 13919ed24f4bSMarc Zyngier return __kvm_arm_vcpu_set_events(vcpu, events); 13929ed24f4bSMarc Zyngier } 13939ed24f4bSMarc Zyngier 13949ed24f4bSMarc Zyngier long kvm_arch_vcpu_ioctl(struct file *filp, 13959ed24f4bSMarc Zyngier unsigned int ioctl, unsigned long arg) 13969ed24f4bSMarc Zyngier { 13979ed24f4bSMarc Zyngier struct kvm_vcpu *vcpu = filp->private_data; 13989ed24f4bSMarc Zyngier void __user *argp = (void __user *)arg; 13999ed24f4bSMarc Zyngier struct kvm_device_attr attr; 14009ed24f4bSMarc Zyngier long r; 14019ed24f4bSMarc Zyngier 14029ed24f4bSMarc Zyngier switch (ioctl) { 14039ed24f4bSMarc Zyngier case KVM_ARM_VCPU_INIT: { 14049ed24f4bSMarc Zyngier struct kvm_vcpu_init init; 14059ed24f4bSMarc Zyngier 14069ed24f4bSMarc Zyngier r = -EFAULT; 14079ed24f4bSMarc Zyngier if (copy_from_user(&init, argp, sizeof(init))) 14089ed24f4bSMarc Zyngier break; 14099ed24f4bSMarc Zyngier 14109ed24f4bSMarc Zyngier r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init); 14119ed24f4bSMarc Zyngier break; 14129ed24f4bSMarc Zyngier } 14139ed24f4bSMarc Zyngier case KVM_SET_ONE_REG: 14149ed24f4bSMarc Zyngier case KVM_GET_ONE_REG: { 14159ed24f4bSMarc Zyngier struct kvm_one_reg reg; 14169ed24f4bSMarc Zyngier 14179ed24f4bSMarc Zyngier r = -ENOEXEC; 14189ed24f4bSMarc Zyngier if (unlikely(!kvm_vcpu_initialized(vcpu))) 14199ed24f4bSMarc Zyngier break; 14209ed24f4bSMarc Zyngier 14219ed24f4bSMarc Zyngier r = -EFAULT; 14229ed24f4bSMarc Zyngier if (copy_from_user(&reg, argp, sizeof(reg))) 14239ed24f4bSMarc Zyngier break; 14249ed24f4bSMarc Zyngier 14256826c684SOliver Upton /* 14266826c684SOliver Upton * We could owe a reset due to PSCI. Handle the pending reset 14276826c684SOliver Upton * here to ensure userspace register accesses are ordered after 14286826c684SOliver Upton * the reset.
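 *
 * For example (VMM-side sketch, names illustrative): after the guest
 * has issued PSCI CPU_ON targeting this vCPU, a KVM_GET_ONE_REG from
 * userspace must observe the post-reset value of the register, so the
 * deferred reset request is consumed before the access below.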
14296826c684SOliver Upton */ 14306826c684SOliver Upton if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu)) 14316826c684SOliver Upton kvm_reset_vcpu(vcpu); 14326826c684SOliver Upton 14339ed24f4bSMarc Zyngier if (ioctl == KVM_SET_ONE_REG) 14349ed24f4bSMarc Zyngier r = kvm_arm_set_reg(vcpu, &reg); 14359ed24f4bSMarc Zyngier else 14369ed24f4bSMarc Zyngier r = kvm_arm_get_reg(vcpu, &reg); 14379ed24f4bSMarc Zyngier break; 14389ed24f4bSMarc Zyngier } 14399ed24f4bSMarc Zyngier case KVM_GET_REG_LIST: { 14409ed24f4bSMarc Zyngier struct kvm_reg_list __user *user_list = argp; 14419ed24f4bSMarc Zyngier struct kvm_reg_list reg_list; 14429ed24f4bSMarc Zyngier unsigned n; 14439ed24f4bSMarc Zyngier 14449ed24f4bSMarc Zyngier r = -ENOEXEC; 14459ed24f4bSMarc Zyngier if (unlikely(!kvm_vcpu_initialized(vcpu))) 14469ed24f4bSMarc Zyngier break; 14479ed24f4bSMarc Zyngier 14489ed24f4bSMarc Zyngier r = -EPERM; 14499ed24f4bSMarc Zyngier if (!kvm_arm_vcpu_is_finalized(vcpu)) 14509ed24f4bSMarc Zyngier break; 14519ed24f4bSMarc Zyngier 14529ed24f4bSMarc Zyngier r = -EFAULT; 14539ed24f4bSMarc Zyngier if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) 14549ed24f4bSMarc Zyngier break; 14559ed24f4bSMarc Zyngier n = reg_list.n; 14569ed24f4bSMarc Zyngier reg_list.n = kvm_arm_num_regs(vcpu); 14579ed24f4bSMarc Zyngier if (copy_to_user(user_list, &reg_list, sizeof(reg_list))) 14589ed24f4bSMarc Zyngier break; 14599ed24f4bSMarc Zyngier r = -E2BIG; 14609ed24f4bSMarc Zyngier if (n < reg_list.n) 14619ed24f4bSMarc Zyngier break; 14629ed24f4bSMarc Zyngier r = kvm_arm_copy_reg_indices(vcpu, user_list->reg); 14639ed24f4bSMarc Zyngier break; 14649ed24f4bSMarc Zyngier } 14659ed24f4bSMarc Zyngier case KVM_SET_DEVICE_ATTR: { 14669ed24f4bSMarc Zyngier r = -EFAULT; 14679ed24f4bSMarc Zyngier if (copy_from_user(&attr, argp, sizeof(attr))) 14689ed24f4bSMarc Zyngier break; 14699ed24f4bSMarc Zyngier r = kvm_arm_vcpu_set_attr(vcpu, &attr); 14709ed24f4bSMarc Zyngier break; 14719ed24f4bSMarc Zyngier } 14729ed24f4bSMarc Zyngier case KVM_GET_DEVICE_ATTR: { 14739ed24f4bSMarc Zyngier r = -EFAULT; 14749ed24f4bSMarc Zyngier if (copy_from_user(&attr, argp, sizeof(attr))) 14759ed24f4bSMarc Zyngier break; 14769ed24f4bSMarc Zyngier r = kvm_arm_vcpu_get_attr(vcpu, &attr); 14779ed24f4bSMarc Zyngier break; 14789ed24f4bSMarc Zyngier } 14799ed24f4bSMarc Zyngier case KVM_HAS_DEVICE_ATTR: { 14809ed24f4bSMarc Zyngier r = -EFAULT; 14819ed24f4bSMarc Zyngier if (copy_from_user(&attr, argp, sizeof(attr))) 14829ed24f4bSMarc Zyngier break; 14839ed24f4bSMarc Zyngier r = kvm_arm_vcpu_has_attr(vcpu, &attr); 14849ed24f4bSMarc Zyngier break; 14859ed24f4bSMarc Zyngier } 14869ed24f4bSMarc Zyngier case KVM_GET_VCPU_EVENTS: { 14879ed24f4bSMarc Zyngier struct kvm_vcpu_events events; 14889ed24f4bSMarc Zyngier 14899ed24f4bSMarc Zyngier if (kvm_arm_vcpu_get_events(vcpu, &events)) 14909ed24f4bSMarc Zyngier return -EINVAL; 14919ed24f4bSMarc Zyngier 14929ed24f4bSMarc Zyngier if (copy_to_user(argp, &events, sizeof(events))) 14939ed24f4bSMarc Zyngier return -EFAULT; 14949ed24f4bSMarc Zyngier 14959ed24f4bSMarc Zyngier return 0; 14969ed24f4bSMarc Zyngier } 14979ed24f4bSMarc Zyngier case KVM_SET_VCPU_EVENTS: { 14989ed24f4bSMarc Zyngier struct kvm_vcpu_events events; 14999ed24f4bSMarc Zyngier 15009ed24f4bSMarc Zyngier if (copy_from_user(&events, argp, sizeof(events))) 15019ed24f4bSMarc Zyngier return -EFAULT; 15029ed24f4bSMarc Zyngier 15039ed24f4bSMarc Zyngier return kvm_arm_vcpu_set_events(vcpu, &events); 15049ed24f4bSMarc Zyngier } 15059ed24f4bSMarc Zyngier case KVM_ARM_VCPU_FINALIZE: {
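		/*
		 * 'what' names the feature set to finalize; at the time of
		 * writing KVM_ARM_VCPU_SVE is the only such feature. A
		 * hypothetical caller (vcpu_fd is illustrative):
		 *
		 *	int what = KVM_ARM_VCPU_SVE;
		 *	ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &what);
		 *
		 * The dispatch below is feature-agnostic.
		 */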
15069ed24f4bSMarc Zyngier int what; 15079ed24f4bSMarc Zyngier 15089ed24f4bSMarc Zyngier if (!kvm_vcpu_initialized(vcpu)) 15099ed24f4bSMarc Zyngier return -ENOEXEC; 15109ed24f4bSMarc Zyngier 15119ed24f4bSMarc Zyngier if (get_user(what, (const int __user *)argp)) 15129ed24f4bSMarc Zyngier return -EFAULT; 15139ed24f4bSMarc Zyngier 15149ed24f4bSMarc Zyngier return kvm_arm_vcpu_finalize(vcpu, what); 15159ed24f4bSMarc Zyngier } 15169ed24f4bSMarc Zyngier default: 15179ed24f4bSMarc Zyngier r = -EINVAL; 15189ed24f4bSMarc Zyngier } 15199ed24f4bSMarc Zyngier 15209ed24f4bSMarc Zyngier return r; 15219ed24f4bSMarc Zyngier } 15229ed24f4bSMarc Zyngier 15239ed24f4bSMarc Zyngier void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) 15249ed24f4bSMarc Zyngier { 15259ed24f4bSMarc Zyngier 15269ed24f4bSMarc Zyngier } 15279ed24f4bSMarc Zyngier 15289ed24f4bSMarc Zyngier void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, 15296c9dd6d2SPaolo Bonzini const struct kvm_memory_slot *memslot) 15309ed24f4bSMarc Zyngier { 15319ed24f4bSMarc Zyngier kvm_flush_remote_tlbs(kvm); 15329ed24f4bSMarc Zyngier } 15339ed24f4bSMarc Zyngier 15349ed24f4bSMarc Zyngier static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm, 15359ed24f4bSMarc Zyngier struct kvm_arm_device_addr *dev_addr) 15369ed24f4bSMarc Zyngier { 15379f968c92SMarc Zyngier switch (FIELD_GET(KVM_ARM_DEVICE_ID_MASK, dev_addr->id)) { 15389ed24f4bSMarc Zyngier case KVM_ARM_DEVICE_VGIC_V2: 15399ed24f4bSMarc Zyngier if (!vgic_present) 15409ed24f4bSMarc Zyngier return -ENXIO; 15419f968c92SMarc Zyngier return kvm_set_legacy_vgic_v2_addr(kvm, dev_addr); 15429ed24f4bSMarc Zyngier default: 15439ed24f4bSMarc Zyngier return -ENODEV; 15449ed24f4bSMarc Zyngier } 15459ed24f4bSMarc Zyngier } 15469ed24f4bSMarc Zyngier 1547e0fc6b21SOliver Upton static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) 1548e0fc6b21SOliver Upton { 1549e0fc6b21SOliver Upton switch (attr->group) { 1550821d935cSOliver Upton case KVM_ARM_VM_SMCCC_CTRL: 1551821d935cSOliver Upton return kvm_vm_smccc_has_attr(kvm, attr); 1552e0fc6b21SOliver Upton default: 1553e0fc6b21SOliver Upton return -ENXIO; 1554e0fc6b21SOliver Upton } 1555e0fc6b21SOliver Upton } 1556e0fc6b21SOliver Upton 1557e0fc6b21SOliver Upton static int kvm_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) 1558e0fc6b21SOliver Upton { 1559e0fc6b21SOliver Upton switch (attr->group) { 1560821d935cSOliver Upton case KVM_ARM_VM_SMCCC_CTRL: 1561821d935cSOliver Upton return kvm_vm_smccc_set_attr(kvm, attr); 1562e0fc6b21SOliver Upton default: 1563e0fc6b21SOliver Upton return -ENXIO; 1564e0fc6b21SOliver Upton } 1565e0fc6b21SOliver Upton } 1566e0fc6b21SOliver Upton 1567d8708b80SThomas Huth int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) 15689ed24f4bSMarc Zyngier { 15699ed24f4bSMarc Zyngier struct kvm *kvm = filp->private_data; 15709ed24f4bSMarc Zyngier void __user *argp = (void __user *)arg; 1571e0fc6b21SOliver Upton struct kvm_device_attr attr; 15729ed24f4bSMarc Zyngier 15739ed24f4bSMarc Zyngier switch (ioctl) { 15749ed24f4bSMarc Zyngier case KVM_CREATE_IRQCHIP: { 15759ed24f4bSMarc Zyngier int ret; 15769ed24f4bSMarc Zyngier if (!vgic_present) 15779ed24f4bSMarc Zyngier return -ENXIO; 15789ed24f4bSMarc Zyngier mutex_lock(&kvm->lock); 15799ed24f4bSMarc Zyngier ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2); 15809ed24f4bSMarc Zyngier mutex_unlock(&kvm->lock); 15819ed24f4bSMarc Zyngier return ret; 15829ed24f4bSMarc Zyngier } 15839ed24f4bSMarc Zyngier case 
KVM_ARM_SET_DEVICE_ADDR: { 15849ed24f4bSMarc Zyngier struct kvm_arm_device_addr dev_addr; 15859ed24f4bSMarc Zyngier 15869ed24f4bSMarc Zyngier if (copy_from_user(&dev_addr, argp, sizeof(dev_addr))) 15879ed24f4bSMarc Zyngier return -EFAULT; 15889ed24f4bSMarc Zyngier return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr); 15899ed24f4bSMarc Zyngier } 15909ed24f4bSMarc Zyngier case KVM_ARM_PREFERRED_TARGET: { 15919ed24f4bSMarc Zyngier struct kvm_vcpu_init init; 15929ed24f4bSMarc Zyngier 159308e873cbSYueHaibing kvm_vcpu_preferred_target(&init); 15949ed24f4bSMarc Zyngier 15959ed24f4bSMarc Zyngier if (copy_to_user(argp, &init, sizeof(init))) 15969ed24f4bSMarc Zyngier return -EFAULT; 15979ed24f4bSMarc Zyngier 15989ed24f4bSMarc Zyngier return 0; 15999ed24f4bSMarc Zyngier } 1600f0376edbSSteven Price case KVM_ARM_MTE_COPY_TAGS: { 1601f0376edbSSteven Price struct kvm_arm_copy_mte_tags copy_tags; 1602f0376edbSSteven Price 1603f0376edbSSteven Price if (copy_from_user(&copy_tags, argp, sizeof(copy_tags))) 1604f0376edbSSteven Price return -EFAULT; 1605f0376edbSSteven Price return kvm_vm_ioctl_mte_copy_tags(kvm, &copy_tags); 1606f0376edbSSteven Price } 160730ec7997SMarc Zyngier case KVM_ARM_SET_COUNTER_OFFSET: { 160830ec7997SMarc Zyngier struct kvm_arm_counter_offset offset; 160930ec7997SMarc Zyngier 161030ec7997SMarc Zyngier if (copy_from_user(&offset, argp, sizeof(offset))) 161130ec7997SMarc Zyngier return -EFAULT; 161230ec7997SMarc Zyngier return kvm_vm_ioctl_set_counter_offset(kvm, &offset); 161330ec7997SMarc Zyngier } 1614e0fc6b21SOliver Upton case KVM_HAS_DEVICE_ATTR: { 1615e0fc6b21SOliver Upton if (copy_from_user(&attr, argp, sizeof(attr))) 1616e0fc6b21SOliver Upton return -EFAULT; 1617e0fc6b21SOliver Upton 1618e0fc6b21SOliver Upton return kvm_vm_has_attr(kvm, &attr); 1619e0fc6b21SOliver Upton } 1620e0fc6b21SOliver Upton case KVM_SET_DEVICE_ATTR: { 1621e0fc6b21SOliver Upton if (copy_from_user(&attr, argp, sizeof(attr))) 1622e0fc6b21SOliver Upton return -EFAULT; 1623e0fc6b21SOliver Upton 1624e0fc6b21SOliver Upton return kvm_vm_set_attr(kvm, &attr); 1625e0fc6b21SOliver Upton } 16269ed24f4bSMarc Zyngier default: 16279ed24f4bSMarc Zyngier return -EINVAL; 16289ed24f4bSMarc Zyngier } 16299ed24f4bSMarc Zyngier } 16309ed24f4bSMarc Zyngier 163196906a91SMarc Zyngier /* unlocks vcpus from @vcpu_lock_idx and smaller */ 163296906a91SMarc Zyngier static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx) 163396906a91SMarc Zyngier { 163496906a91SMarc Zyngier struct kvm_vcpu *tmp_vcpu; 163596906a91SMarc Zyngier 163696906a91SMarc Zyngier for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) { 163796906a91SMarc Zyngier tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx); 163896906a91SMarc Zyngier mutex_unlock(&tmp_vcpu->mutex); 163996906a91SMarc Zyngier } 164096906a91SMarc Zyngier } 164196906a91SMarc Zyngier 164296906a91SMarc Zyngier void unlock_all_vcpus(struct kvm *kvm) 164396906a91SMarc Zyngier { 164496906a91SMarc Zyngier lockdep_assert_held(&kvm->lock); 164596906a91SMarc Zyngier 164696906a91SMarc Zyngier unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1); 164796906a91SMarc Zyngier } 164896906a91SMarc Zyngier 164996906a91SMarc Zyngier /* Returns true if all vcpus were locked, false otherwise */ 165096906a91SMarc Zyngier bool lock_all_vcpus(struct kvm *kvm) 165196906a91SMarc Zyngier { 165296906a91SMarc Zyngier struct kvm_vcpu *tmp_vcpu; 165396906a91SMarc Zyngier unsigned long c; 165496906a91SMarc Zyngier 165596906a91SMarc Zyngier lockdep_assert_held(&kvm->lock); 165696906a91SMarc Zyngier 165796906a91SMarc Zyngier /* 165896906a91SMarc
Zyngier * Any time a vcpu is in an ioctl (including running), the 165996906a91SMarc Zyngier * core KVM code tries to grab the vcpu->mutex. 166096906a91SMarc Zyngier * 166196906a91SMarc Zyngier * By grabbing the vcpu->mutex of all VCPUs we ensure that no 166296906a91SMarc Zyngier * other VCPUs can fiddle with the state while we access it. 166396906a91SMarc Zyngier */ 166496906a91SMarc Zyngier kvm_for_each_vcpu(c, tmp_vcpu, kvm) { 166596906a91SMarc Zyngier if (!mutex_trylock(&tmp_vcpu->mutex)) { 166696906a91SMarc Zyngier unlock_vcpus(kvm, c - 1); 166796906a91SMarc Zyngier return false; 166896906a91SMarc Zyngier } 166996906a91SMarc Zyngier } 167096906a91SMarc Zyngier 167196906a91SMarc Zyngier return true; 167296906a91SMarc Zyngier } 167396906a91SMarc Zyngier 167430c95391SDavid Brazdil static unsigned long nvhe_percpu_size(void) 167530c95391SDavid Brazdil { 167630c95391SDavid Brazdil return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) - 167730c95391SDavid Brazdil (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start); 167830c95391SDavid Brazdil } 167930c95391SDavid Brazdil 168030c95391SDavid Brazdil static unsigned long nvhe_percpu_order(void) 168130c95391SDavid Brazdil { 168230c95391SDavid Brazdil unsigned long size = nvhe_percpu_size(); 168330c95391SDavid Brazdil 168430c95391SDavid Brazdil return size ? get_order(size) : 0; 168530c95391SDavid Brazdil } 168630c95391SDavid Brazdil 1687b881cdceSWill Deacon /* A lookup table holding the hypervisor VA for each vector slot */ 1688b881cdceSWill Deacon static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS]; 1689de5bcdb4SWill Deacon 1690b881cdceSWill Deacon static void kvm_init_vector_slot(void *base, enum arm64_hyp_spectre_vector slot) 1691b881cdceSWill Deacon { 1692bc1d2892SQuentin Perret hyp_spectre_vector_selector[slot] = __kvm_vector_slot2addr(base, slot); 16939ef2b48bSWill Deacon } 16949ef2b48bSWill Deacon 1695b881cdceSWill Deacon static int kvm_init_vector_slots(void) 1696b881cdceSWill Deacon { 1697b881cdceSWill Deacon int err; 1698b881cdceSWill Deacon void *base; 1699b881cdceSWill Deacon 1700b881cdceSWill Deacon base = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector)); 1701b881cdceSWill Deacon kvm_init_vector_slot(base, HYP_VECTOR_DIRECT); 1702b881cdceSWill Deacon 1703b881cdceSWill Deacon base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs)); 1704b881cdceSWill Deacon kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT); 1705b881cdceSWill Deacon 17062e403167SQuentin Perret if (kvm_system_needs_idmapped_vectors() && 17072e403167SQuentin Perret !is_protected_kvm_enabled()) { 1708b881cdceSWill Deacon err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs), 1709b881cdceSWill Deacon __BP_HARDEN_HYP_VECS_SZ, &base); 1710b881cdceSWill Deacon if (err) 1711b881cdceSWill Deacon return err; 1712b881cdceSWill Deacon } 17139ef2b48bSWill Deacon 1714b881cdceSWill Deacon kvm_init_vector_slot(base, HYP_VECTOR_INDIRECT); 1715b881cdceSWill Deacon kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_INDIRECT); 17169ef2b48bSWill Deacon return 0; 17179ef2b48bSWill Deacon } 17189ef2b48bSWill Deacon 171953bf620aSSean Christopherson static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits) 17209ed24f4bSMarc Zyngier { 17219cc77581SQuentin Perret struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu); 1722d3e1086cSDavid Brazdil unsigned long tcr; 17239ed24f4bSMarc Zyngier 172471b3ec5fSDavid Brazdil /* 172571b3ec5fSDavid Brazdil * Calculate the raw per-cpu offset without a translation from the 172671b3ec5fSDavid Brazdil * kernel's mapping 
to the linear mapping, and store it in tpidr_el2 172771b3ec5fSDavid Brazdil * so that we can use adr_l to access per-cpu variables in EL2. 1728e1663372SSteven Price * Also drop the KASAN tag which gets in the way... 172971b3ec5fSDavid Brazdil */ 17309cc77581SQuentin Perret params->tpidr_el2 = (unsigned long)kasan_reset_tag(per_cpu_ptr_nvhe_sym(__per_cpu_start, cpu)) - 173130c95391SDavid Brazdil (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start)); 173271b3ec5fSDavid Brazdil 1733d3e1086cSDavid Brazdil params->mair_el2 = read_sysreg(mair_el1); 1734d3e1086cSDavid Brazdil 1735cff3b5cfSMarc Zyngier tcr = read_sysreg(tcr_el1); 1736cff3b5cfSMarc Zyngier if (cpus_have_final_cap(ARM64_KVM_HVHE)) { 1737cff3b5cfSMarc Zyngier tcr |= TCR_EPD1_MASK; 1738cff3b5cfSMarc Zyngier } else { 1739cff3b5cfSMarc Zyngier tcr &= TCR_EL2_MASK; 1740cff3b5cfSMarc Zyngier tcr |= TCR_EL2_RES1; 1741cff3b5cfSMarc Zyngier } 1742d3e1086cSDavid Brazdil tcr &= ~TCR_T0SZ_MASK; 1743579d7ebeSRyan Roberts tcr |= TCR_T0SZ(hyp_va_bits); 1744d3e1086cSDavid Brazdil params->tcr_el2 = tcr; 1745d3e1086cSDavid Brazdil 174663fec243SDavid Brazdil params->pgd_pa = kvm_mmu_get_httbr(); 1747734864c1SQuentin Perret if (is_protected_kvm_enabled()) 1748734864c1SQuentin Perret params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS; 1749734864c1SQuentin Perret else 1750734864c1SQuentin Perret params->hcr_el2 = HCR_HOST_NVHE_FLAGS; 1751d0daf5a2SMarc Zyngier if (cpus_have_final_cap(ARM64_KVM_HVHE)) 1752d0daf5a2SMarc Zyngier params->hcr_el2 |= HCR_E2H; 1753734864c1SQuentin Perret params->vttbr = params->vtcr = 0; 175463fec243SDavid Brazdil 175563fec243SDavid Brazdil /* 175663fec243SDavid Brazdil * Flush the init params from the data cache because the struct will 175763fec243SDavid Brazdil * be read while the MMU is off. 175863fec243SDavid Brazdil */ 175963fec243SDavid Brazdil kvm_flush_dcache_to_poc(params, sizeof(*params)); 17609cc77581SQuentin Perret } 17619cc77581SQuentin Perret 1762bfa79a80SQuentin Perret static void hyp_install_host_vector(void) 17639cc77581SQuentin Perret { 17649cc77581SQuentin Perret struct kvm_nvhe_init_params *params; 17659cc77581SQuentin Perret struct arm_smccc_res res; 17669cc77581SQuentin Perret 17679cc77581SQuentin Perret /* Switch from the HYP stub to our own HYP init vector */ 17689cc77581SQuentin Perret __hyp_set_vectors(kvm_get_idmap_vector()); 17699ed24f4bSMarc Zyngier 177071b3ec5fSDavid Brazdil /* 177171b3ec5fSDavid Brazdil * Call initialization code, and switch to the full blown HYP code. 177271b3ec5fSDavid Brazdil * If the cpucaps haven't been finalized yet, something has gone very 177371b3ec5fSDavid Brazdil * wrong, and hyp will crash and burn when it uses any 177471b3ec5fSDavid Brazdil * cpus_have_const_cap() wrapper. 177571b3ec5fSDavid Brazdil */ 177671b3ec5fSDavid Brazdil BUG_ON(!system_capabilities_finalized()); 17779cc77581SQuentin Perret params = this_cpu_ptr_nvhe_sym(kvm_init_params); 177863fec243SDavid Brazdil arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init), virt_to_phys(params), &res); 177904e4caa8SAndrew Scull WARN_ON(res.a0 != SMCCC_RET_SUCCESS); 1780bfa79a80SQuentin Perret } 1781bfa79a80SQuentin Perret 1782bfa79a80SQuentin Perret static void cpu_init_hyp_mode(void) 1783bfa79a80SQuentin Perret { 1784bfa79a80SQuentin Perret hyp_install_host_vector(); 178571b3ec5fSDavid Brazdil 178671b3ec5fSDavid Brazdil /* 178771b3ec5fSDavid Brazdil * Disabling SSBD on a non-VHE system requires us to enable SSBS 178871b3ec5fSDavid Brazdil * at EL2. 
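 * (On a VHE system the kernel itself runs at EL2 and its own SSBS
 * handling already covers this, which is why the hypercall is only
 * issued on the nVHE path.)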
178971b3ec5fSDavid Brazdil */ 179071b3ec5fSDavid Brazdil if (this_cpu_has_cap(ARM64_SSBS) && 1791d63d975aSMarc Zyngier arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) { 179213aeb9b4SDavid Brazdil kvm_call_hyp_nvhe(__kvm_enable_ssbs); 179371b3ec5fSDavid Brazdil } 17949ed24f4bSMarc Zyngier } 17959ed24f4bSMarc Zyngier 17969ed24f4bSMarc Zyngier static void cpu_hyp_reset(void) 17979ed24f4bSMarc Zyngier { 17989ed24f4bSMarc Zyngier if (!is_kernel_in_hyp_mode()) 17999ed24f4bSMarc Zyngier __hyp_reset_vectors(); 18009ed24f4bSMarc Zyngier } 18019ed24f4bSMarc Zyngier 1802042c76a9SWill Deacon /* 1803042c76a9SWill Deacon * EL2 vectors can be mapped and rerouted in a number of ways, 1804042c76a9SWill Deacon * depending on the kernel configuration and CPU present: 1805042c76a9SWill Deacon * 1806042c76a9SWill Deacon * - If the CPU is affected by Spectre-v2, the hardening sequence is 1807042c76a9SWill Deacon * placed in one of the vector slots, which is executed before jumping 1808042c76a9SWill Deacon * to the real vectors. 1809042c76a9SWill Deacon * 1810c4792b6dSWill Deacon * - If the CPU also has the ARM64_SPECTRE_V3A cap, the slot 1811042c76a9SWill Deacon * containing the hardening sequence is mapped next to the idmap page, 1812042c76a9SWill Deacon * and executed before jumping to the real vectors. 1813042c76a9SWill Deacon * 1814c4792b6dSWill Deacon * - If the CPU only has the ARM64_SPECTRE_V3A cap, then an 1815042c76a9SWill Deacon * empty slot is selected, mapped next to the idmap page, and 1816042c76a9SWill Deacon * executed before jumping to the real vectors. 1817042c76a9SWill Deacon * 1818c4792b6dSWill Deacon * Note that ARM64_SPECTRE_V3A is somewhat incompatible with 1819042c76a9SWill Deacon * VHE, as we don't have hypervisor-specific mappings. If the system 1820042c76a9SWill Deacon * is VHE and yet selects this capability, it will be ignored. 
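 *
 * An informal summary of the cases above (slot selection vs. mapping):
 *
 *   Spectre-v2 only          : hardened slot, regular HYP VA
 *   Spectre-v2 + SPECTRE_V3A : hardened slot, mapped next to the idmap
 *   SPECTRE_V3A only         : empty slot, mapped next to the idmap
 *   Neither                  : direct slot, regular HYP VA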
1821042c76a9SWill Deacon */ 1822042c76a9SWill Deacon static void cpu_set_hyp_vector(void) 1823042c76a9SWill Deacon { 18246279017eSWill Deacon struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data); 1825b881cdceSWill Deacon void *vector = hyp_spectre_vector_selector[data->slot]; 1826042c76a9SWill Deacon 1827bfa79a80SQuentin Perret if (!is_protected_kvm_enabled()) 1828b881cdceSWill Deacon *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector; 1829bfa79a80SQuentin Perret else 1830bfa79a80SQuentin Perret kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot); 1831042c76a9SWill Deacon } 1832042c76a9SWill Deacon 18338579a185SWill Deacon static void cpu_hyp_init_context(void) 18349ed24f4bSMarc Zyngier { 18352a1198c9SDavid Brazdil kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt); 18369ed24f4bSMarc Zyngier 18378579a185SWill Deacon if (!is_kernel_in_hyp_mode()) 18388579a185SWill Deacon cpu_init_hyp_mode(); 18398579a185SWill Deacon } 18408579a185SWill Deacon 18418579a185SWill Deacon static void cpu_hyp_init_features(void) 18428579a185SWill Deacon { 18438579a185SWill Deacon cpu_set_hyp_vector(); 18448579a185SWill Deacon kvm_arm_init_debug(); 1845a0e47952SAndrew Scull 18469ed24f4bSMarc Zyngier if (is_kernel_in_hyp_mode()) 18479ed24f4bSMarc Zyngier kvm_timer_init_vhe(); 18489ed24f4bSMarc Zyngier 18499ed24f4bSMarc Zyngier if (vgic_present) 18509ed24f4bSMarc Zyngier kvm_vgic_init_cpu_hardware(); 18519ed24f4bSMarc Zyngier } 18529ed24f4bSMarc Zyngier 18538579a185SWill Deacon static void cpu_hyp_reinit(void) 18548579a185SWill Deacon { 18558579a185SWill Deacon cpu_hyp_reset(); 18568579a185SWill Deacon cpu_hyp_init_context(); 18578579a185SWill Deacon cpu_hyp_init_features(); 18588579a185SWill Deacon } 18598579a185SWill Deacon 18609ed24f4bSMarc Zyngier static void _kvm_arch_hardware_enable(void *discard) 18619ed24f4bSMarc Zyngier { 18629ed24f4bSMarc Zyngier if (!__this_cpu_read(kvm_arm_hardware_enabled)) { 18639ed24f4bSMarc Zyngier cpu_hyp_reinit(); 18649ed24f4bSMarc Zyngier __this_cpu_write(kvm_arm_hardware_enabled, 1); 18659ed24f4bSMarc Zyngier } 18669ed24f4bSMarc Zyngier } 18679ed24f4bSMarc Zyngier 18689ed24f4bSMarc Zyngier int kvm_arch_hardware_enable(void) 18699ed24f4bSMarc Zyngier { 1870466d27e4SMarc Zyngier int was_enabled = __this_cpu_read(kvm_arm_hardware_enabled); 1871466d27e4SMarc Zyngier 18729ed24f4bSMarc Zyngier _kvm_arch_hardware_enable(NULL); 1873466d27e4SMarc Zyngier 1874466d27e4SMarc Zyngier if (!was_enabled) { 1875466d27e4SMarc Zyngier kvm_vgic_cpu_up(); 1876466d27e4SMarc Zyngier kvm_timer_cpu_up(); 1877466d27e4SMarc Zyngier } 1878466d27e4SMarc Zyngier 18799ed24f4bSMarc Zyngier return 0; 18809ed24f4bSMarc Zyngier } 18819ed24f4bSMarc Zyngier 18829ed24f4bSMarc Zyngier static void _kvm_arch_hardware_disable(void *discard) 18839ed24f4bSMarc Zyngier { 18849ed24f4bSMarc Zyngier if (__this_cpu_read(kvm_arm_hardware_enabled)) { 18859ed24f4bSMarc Zyngier cpu_hyp_reset(); 18869ed24f4bSMarc Zyngier __this_cpu_write(kvm_arm_hardware_enabled, 0); 18879ed24f4bSMarc Zyngier } 18889ed24f4bSMarc Zyngier } 18899ed24f4bSMarc Zyngier 18909ed24f4bSMarc Zyngier void kvm_arch_hardware_disable(void) 18919ed24f4bSMarc Zyngier { 1892466d27e4SMarc Zyngier if (__this_cpu_read(kvm_arm_hardware_enabled)) { 1893466d27e4SMarc Zyngier kvm_timer_cpu_down(); 1894466d27e4SMarc Zyngier kvm_vgic_cpu_down(); 1895466d27e4SMarc Zyngier } 1896466d27e4SMarc Zyngier 1897fa8c3d65SDavid Brazdil if (!is_protected_kvm_enabled()) 18989ed24f4bSMarc Zyngier _kvm_arch_hardware_disable(NULL); 
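	/*
	 * When protected KVM is enabled, the hypervisor remains resident
	 * and the host is no longer allowed to tear down EL2, so the
	 * actual reset is skipped above.
	 */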
18999ed24f4bSMarc Zyngier } 19009ed24f4bSMarc Zyngier 19019ed24f4bSMarc Zyngier #ifdef CONFIG_CPU_PM 19029ed24f4bSMarc Zyngier static int hyp_init_cpu_pm_notifier(struct notifier_block *self, 19039ed24f4bSMarc Zyngier unsigned long cmd, 19049ed24f4bSMarc Zyngier void *v) 19059ed24f4bSMarc Zyngier { 19069ed24f4bSMarc Zyngier /* 19079ed24f4bSMarc Zyngier * kvm_arm_hardware_enabled is left with its old value over 19089ed24f4bSMarc Zyngier * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should 19099ed24f4bSMarc Zyngier * re-enable hyp. 19109ed24f4bSMarc Zyngier */ 19119ed24f4bSMarc Zyngier switch (cmd) { 19129ed24f4bSMarc Zyngier case CPU_PM_ENTER: 19139ed24f4bSMarc Zyngier if (__this_cpu_read(kvm_arm_hardware_enabled)) 19149ed24f4bSMarc Zyngier /* 19159ed24f4bSMarc Zyngier * don't update kvm_arm_hardware_enabled here 19169ed24f4bSMarc Zyngier * so that the hardware will be re-enabled 19179ed24f4bSMarc Zyngier * when we resume. See below. 19189ed24f4bSMarc Zyngier */ 19199ed24f4bSMarc Zyngier cpu_hyp_reset(); 19209ed24f4bSMarc Zyngier 19219ed24f4bSMarc Zyngier return NOTIFY_OK; 19229ed24f4bSMarc Zyngier case CPU_PM_ENTER_FAILED: 19239ed24f4bSMarc Zyngier case CPU_PM_EXIT: 19249ed24f4bSMarc Zyngier if (__this_cpu_read(kvm_arm_hardware_enabled)) 19259ed24f4bSMarc Zyngier /* The hardware was enabled before suspend. */ 19269ed24f4bSMarc Zyngier cpu_hyp_reinit(); 19279ed24f4bSMarc Zyngier 19289ed24f4bSMarc Zyngier return NOTIFY_OK; 19299ed24f4bSMarc Zyngier 19309ed24f4bSMarc Zyngier default: 19319ed24f4bSMarc Zyngier return NOTIFY_DONE; 19329ed24f4bSMarc Zyngier } 19339ed24f4bSMarc Zyngier } 19349ed24f4bSMarc Zyngier 19359ed24f4bSMarc Zyngier static struct notifier_block hyp_init_cpu_pm_nb = { 19369ed24f4bSMarc Zyngier .notifier_call = hyp_init_cpu_pm_notifier, 19379ed24f4bSMarc Zyngier }; 19389ed24f4bSMarc Zyngier 193953bf620aSSean Christopherson static void __init hyp_cpu_pm_init(void) 19409ed24f4bSMarc Zyngier { 1941fa8c3d65SDavid Brazdil if (!is_protected_kvm_enabled()) 19429ed24f4bSMarc Zyngier cpu_pm_register_notifier(&hyp_init_cpu_pm_nb); 19439ed24f4bSMarc Zyngier } 194453bf620aSSean Christopherson static void __init hyp_cpu_pm_exit(void) 19459ed24f4bSMarc Zyngier { 1946fa8c3d65SDavid Brazdil if (!is_protected_kvm_enabled()) 19479ed24f4bSMarc Zyngier cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb); 19489ed24f4bSMarc Zyngier } 19499ed24f4bSMarc Zyngier #else 195053bf620aSSean Christopherson static inline void __init hyp_cpu_pm_init(void) 19519ed24f4bSMarc Zyngier { 19529ed24f4bSMarc Zyngier } 195353bf620aSSean Christopherson static inline void __init hyp_cpu_pm_exit(void) 19549ed24f4bSMarc Zyngier { 19559ed24f4bSMarc Zyngier } 19569ed24f4bSMarc Zyngier #endif 19579ed24f4bSMarc Zyngier 195853bf620aSSean Christopherson static void __init init_cpu_logical_map(void) 195994f5e8a4SDavid Brazdil { 196094f5e8a4SDavid Brazdil unsigned int cpu; 196194f5e8a4SDavid Brazdil 196294f5e8a4SDavid Brazdil /* 196394f5e8a4SDavid Brazdil * Copy the MPIDR <-> logical CPU ID mapping to hyp. 196421ea4578SJulia Lawall * Only copy the set of online CPUs whose features have been checked 196594f5e8a4SDavid Brazdil * against the finalized system capabilities. The hypervisor will not 196694f5e8a4SDavid Brazdil * allow any other CPUs from the `possible` set to boot. 
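 *
 * The table itself is a plain copy of the kernel's MPIDR view, e.g.
 * (made-up MPIDR values, for illustration only):
 *
 *	hyp_cpu_logical_map[0] = 0x0000
 *	hyp_cpu_logical_map[1] = 0x0001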
196794f5e8a4SDavid Brazdil */ 196894f5e8a4SDavid Brazdil for_each_online_cpu(cpu) 196961fe0c37SDavid Brazdil hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu); 197094f5e8a4SDavid Brazdil } 197194f5e8a4SDavid Brazdil 1972767c973fSMarc Zyngier #define init_psci_0_1_impl_state(config, what) \ 1973767c973fSMarc Zyngier config.psci_0_1_ ## what ## _implemented = psci_ops.what 1974767c973fSMarc Zyngier 197553bf620aSSean Christopherson static bool __init init_psci_relay(void) 1976eeeee719SDavid Brazdil { 1977eeeee719SDavid Brazdil /* 1978eeeee719SDavid Brazdil * If PSCI has not been initialized, protected KVM cannot install 1979eeeee719SDavid Brazdil * itself on newly booted CPUs. 1980eeeee719SDavid Brazdil */ 1981eeeee719SDavid Brazdil if (!psci_ops.get_version) { 1982eeeee719SDavid Brazdil kvm_err("Cannot initialize protected mode without PSCI\n"); 1983eeeee719SDavid Brazdil return false; 1984eeeee719SDavid Brazdil } 1985eeeee719SDavid Brazdil 1986ff367fe4SDavid Brazdil kvm_host_psci_config.version = psci_ops.get_version(); 198712bdce4fSWill Deacon kvm_host_psci_config.smccc_version = arm_smccc_get_version(); 1988ff367fe4SDavid Brazdil 1989ff367fe4SDavid Brazdil if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) { 1990ff367fe4SDavid Brazdil kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids(); 1991767c973fSMarc Zyngier init_psci_0_1_impl_state(kvm_host_psci_config, cpu_suspend); 1992767c973fSMarc Zyngier init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on); 1993767c973fSMarc Zyngier init_psci_0_1_impl_state(kvm_host_psci_config, cpu_off); 1994767c973fSMarc Zyngier init_psci_0_1_impl_state(kvm_host_psci_config, migrate); 1995ff367fe4SDavid Brazdil } 1996eeeee719SDavid Brazdil return true; 1997eeeee719SDavid Brazdil } 1998eeeee719SDavid Brazdil 199953bf620aSSean Christopherson static int __init init_subsystems(void) 20009ed24f4bSMarc Zyngier { 20019ed24f4bSMarc Zyngier int err = 0; 20029ed24f4bSMarc Zyngier 20039ed24f4bSMarc Zyngier /* 20049ed24f4bSMarc Zyngier * Enable hardware so that subsystem initialisation can access EL2. 
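 * (Both the vgic and timer initialisation below issue hypercalls, so
 * EL2 must be up on every online CPU before they run.)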
20059ed24f4bSMarc Zyngier */ 20069ed24f4bSMarc Zyngier on_each_cpu(_kvm_arch_hardware_enable, NULL, 1); 20079ed24f4bSMarc Zyngier 20089ed24f4bSMarc Zyngier /* 20099ed24f4bSMarc Zyngier * Register CPU low-power notifier 20109ed24f4bSMarc Zyngier */ 20119ed24f4bSMarc Zyngier hyp_cpu_pm_init(); 20129ed24f4bSMarc Zyngier 20139ed24f4bSMarc Zyngier /* 20149ed24f4bSMarc Zyngier * Init HYP view of VGIC 20159ed24f4bSMarc Zyngier */ 20169ed24f4bSMarc Zyngier err = kvm_vgic_hyp_init(); 20179ed24f4bSMarc Zyngier switch (err) { 20189ed24f4bSMarc Zyngier case 0: 20199ed24f4bSMarc Zyngier vgic_present = true; 20209ed24f4bSMarc Zyngier break; 20219ed24f4bSMarc Zyngier case -ENODEV: 20229ed24f4bSMarc Zyngier case -ENXIO: 20239ed24f4bSMarc Zyngier vgic_present = false; 20249ed24f4bSMarc Zyngier err = 0; 20259ed24f4bSMarc Zyngier break; 20269ed24f4bSMarc Zyngier default: 20279ed24f4bSMarc Zyngier goto out; 20289ed24f4bSMarc Zyngier } 20299ed24f4bSMarc Zyngier 20309ed24f4bSMarc Zyngier /* 20319ed24f4bSMarc Zyngier * Init HYP architected timer support 20329ed24f4bSMarc Zyngier */ 20339ed24f4bSMarc Zyngier err = kvm_timer_hyp_init(vgic_present); 20349ed24f4bSMarc Zyngier if (err) 20359ed24f4bSMarc Zyngier goto out; 20369ed24f4bSMarc Zyngier 203717ed14ebSSean Christopherson kvm_register_perf_callbacks(NULL); 203817ed14ebSSean Christopherson 20399ed24f4bSMarc Zyngier out: 204078b3bf48SSean Christopherson if (err) 204178b3bf48SSean Christopherson hyp_cpu_pm_exit(); 204278b3bf48SSean Christopherson 2043fa8c3d65SDavid Brazdil if (err || !is_protected_kvm_enabled()) 20449ed24f4bSMarc Zyngier on_each_cpu(_kvm_arch_hardware_disable, NULL, 1); 20459ed24f4bSMarc Zyngier 20469ed24f4bSMarc Zyngier return err; 20479ed24f4bSMarc Zyngier } 20489ed24f4bSMarc Zyngier 204953bf620aSSean Christopherson static void __init teardown_subsystems(void) 205078b3bf48SSean Christopherson { 205178b3bf48SSean Christopherson kvm_unregister_perf_callbacks(); 205278b3bf48SSean Christopherson hyp_cpu_pm_exit(); 205378b3bf48SSean Christopherson } 205478b3bf48SSean Christopherson 205553bf620aSSean Christopherson static void __init teardown_hyp_mode(void) 20569ed24f4bSMarc Zyngier { 20579ed24f4bSMarc Zyngier int cpu; 20589ed24f4bSMarc Zyngier 20599ed24f4bSMarc Zyngier free_hyp_pgds(); 206030c95391SDavid Brazdil for_each_possible_cpu(cpu) { 20619ed24f4bSMarc Zyngier free_page(per_cpu(kvm_arm_hyp_stack_page, cpu)); 2062fe41a7f8SQuentin Perret free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order()); 206330c95391SDavid Brazdil } 20649ed24f4bSMarc Zyngier } 20659ed24f4bSMarc Zyngier 206653bf620aSSean Christopherson static int __init do_pkvm_init(u32 hyp_va_bits) 2067bfa79a80SQuentin Perret { 2068fe41a7f8SQuentin Perret void *per_cpu_base = kvm_ksym_ref(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)); 2069bfa79a80SQuentin Perret int ret; 2070bfa79a80SQuentin Perret 2071bfa79a80SQuentin Perret preempt_disable(); 20728579a185SWill Deacon cpu_hyp_init_context(); 2073bfa79a80SQuentin Perret ret = kvm_call_hyp_nvhe(__pkvm_init, hyp_mem_base, hyp_mem_size, 2074bfa79a80SQuentin Perret num_possible_cpus(), kern_hyp_va(per_cpu_base), 2075bfa79a80SQuentin Perret hyp_va_bits); 20768579a185SWill Deacon cpu_hyp_init_features(); 20778579a185SWill Deacon 20788579a185SWill Deacon /* 20798579a185SWill Deacon * The stub hypercalls are now disabled, so set our local flag to 20808579a185SWill Deacon * prevent a later re-init attempt in kvm_arch_hardware_enable().
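 * (This is the same per-cpu kvm_arm_hardware_enabled flag that
 * _kvm_arch_hardware_enable() tests before calling cpu_hyp_reinit().)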
20818579a185SWill Deacon */ 20828579a185SWill Deacon __this_cpu_write(kvm_arm_hardware_enabled, 1); 2083bfa79a80SQuentin Perret preempt_enable(); 2084bfa79a80SQuentin Perret 2085bfa79a80SQuentin Perret return ret; 2086bfa79a80SQuentin Perret } 2087bfa79a80SQuentin Perret 2088e8162521SFuad Tabba static u64 get_hyp_id_aa64pfr0_el1(void) 2089e8162521SFuad Tabba { 2090e8162521SFuad Tabba /* 2091e8162521SFuad Tabba * Track whether the system isn't affected by spectre/meltdown in the 2092e8162521SFuad Tabba * hypervisor's view of id_aa64pfr0_el1, used for protected VMs. 2093e8162521SFuad Tabba * Although this is per-CPU, we make it global for simplicity, e.g., not 2094e8162521SFuad Tabba * to have to worry about vcpu migration. 2095e8162521SFuad Tabba * 2096e8162521SFuad Tabba * Unlike for non-protected VMs, userspace cannot override this for 2097e8162521SFuad Tabba * protected VMs. 2098e8162521SFuad Tabba */ 2099e8162521SFuad Tabba u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); 2100e8162521SFuad Tabba 2101e8162521SFuad Tabba val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) | 2102e8162521SFuad Tabba ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3)); 2103e8162521SFuad Tabba 2104e8162521SFuad Tabba val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), 2105e8162521SFuad Tabba arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED); 2106e8162521SFuad Tabba val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), 2107e8162521SFuad Tabba arm64_get_meltdown_state() == SPECTRE_UNAFFECTED); 2108e8162521SFuad Tabba 2109e8162521SFuad Tabba return val; 2110e8162521SFuad Tabba } 2111e8162521SFuad Tabba 21126c165223SWill Deacon static void kvm_hyp_init_symbols(void) 2113bfa79a80SQuentin Perret { 2114e8162521SFuad Tabba kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = get_hyp_id_aa64pfr0_el1(); 21156c30bfb1SFuad Tabba kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1); 21166c30bfb1SFuad Tabba kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1); 21176c30bfb1SFuad Tabba kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1); 2118def8c222SVladimir Murzin kvm_nvhe_sym(id_aa64isar2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1); 21197c419937SMarc Zyngier kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); 21207c419937SMarc Zyngier kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); 21216c30bfb1SFuad Tabba kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1); 21228669651cSQuentin Perret kvm_nvhe_sym(id_aa64smfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64SMFR0_EL1); 212313e248aaSWill Deacon kvm_nvhe_sym(__icache_flags) = __icache_flags; 212473f38ef2SWill Deacon kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits; 21256c165223SWill Deacon } 21266c165223SWill Deacon 212753bf620aSSean Christopherson static int __init kvm_hyp_init_protection(u32 hyp_va_bits) 21286c165223SWill Deacon { 21296c165223SWill Deacon void *addr = phys_to_virt(hyp_mem_base); 21306c165223SWill Deacon int ret; 21317c419937SMarc Zyngier 2132bfa79a80SQuentin Perret ret = create_hyp_mappings(addr, addr + hyp_mem_size, PAGE_HYP); 2133bfa79a80SQuentin Perret if (ret) 2134bfa79a80SQuentin Perret return ret; 2135bfa79a80SQuentin Perret 2136bfa79a80SQuentin Perret ret = do_pkvm_init(hyp_va_bits); 2137bfa79a80SQuentin Perret if (ret) 2138bfa79a80SQuentin Perret return ret; 2139bfa79a80SQuentin Perret 2140bfa79a80SQuentin Perret 
free_hyp_pgds(); 2141bfa79a80SQuentin Perret 2142bfa79a80SQuentin Perret return 0; 2143bfa79a80SQuentin Perret } 2144bfa79a80SQuentin Perret 21458c15c2a0SMostafa Saleh static void pkvm_hyp_init_ptrauth(void) 21468c15c2a0SMostafa Saleh { 21478c15c2a0SMostafa Saleh struct kvm_cpu_context *hyp_ctxt; 21488c15c2a0SMostafa Saleh int cpu; 21498c15c2a0SMostafa Saleh 21508c15c2a0SMostafa Saleh for_each_possible_cpu(cpu) { 21518c15c2a0SMostafa Saleh hyp_ctxt = per_cpu_ptr_nvhe_sym(kvm_hyp_ctxt, cpu); 21528c15c2a0SMostafa Saleh hyp_ctxt->sys_regs[APIAKEYLO_EL1] = get_random_long(); 21538c15c2a0SMostafa Saleh hyp_ctxt->sys_regs[APIAKEYHI_EL1] = get_random_long(); 21548c15c2a0SMostafa Saleh hyp_ctxt->sys_regs[APIBKEYLO_EL1] = get_random_long(); 21558c15c2a0SMostafa Saleh hyp_ctxt->sys_regs[APIBKEYHI_EL1] = get_random_long(); 21568c15c2a0SMostafa Saleh hyp_ctxt->sys_regs[APDAKEYLO_EL1] = get_random_long(); 21578c15c2a0SMostafa Saleh hyp_ctxt->sys_regs[APDAKEYHI_EL1] = get_random_long(); 21588c15c2a0SMostafa Saleh hyp_ctxt->sys_regs[APDBKEYLO_EL1] = get_random_long(); 21598c15c2a0SMostafa Saleh hyp_ctxt->sys_regs[APDBKEYHI_EL1] = get_random_long(); 21608c15c2a0SMostafa Saleh hyp_ctxt->sys_regs[APGAKEYLO_EL1] = get_random_long(); 21618c15c2a0SMostafa Saleh hyp_ctxt->sys_regs[APGAKEYHI_EL1] = get_random_long(); 21628c15c2a0SMostafa Saleh } 21638c15c2a0SMostafa Saleh } 21648c15c2a0SMostafa Saleh 216567d953d4SMarc Zyngier /* Inits Hyp-mode on all online CPUs */ 216653bf620aSSean Christopherson static int __init init_hyp_mode(void) 21679ed24f4bSMarc Zyngier { 2168bfa79a80SQuentin Perret u32 hyp_va_bits; 21699ed24f4bSMarc Zyngier int cpu; 2170bfa79a80SQuentin Perret int err = -ENOMEM; 2171bfa79a80SQuentin Perret 2172bfa79a80SQuentin Perret /* 2173bfa79a80SQuentin Perret * The protected Hyp-mode cannot be initialized if the memory pool 2174bfa79a80SQuentin Perret * allocation has failed. 2175bfa79a80SQuentin Perret */ 2176bfa79a80SQuentin Perret if (is_protected_kvm_enabled() && !hyp_mem_base) 2177bfa79a80SQuentin Perret goto out_err; 21789ed24f4bSMarc Zyngier 21799ed24f4bSMarc Zyngier /* 21809ed24f4bSMarc Zyngier * Allocate Hyp PGD and setup Hyp identity mapping 21819ed24f4bSMarc Zyngier */ 2182bfa79a80SQuentin Perret err = kvm_mmu_init(&hyp_va_bits); 21839ed24f4bSMarc Zyngier if (err) 21849ed24f4bSMarc Zyngier goto out_err; 21859ed24f4bSMarc Zyngier 21869ed24f4bSMarc Zyngier /* 21879ed24f4bSMarc Zyngier * Allocate stack pages for Hypervisor-mode 21889ed24f4bSMarc Zyngier */ 21899ed24f4bSMarc Zyngier for_each_possible_cpu(cpu) { 21909ed24f4bSMarc Zyngier unsigned long stack_page; 21919ed24f4bSMarc Zyngier 21929ed24f4bSMarc Zyngier stack_page = __get_free_page(GFP_KERNEL); 21939ed24f4bSMarc Zyngier if (!stack_page) { 21949ed24f4bSMarc Zyngier err = -ENOMEM; 21959ed24f4bSMarc Zyngier goto out_err; 21969ed24f4bSMarc Zyngier } 21979ed24f4bSMarc Zyngier 21989ed24f4bSMarc Zyngier per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page; 21999ed24f4bSMarc Zyngier } 22009ed24f4bSMarc Zyngier 22019ed24f4bSMarc Zyngier /* 220230c95391SDavid Brazdil * Allocate and initialize pages for Hypervisor-mode percpu regions. 
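 * Each CPU gets its own writable copy of the nVHE per-cpu section, with
 * the allocation rounded up to a whole page order. As a made-up example,
 * a 5 KiB section with 4 KiB pages gives nvhe_percpu_order() == 1, i.e.
 * two pages per CPU.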
220330c95391SDavid Brazdil */ 220430c95391SDavid Brazdil for_each_possible_cpu(cpu) { 220530c95391SDavid Brazdil struct page *page; 220630c95391SDavid Brazdil void *page_addr; 220730c95391SDavid Brazdil 220830c95391SDavid Brazdil page = alloc_pages(GFP_KERNEL, nvhe_percpu_order()); 220930c95391SDavid Brazdil if (!page) { 221030c95391SDavid Brazdil err = -ENOMEM; 221130c95391SDavid Brazdil goto out_err; 221230c95391SDavid Brazdil } 221330c95391SDavid Brazdil 221430c95391SDavid Brazdil page_addr = page_address(page); 221530c95391SDavid Brazdil memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size()); 2216fe41a7f8SQuentin Perret kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu] = (unsigned long)page_addr; 221730c95391SDavid Brazdil } 221830c95391SDavid Brazdil 221930c95391SDavid Brazdil /* 22209ed24f4bSMarc Zyngier * Map the Hyp-code called directly from the host 22219ed24f4bSMarc Zyngier */ 22229ed24f4bSMarc Zyngier err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start), 22239ed24f4bSMarc Zyngier kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC); 22249ed24f4bSMarc Zyngier if (err) { 22259ed24f4bSMarc Zyngier kvm_err("Cannot map world-switch code\n"); 22269ed24f4bSMarc Zyngier goto out_err; 22279ed24f4bSMarc Zyngier } 22289ed24f4bSMarc Zyngier 222916174eeaSDavid Brazdil err = create_hyp_mappings(kvm_ksym_ref(__hyp_rodata_start), 223016174eeaSDavid Brazdil kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO); 22312d7bf218SDavid Brazdil if (err) { 223216174eeaSDavid Brazdil kvm_err("Cannot map .hyp.rodata section\n"); 22332d7bf218SDavid Brazdil goto out_err; 22342d7bf218SDavid Brazdil } 22352d7bf218SDavid Brazdil 22369ed24f4bSMarc Zyngier err = create_hyp_mappings(kvm_ksym_ref(__start_rodata), 22379ed24f4bSMarc Zyngier kvm_ksym_ref(__end_rodata), PAGE_HYP_RO); 22389ed24f4bSMarc Zyngier if (err) { 22399ed24f4bSMarc Zyngier kvm_err("Cannot map rodata section\n"); 22409ed24f4bSMarc Zyngier goto out_err; 22419ed24f4bSMarc Zyngier } 22429ed24f4bSMarc Zyngier 2243380e18adSQuentin Perret /* 2244380e18adSQuentin Perret * .hyp.bss is guaranteed to be placed at the beginning of the .bss 2245380e18adSQuentin Perret * section thanks to an assertion in the linker script. Map it RW and 2246380e18adSQuentin Perret * the rest of .bss RO. 
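 * (Concretely, the RW mapping below covers __hyp_bss_start..__hyp_bss_end
 * and the RO mapping that follows covers __hyp_bss_end..__bss_stop.)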
2247380e18adSQuentin Perret */ 2248380e18adSQuentin Perret err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_start), 2249380e18adSQuentin Perret kvm_ksym_ref(__hyp_bss_end), PAGE_HYP); 2250380e18adSQuentin Perret if (err) { 2251380e18adSQuentin Perret kvm_err("Cannot map hyp bss section: %d\n", err); 2252380e18adSQuentin Perret goto out_err; 2253380e18adSQuentin Perret } 2254380e18adSQuentin Perret 2255380e18adSQuentin Perret err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_end), 22569ed24f4bSMarc Zyngier kvm_ksym_ref(__bss_stop), PAGE_HYP_RO); 22579ed24f4bSMarc Zyngier if (err) { 22589ed24f4bSMarc Zyngier kvm_err("Cannot map bss section\n"); 22599ed24f4bSMarc Zyngier goto out_err; 22609ed24f4bSMarc Zyngier } 22619ed24f4bSMarc Zyngier 22629ed24f4bSMarc Zyngier /* 22639ed24f4bSMarc Zyngier * Map the Hyp stack pages 22649ed24f4bSMarc Zyngier */ 22659ed24f4bSMarc Zyngier for_each_possible_cpu(cpu) { 2266ce335431SKalesh Singh struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu); 22679ed24f4bSMarc Zyngier char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu); 2268ce335431SKalesh Singh unsigned long hyp_addr; 22699ed24f4bSMarc Zyngier 2270ce335431SKalesh Singh /* 2271ce335431SKalesh Singh * Allocate a contiguous HYP private VA range for the stack 2272ce335431SKalesh Singh * and guard page. The allocation is also aligned based on 2273ce335431SKalesh Singh * the order of its size. 2274ce335431SKalesh Singh */ 2275ce335431SKalesh Singh err = hyp_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr); 2276ce335431SKalesh Singh if (err) { 2277ce335431SKalesh Singh kvm_err("Cannot allocate hyp stack guard page\n"); 2278ce335431SKalesh Singh goto out_err; 2279ce335431SKalesh Singh } 2280ce335431SKalesh Singh 2281ce335431SKalesh Singh /* 2282ce335431SKalesh Singh * Since the stack grows downwards, map the stack to the page 2283ce335431SKalesh Singh * at the higher address and leave the lower guard page 2284ce335431SKalesh Singh * unbacked. 2285ce335431SKalesh Singh * 2286ce335431SKalesh Singh * Any valid stack address now has the PAGE_SHIFT bit as 1 2287ce335431SKalesh Singh * and addresses corresponding to the guard page have the 2288ce335431SKalesh Singh * PAGE_SHIFT bit as 0 - this is used for overflow detection. 2289ce335431SKalesh Singh */ 2290ce335431SKalesh Singh err = __create_hyp_mappings(hyp_addr + PAGE_SIZE, PAGE_SIZE, 2291ce335431SKalesh Singh __pa(stack_page), PAGE_HYP); 22929ed24f4bSMarc Zyngier if (err) { 22939ed24f4bSMarc Zyngier kvm_err("Cannot map hyp stack\n"); 22949ed24f4bSMarc Zyngier goto out_err; 22959ed24f4bSMarc Zyngier } 2296ce335431SKalesh Singh 2297ce335431SKalesh Singh /* 2298ce335431SKalesh Singh * Save the stack PA in nvhe_init_params. This will be needed 2299ce335431SKalesh Singh * to recreate the stack mapping in protected nVHE mode. 2300ce335431SKalesh Singh * __hyp_pa() won't do the right thing there, since the stack 2301ce335431SKalesh Singh * has been mapped in the flexible private VA space. 
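 *
 * The resulting per-CPU private VA layout is therefore (sketch):
 *
 *	hyp_addr                 : guard page, left unbacked
 *	hyp_addr + PAGE_SIZE     : stack page, backed by stack_page
 *	hyp_addr + 2 * PAGE_SIZE : initial SP (params->stack_hyp_va)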
2302ce335431SKalesh Singh */ 2303ce335431SKalesh Singh params->stack_pa = __pa(stack_page); 2304ce335431SKalesh Singh 2305ce335431SKalesh Singh params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE); 23069ed24f4bSMarc Zyngier } 23079ed24f4bSMarc Zyngier 23089ed24f4bSMarc Zyngier for_each_possible_cpu(cpu) { 2309fe41a7f8SQuentin Perret char *percpu_begin = (char *)kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu]; 231030c95391SDavid Brazdil char *percpu_end = percpu_begin + nvhe_percpu_size(); 23119ed24f4bSMarc Zyngier 23129cc77581SQuentin Perret /* Map Hyp percpu pages */ 231330c95391SDavid Brazdil err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP); 23149ed24f4bSMarc Zyngier if (err) { 231530c95391SDavid Brazdil kvm_err("Cannot map hyp percpu region\n"); 23166e3bfbb2SAndrew Scull goto out_err; 23176e3bfbb2SAndrew Scull } 23189cc77581SQuentin Perret 23199cc77581SQuentin Perret /* Prepare the CPU initialization parameters */ 2320579d7ebeSRyan Roberts cpu_prepare_hyp_mode(cpu, hyp_va_bits); 23219ed24f4bSMarc Zyngier } 23229ed24f4bSMarc Zyngier 23236c165223SWill Deacon kvm_hyp_init_symbols(); 23246c165223SWill Deacon 2325eeeee719SDavid Brazdil if (is_protected_kvm_enabled()) { 23268c15c2a0SMostafa Saleh if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) && 23278c15c2a0SMostafa Saleh cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH)) 23288c15c2a0SMostafa Saleh pkvm_hyp_init_ptrauth(); 23298c15c2a0SMostafa Saleh 233094f5e8a4SDavid Brazdil init_cpu_logical_map(); 233194f5e8a4SDavid Brazdil 233252b9e265SWang Wensheng if (!init_psci_relay()) { 233352b9e265SWang Wensheng err = -ENODEV; 2334eeeee719SDavid Brazdil goto out_err; 2335eeeee719SDavid Brazdil } 2336eeeee719SDavid Brazdil 2337bfa79a80SQuentin Perret err = kvm_hyp_init_protection(hyp_va_bits); 2338bfa79a80SQuentin Perret if (err) { 2339bfa79a80SQuentin Perret kvm_err("Failed to init hyp memory protection\n"); 2340bfa79a80SQuentin Perret goto out_err; 2341bfa79a80SQuentin Perret } 2342bfa79a80SQuentin Perret } 23439ed24f4bSMarc Zyngier 23449ed24f4bSMarc Zyngier return 0; 23459ed24f4bSMarc Zyngier 23469ed24f4bSMarc Zyngier out_err: 23479ed24f4bSMarc Zyngier teardown_hyp_mode(); 23489ed24f4bSMarc Zyngier kvm_err("error initializing Hyp mode: %d\n", err); 23499ed24f4bSMarc Zyngier return err; 23509ed24f4bSMarc Zyngier } 23519ed24f4bSMarc Zyngier 23529ed24f4bSMarc Zyngier struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr) 23539ed24f4bSMarc Zyngier { 23549ed24f4bSMarc Zyngier struct kvm_vcpu *vcpu; 235546808a4cSMarc Zyngier unsigned long i; 23569ed24f4bSMarc Zyngier 23579ed24f4bSMarc Zyngier mpidr &= MPIDR_HWID_BITMASK; 23589ed24f4bSMarc Zyngier kvm_for_each_vcpu(i, vcpu, kvm) { 23599ed24f4bSMarc Zyngier if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu)) 23609ed24f4bSMarc Zyngier return vcpu; 23619ed24f4bSMarc Zyngier } 23629ed24f4bSMarc Zyngier return NULL; 23639ed24f4bSMarc Zyngier } 23649ed24f4bSMarc Zyngier 2365d663b8a2SPaolo Bonzini bool kvm_arch_irqchip_in_kernel(struct kvm *kvm) 2366d663b8a2SPaolo Bonzini { 2367d663b8a2SPaolo Bonzini return irqchip_in_kernel(kvm); 2368d663b8a2SPaolo Bonzini } 2369d663b8a2SPaolo Bonzini 23709ed24f4bSMarc Zyngier bool kvm_arch_has_irq_bypass(void) 23719ed24f4bSMarc Zyngier { 23729ed24f4bSMarc Zyngier return true; 23739ed24f4bSMarc Zyngier } 23749ed24f4bSMarc Zyngier 23759ed24f4bSMarc Zyngier int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons, 23769ed24f4bSMarc Zyngier struct irq_bypass_producer *prod) 23779ed24f4bSMarc Zyngier { 23789ed24f4bSMarc Zyngier struct kvm_kernel_irqfd *irqfd = 
	}

	for_each_possible_cpu(cpu) {
		char *percpu_begin = (char *)kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];
		char *percpu_end = percpu_begin + nvhe_percpu_size();

		/* Map Hyp percpu pages */
		err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP);
		if (err) {
			kvm_err("Cannot map hyp percpu region\n");
			goto out_err;
		}

		/* Prepare the CPU initialization parameters */
		cpu_prepare_hyp_mode(cpu, hyp_va_bits);
	}

	kvm_hyp_init_symbols();

	if (is_protected_kvm_enabled()) {
		if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) &&
		    cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH))
			pkvm_hyp_init_ptrauth();

		init_cpu_logical_map();

		if (!init_psci_relay()) {
			err = -ENODEV;
			goto out_err;
		}

		err = kvm_hyp_init_protection(hyp_va_bits);
		if (err) {
			kvm_err("Failed to init hyp memory protection\n");
			goto out_err;
		}
	}

	return 0;

out_err:
	teardown_hyp_mode();
	kvm_err("error initializing Hyp mode: %d\n", err);
	return err;
}

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	mpidr &= MPIDR_HWID_BITMASK;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
			return vcpu;
	}
	return NULL;
}
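
/*
 * Illustrative use (an assumed caller shape, see the PSCI code for the real
 * thing): PSCI CPU_ON passes a target MPIDR as its first SMCCC argument,
 * which is resolved to a vCPU roughly like this:
 *
 *	target = kvm_mpidr_to_vcpu(vcpu->kvm, smccc_get_arg1(vcpu));
 *	if (!target)
 *		return PSCI_RET_INVALID_PARAMS;
 */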

bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
{
	return irqchip_in_kernel(kvm);
}

bool kvm_arch_has_irq_bypass(void)
{
	return true;
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
					  &irqfd->irq_entry);
}
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
				     &irqfd->irq_entry);
}

void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_arm_halt_guest(irqfd->kvm);
}

void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_arm_resume_guest(irqfd->kvm);
}

/* Initialize Hyp-mode and memory mappings on all CPUs */
static __init int kvm_arm_init(void)
{
	int err;
	bool in_hyp_mode;

	if (!is_hyp_mode_available()) {
		kvm_info("HYP mode not available\n");
		return -ENODEV;
	}

	if (kvm_get_mode() == KVM_MODE_NONE) {
		kvm_info("KVM disabled from command line\n");
		return -ENODEV;
	}

	err = kvm_sys_reg_table_init();
	if (err) {
		kvm_info("Error initializing system register tables\n");
		return err;
	}

	in_hyp_mode = is_kernel_in_hyp_mode();

	if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
	    cpus_have_final_cap(ARM64_WORKAROUND_1508412))
		kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
			 "Only trusted guests should be used on this system.\n");

	err = kvm_set_ipa_limit();
	if (err)
		return err;

	err = kvm_arm_init_sve();
	if (err)
		return err;

	err = kvm_arm_vmid_alloc_init();
	if (err) {
		kvm_err("Failed to initialize VMID allocator.\n");
		return err;
	}

	if (!in_hyp_mode) {
		err = init_hyp_mode();
		if (err)
			goto out_err;
	}

	err = kvm_init_vector_slots();
	if (err) {
		kvm_err("Cannot initialise vector slots\n");
		goto out_hyp;
	}

	err = init_subsystems();
	if (err)
		goto out_hyp;

	if (is_protected_kvm_enabled()) {
		kvm_info("Protected nVHE mode initialized successfully\n");
	} else if (in_hyp_mode) {
		kvm_info("VHE mode initialized successfully\n");
	} else {
		kvm_info("Hyp mode initialized successfully\n");
	}

	/*
	 * FIXME: Do something reasonable if kvm_init() fails after pKVM
	 * hypervisor protection is finalized.
	 */
	err = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (err)
		goto out_subs;

	return 0;

out_subs:
	teardown_subsystems();
out_hyp:
	if (!in_hyp_mode)
		teardown_hyp_mode();
out_err:
	kvm_arm_vmid_alloc_free();
	return err;
}
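
/*
 * Parse the "kvm-arm.mode" early parameter. Values accepted by the handler
 * below (example usage: booting with kvm-arm.mode=protected):
 *
 *	none		- disable KVM entirely
 *	protected	- protected (pKVM) nVHE mode; ignored with a warning
 *			  when the kernel already runs in hyp mode (VHE)
 *	nvhe		- standard nVHE mode
 *	nested		- nested virtualization (requires VHE)
 */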
static int __init early_kvm_mode_cfg(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (strcmp(arg, "none") == 0) {
		kvm_mode = KVM_MODE_NONE;
		return 0;
	}

	if (!is_hyp_mode_available()) {
		pr_warn_once("KVM is not available. Ignoring kvm-arm.mode\n");
		return 0;
	}

	if (strcmp(arg, "protected") == 0) {
		if (!is_kernel_in_hyp_mode())
			kvm_mode = KVM_MODE_PROTECTED;
		else
			pr_warn_once("Protected KVM not available with VHE\n");

		return 0;
	}

	if (strcmp(arg, "nvhe") == 0 && !WARN_ON(is_kernel_in_hyp_mode())) {
		kvm_mode = KVM_MODE_DEFAULT;
		return 0;
	}

	if (strcmp(arg, "nested") == 0 && !WARN_ON(!is_kernel_in_hyp_mode())) {
		kvm_mode = KVM_MODE_NV;
		return 0;
	}

	return -EINVAL;
}
early_param("kvm-arm.mode", early_kvm_mode_cfg);

enum kvm_mode kvm_get_mode(void)
{
	return kvm_mode;
}

module_init(kvm_arm_init);