Lines matching +full:timer-cannot-wake-cpu (arch/arm64/kvm/arm.c)
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
9 #include <linux/entry-kvm.h>
86 int r = -EINVAL; in kvm_vm_ioctl_enable_cap()
88 if (cap->flags) in kvm_vm_ioctl_enable_cap()
89 return -EINVAL; in kvm_vm_ioctl_enable_cap()
91 if (kvm_vm_is_protected(kvm) && !kvm_pvm_ext_allowed(cap->cap)) in kvm_vm_ioctl_enable_cap()
92 return -EINVAL; in kvm_vm_ioctl_enable_cap()
94 switch (cap->cap) { in kvm_vm_ioctl_enable_cap()
98 &kvm->arch.flags); in kvm_vm_ioctl_enable_cap()
101 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
102 if (system_supports_mte() && !kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
104 set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags); in kvm_vm_ioctl_enable_cap()
106 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
110 set_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags); in kvm_vm_ioctl_enable_cap()
113 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_enable_cap()
119 u64 new_cap = cap->args[0]; in kvm_vm_ioctl_enable_cap()
123 kvm->arch.mmu.split_page_chunk_size = new_cap; in kvm_vm_ioctl_enable_cap()
126 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_enable_cap()
129 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
130 if (!kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
132 set_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags); in kvm_vm_ioctl_enable_cap()
134 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
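The cases above are reached through the KVM_ENABLE_CAP ioctl on the VM file descriptor. A minimal userspace sketch, assuming the usual /dev/kvm and KVM_CREATE_VM setup, that enables KVM_CAP_ARM_MTE before any vCPU exists:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
        struct kvm_enable_cap cap;
        int kvm_fd, vm_fd;

        kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
        vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
        if (kvm_fd < 0 || vm_fd < 0) {
                perror("kvm setup");
                return 1;
        }

        memset(&cap, 0, sizeof(cap));
        cap.cap = KVM_CAP_ARM_MTE;      /* flags/args must stay zero for this cap */

        /* Must happen before the first KVM_CREATE_VCPU, per the check above. */
        if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap)) {
                perror("KVM_ENABLE_CAP(KVM_CAP_ARM_MTE)");
                return 1;
        }
        return 0;
}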
149 * kvm_arch_init_vm - initializes a VM data structure
157 mutex_init(&kvm->arch.config_lock); in kvm_arch_init_vm()
160 /* Clue in lockdep that the config_lock must be taken inside kvm->lock */ in kvm_arch_init_vm()
161 mutex_lock(&kvm->lock); in kvm_arch_init_vm()
162 mutex_lock(&kvm->arch.config_lock); in kvm_arch_init_vm()
163 mutex_unlock(&kvm->arch.config_lock); in kvm_arch_init_vm()
164 mutex_unlock(&kvm->lock); in kvm_arch_init_vm()
177 if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL_ACCOUNT)) { in kvm_arch_init_vm()
178 ret = -ENOMEM; in kvm_arch_init_vm()
181 cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask); in kvm_arch_init_vm()
183 ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu, type); in kvm_arch_init_vm()
192 kvm->max_vcpus = kvm_arm_default_max_vcpus(); in kvm_arch_init_vm()
196 bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES); in kvm_arch_init_vm()
201 free_cpumask_var(kvm->arch.supported_cpus); in kvm_arch_init_vm()
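The empty lock/unlock pair at lines 160-164 (and the matching one in kvm_arch_vcpu_create() further down) exists only to teach lockdep the intended nesting: config_lock is always taken inside kvm->lock (or inside vcpu->mutex). A minimal sketch of the idiom, using illustrative lock names rather than the kernel's:

#include <linux/mutex.h>

static DEFINE_MUTEX(outer_lock);        /* stands in for kvm->lock */
static DEFINE_MUTEX(inner_lock);        /* stands in for kvm->arch.config_lock */

static void prime_lock_ordering(void)
{
        /*
         * Nothing is protected here; the locks are taken once in the
         * documented order so lockdep records "inner nests inside outer"
         * and can flag any future inner -> outer acquisition as a
         * potential deadlock.
         */
        mutex_lock(&outer_lock);
        mutex_lock(&inner_lock);
        mutex_unlock(&inner_lock);
        mutex_unlock(&outer_lock);
}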
222 mutex_lock(&kvm->arch.config_lock); in kvm_destroy_mpidr_data()
224 data = rcu_dereference_protected(kvm->arch.mpidr_data, in kvm_destroy_mpidr_data()
225 lockdep_is_held(&kvm->arch.config_lock)); in kvm_destroy_mpidr_data()
227 rcu_assign_pointer(kvm->arch.mpidr_data, NULL); in kvm_destroy_mpidr_data()
232 mutex_unlock(&kvm->arch.config_lock); in kvm_destroy_mpidr_data()
236 * kvm_arch_destroy_vm - destroy the VM data structure
241 bitmap_free(kvm->arch.pmu_filter); in kvm_arch_destroy_vm()
242 free_cpumask_var(kvm->arch.supported_cpus); in kvm_arch_destroy_vm()
251 kfree(kvm->arch.sysreg_masks); in kvm_arch_destroy_vm()
267 * - both Address and Generic auth are implemented for a given in kvm_has_full_ptr_auth()
269 * - only a single algorithm is implemented. in kvm_has_full_ptr_auth()
345 r = kvm->max_vcpus; in kvm_vm_ioctl_check_extension()
351 r = -EINVAL; in kvm_vm_ioctl_check_extension()
353 r = kvm->arch.vgic.msis_require_devid; in kvm_vm_ioctl_check_extension()
395 r = kvm->arch.mmu.split_page_chunk_size; in kvm_vm_ioctl_check_extension()
415 return -EINVAL; in kvm_arch_dev_ioctl()
431 return -EBUSY; in kvm_arch_vcpu_precreate()
433 if (id >= kvm->max_vcpus) in kvm_arch_vcpu_precreate()
434 return -EINVAL; in kvm_arch_vcpu_precreate()
443 spin_lock_init(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_create()
446 /* Inform lockdep that the config_lock is acquired after vcpu->mutex */ in kvm_arch_vcpu_create()
447 mutex_lock(&vcpu->mutex); in kvm_arch_vcpu_create()
448 mutex_lock(&vcpu->kvm->arch.config_lock); in kvm_arch_vcpu_create()
449 mutex_unlock(&vcpu->kvm->arch.config_lock); in kvm_arch_vcpu_create()
450 mutex_unlock(&vcpu->mutex); in kvm_arch_vcpu_create()
456 vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO; in kvm_arch_vcpu_create()
458 /* Set up the timer */ in kvm_arch_vcpu_create()
463 kvm_arm_pvtime_vcpu_init(&vcpu->arch); in kvm_arch_vcpu_create()
465 vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu; in kvm_arch_vcpu_create()
469 * Throw out the pre-computed mappings if that is the case which forces in kvm_arch_vcpu_create()
472 kvm_destroy_mpidr_data(vcpu->kvm); in kvm_arch_vcpu_create()
492 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); in kvm_arch_vcpu_destroy()
494 free_hyp_memcache(&vcpu->arch.pkvm_memcache); in kvm_arch_vcpu_destroy()
523 vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK); in vcpu_set_pauth_traps()
524 vcpu->arch.hcr_el2 |= val; in vcpu_set_pauth_traps()
526 vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK); in vcpu_set_pauth_traps()
534 if (vcpu->arch.hcr_el2 & (HCR_API | HCR_APK)) { in vcpu_set_pauth_traps()
549 (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) || in kvm_vcpu_should_clear_twi()
550 vcpu->kvm->arch.vgic.nassgireq); in kvm_vcpu_should_clear_twi()
561 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
572 mmu = vcpu->arch.hw_mmu; in kvm_arch_vcpu_load()
573 last_ran = this_cpu_ptr(mmu->last_vcpu_ran); in kvm_arch_vcpu_load()
583 kvm_arm_vmid_update(&mmu->vmid); in kvm_arch_vcpu_load()
586 * We guarantee that both TLBs and I-cache are private to each in kvm_arch_vcpu_load()
588 * previously run on the same physical CPU, call into the in kvm_arch_vcpu_load()
592 * over-invalidation doesn't affect correctness. in kvm_arch_vcpu_load()
594 if (*last_ran != vcpu->vcpu_idx) { in kvm_arch_vcpu_load()
596 *last_ran = vcpu->vcpu_idx; in kvm_arch_vcpu_load()
600 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
603 * The timer must be loaded before the vgic to correctly set up physical in kvm_arch_vcpu_load()
604 * interrupt deactivation in nested state (e.g. timer interrupt). in kvm_arch_vcpu_load()
613 if (kvm_arm_is_pvtime_enabled(&vcpu->arch)) in kvm_arch_vcpu_load()
617 vcpu->arch.hcr_el2 &= ~HCR_TWE; in kvm_arch_vcpu_load()
619 vcpu->arch.hcr_el2 |= HCR_TWE; in kvm_arch_vcpu_load()
622 vcpu->arch.hcr_el2 &= ~HCR_TWI; in kvm_arch_vcpu_load()
624 vcpu->arch.hcr_el2 |= HCR_TWI; in kvm_arch_vcpu_load()
630 vcpu->kvm->arch.pkvm.handle, in kvm_arch_vcpu_load()
631 vcpu->vcpu_idx, vcpu->arch.hcr_el2); in kvm_arch_vcpu_load()
633 &vcpu->arch.vgic_cpu.vgic_v3); in kvm_arch_vcpu_load()
636 if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus)) in kvm_arch_vcpu_load()
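The last_ran logic above (lines 572-596) is an ownership cache: each physical CPU remembers which vCPU of this stage-2 MMU ran on it last, and local TLB/I-cache maintenance is issued only when a different vCPU is being loaded. A stripped-down sketch of the pattern, with a placeholder maintenance hook standing in for the real hypercall:

#include <linux/percpu.h>

static DEFINE_PER_CPU(int, last_vcpu_ran) = -1;

/* Placeholder: stands in for the TLB/I-cache maintenance done via hyp. */
static void flush_private_context(int vcpu_idx) { }

static void load_vcpu_on_this_cpu(int vcpu_idx)
{
        int *last = this_cpu_ptr(&last_vcpu_ran);

        /* Only pay for maintenance when a different vCPU ran here last. */
        if (*last != vcpu_idx) {
                flush_private_context(vcpu_idx);
                *last = vcpu_idx;
        }
}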
644 &vcpu->arch.vgic_cpu.vgic_v3); in kvm_arch_vcpu_put()
660 vcpu->cpu = -1; in kvm_arch_vcpu_put()
665 WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED); in __kvm_arm_vcpu_power_off()
672 spin_lock(&vcpu->arch.mp_state_lock); in kvm_arm_vcpu_power_off()
674 spin_unlock(&vcpu->arch.mp_state_lock); in kvm_arm_vcpu_power_off()
679 return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED; in kvm_arm_vcpu_stopped()
684 WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED); in kvm_arm_vcpu_suspend()
691 return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED; in kvm_arm_vcpu_suspended()
697 *mp_state = READ_ONCE(vcpu->arch.mp_state); in kvm_arch_vcpu_ioctl_get_mpstate()
707 spin_lock(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_ioctl_set_mpstate()
709 switch (mp_state->mp_state) { in kvm_arch_vcpu_ioctl_set_mpstate()
711 WRITE_ONCE(vcpu->arch.mp_state, *mp_state); in kvm_arch_vcpu_ioctl_set_mpstate()
720 ret = -EINVAL; in kvm_arch_vcpu_ioctl_set_mpstate()
723 spin_unlock(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_ioctl_set_mpstate()
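These helpers back the KVM_GET_MP_STATE/KVM_SET_MP_STATE vCPU ioctls. A short userspace sketch (vcpu_fd assumed to come from KVM_CREATE_VCPU) that parks a vCPU and later checks whether it is still stopped:

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int stop_vcpu(int vcpu_fd)
{
        struct kvm_mp_state st = { .mp_state = KVM_MP_STATE_STOPPED };

        return ioctl(vcpu_fd, KVM_SET_MP_STATE, &st);
}

static int vcpu_stopped(int vcpu_fd)
{
        struct kvm_mp_state st;

        if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &st))
                return -1;
        return st.mp_state == KVM_MP_STATE_STOPPED;
}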
729 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
732 * If the guest CPU is not waiting for interrupts or an interrupt line is
733 * asserted, the CPU is by definition runnable.
739 && !kvm_arm_vcpu_stopped(v) && !v->arch.pause); in kvm_arch_vcpu_runnable()
761 mutex_lock(&kvm->arch.config_lock); in kvm_init_mpidr_data()
763 if (rcu_access_pointer(kvm->arch.mpidr_data) || in kvm_init_mpidr_data()
764 atomic_read(&kvm->online_vcpus) == 1) in kvm_init_mpidr_data()
792 data->mpidr_mask = mask; in kvm_init_mpidr_data()
798 data->cmpidr_to_idx[index] = c; in kvm_init_mpidr_data()
801 rcu_assign_pointer(kvm->arch.mpidr_data, data); in kvm_init_mpidr_data()
803 mutex_unlock(&kvm->arch.config_lock); in kvm_init_mpidr_data()
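kvm_init_mpidr_data() builds a lookup table keyed by the MPIDR affinity bits that actually differ between vCPUs: those bits are collected into mpidr_mask and compressed into a small index into cmpidr_to_idx[]. A generic sketch of that bit-gathering step, as an illustration of the idea rather than the kernel's exact helper:

/*
 * Squeeze the masked bits of an MPIDR together, low bit first, so that a
 * sparse affinity encoding becomes a dense table index.
 */
static unsigned int compress_mpidr(unsigned long mpidr, unsigned long mask)
{
        unsigned int idx = 0, out = 0;

        for (; mask; mask &= mask - 1, out++) {
                unsigned long bit = mask & -mask;       /* lowest remaining mask bit */

                if (mpidr & bit)
                        idx |= 1U << out;
        }
        return idx;
}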
813 struct kvm *kvm = vcpu->kvm; in kvm_arch_vcpu_run_pid_change()
817 return -ENOEXEC; in kvm_arch_vcpu_run_pid_change()
820 return -EPERM; in kvm_arch_vcpu_run_pid_change()
877 mutex_lock(&kvm->arch.config_lock); in kvm_arch_vcpu_run_pid_change()
878 set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags); in kvm_arch_vcpu_run_pid_change()
879 mutex_unlock(&kvm->arch.config_lock); in kvm_arch_vcpu_run_pid_change()
895 vcpu->arch.pause = true; in kvm_arm_halt_guest()
905 vcpu->arch.pause = false; in kvm_arm_resume_guest()
915 (!kvm_arm_vcpu_stopped(vcpu)) && (!vcpu->arch.pause), in kvm_vcpu_sleep()
918 if (kvm_arm_vcpu_stopped(vcpu) || vcpu->arch.pause) { in kvm_vcpu_sleep()
932 * kvm_vcpu_wfi - emulate Wait-For-Interrupt behavior
935 * Suspend execution of a vCPU until a valid wake event is detected, i.e. until
937 * on when a wake event arrives, e.g. there may already be a pending wake event.
942 * Sync back the state of the GIC CPU interface so that we have in kvm_vcpu_wfi()
944 * kvm_arch_vcpu_runnable has up-to-date data to decide whether in kvm_vcpu_wfi()
984 memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event)); in kvm_vcpu_suspend()
985 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_WAKEUP; in kvm_vcpu_suspend()
986 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in kvm_vcpu_suspend()
999 * check_vcpu_requests - check and handle pending vCPU requests
1011 return -EIO; in check_vcpu_requests()
1066 * kvm_vcpu_exit_request - returns true if the VCPU should *not* enter the guest
1077 * for pending work and re-enter), return true without writing to ret.
1081 struct kvm_run *run = vcpu->run; in kvm_vcpu_exit_request()
1085 * to tell a userspace irqchip about timer or PMU level in kvm_vcpu_exit_request()
1090 if (unlikely(!irqchip_in_kernel(vcpu->kvm))) { in kvm_vcpu_exit_request()
1093 *ret = -EINTR; in kvm_vcpu_exit_request()
1094 run->exit_reason = KVM_EXIT_INTR; in kvm_vcpu_exit_request()
1100 run->exit_reason = KVM_EXIT_FAIL_ENTRY; in kvm_vcpu_exit_request()
1101 run->fail_entry.hardware_entry_failure_reason = KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED; in kvm_vcpu_exit_request()
1102 run->fail_entry.cpu = smp_processor_id(); in kvm_vcpu_exit_request()
1130 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
1141 struct kvm_run *run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
1144 if (run->exit_reason == KVM_EXIT_MMIO) { in kvm_arch_vcpu_ioctl_run()
1152 if (!vcpu->wants_to_run) { in kvm_arch_vcpu_ioctl_run()
1153 ret = -EINTR; in kvm_arch_vcpu_ioctl_run()
1160 run->exit_reason = KVM_EXIT_UNKNOWN; in kvm_arch_vcpu_ioctl_run()
1161 run->flags = 0; in kvm_arch_vcpu_ioctl_run()
1176 * non-preemptible context. in kvm_arch_vcpu_ioctl_run()
1193 * Documentation/virt/kvm/vcpu-requests.rst in kvm_arch_vcpu_ioctl_run()
1195 smp_store_mb(vcpu->mode, IN_GUEST_MODE); in kvm_arch_vcpu_ioctl_run()
1198 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
1202 if (unlikely(!irqchip_in_kernel(vcpu->kvm))) in kvm_arch_vcpu_ioctl_run()
1220 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
1221 vcpu->stat.exits++; in kvm_arch_vcpu_ioctl_run()
1235 * Sync the vgic state before syncing the timer state because in kvm_arch_vcpu_ioctl_run()
1236 * the timer code needs to know if the virtual timer in kvm_arch_vcpu_ioctl_run()
1242 * Sync the timer hardware state before enabling interrupts as in kvm_arch_vcpu_ioctl_run()
1244 * timer virtual interrupt state. in kvm_arch_vcpu_ioctl_run()
1246 if (unlikely(!irqchip_in_kernel(vcpu->kvm))) in kvm_arch_vcpu_ioctl_run()
1256 * we exit guest timing so that timer ticks are accounted as in kvm_arch_vcpu_ioctl_run()
1284 * if implemented by the CPU. If we spot the guest in such in kvm_arch_vcpu_ioctl_run()
1291 * As we have caught the guest red-handed, decide that in kvm_arch_vcpu_ioctl_run()
1303 /* Tell userspace about in-kernel device output levels */ in kvm_arch_vcpu_ioctl_run()
1304 if (unlikely(!irqchip_in_kernel(vcpu->kvm))) { in kvm_arch_vcpu_ioctl_run()
1317 * being preempt-safe on VHE. in kvm_arch_vcpu_ioctl_run()
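The exit reasons set above (KVM_EXIT_MMIO at the top of the function, KVM_EXIT_INTR, KVM_EXIT_FAIL_ENTRY, and the KVM_EXIT_SYSTEM_EVENT wakeup seen earlier) all land in the shared kvm_run structure that userspace mmap()s from the vCPU fd. A rough sketch of the matching userspace loop around KVM_RUN, with vcpu_fd and the mapping assumed to be set up elsewhere:

#include <errno.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void run_loop(int vcpu_fd, struct kvm_run *run)
{
        for (;;) {
                if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
                        if (errno == EINTR)
                                continue;       /* signal delivery; just retry */
                        perror("KVM_RUN");
                        return;
                }

                switch (run->exit_reason) {
                case KVM_EXIT_INTR:
                        continue;
                case KVM_EXIT_MMIO:
                        /* decode run->mmio, emulate the access, then re-enter */
                        continue;
                case KVM_EXIT_SYSTEM_EVENT:
                        /* e.g. KVM_SYSTEM_EVENT_WAKEUP from a suspended vCPU */
                        continue;
                case KVM_EXIT_FAIL_ENTRY:
                        fprintf(stderr, "entry failed on cpu %u\n",
                                run->fail_entry.cpu);
                        return;
                default:
                        fprintf(stderr, "unhandled exit %u\n", run->exit_reason);
                        return;
                }
        }
}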
1345 * If we didn't change anything, no need to wake up or kick other CPUs in vcpu_interrupt_line()
1351 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and in vcpu_interrupt_line()
1352 * trigger a world-switch round on the running physical CPU to set the in vcpu_interrupt_line()
1364 u32 irq = irq_level->irq; in kvm_vm_ioctl_irq_line()
1367 bool level = irq_level->level; in kvm_vm_ioctl_irq_line()
1374 trace_kvm_irq_line(irq_type, vcpu_id, irq_num, irq_level->level); in kvm_vm_ioctl_irq_line()
1379 return -ENXIO; in kvm_vm_ioctl_irq_line()
1383 return -EINVAL; in kvm_vm_ioctl_irq_line()
1386 return -EINVAL; in kvm_vm_ioctl_irq_line()
1391 return -ENXIO; in kvm_vm_ioctl_irq_line()
1395 return -EINVAL; in kvm_vm_ioctl_irq_line()
1398 return -EINVAL; in kvm_vm_ioctl_irq_line()
1403 return -ENXIO; in kvm_vm_ioctl_irq_line()
1406 return -EINVAL; in kvm_vm_ioctl_irq_line()
1411 return -EINVAL; in kvm_vm_ioctl_irq_line()
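kvm_vm_ioctl_irq_line() services the KVM_IRQ_LINE VM ioctl; the irq field packs an interrupt type, a vCPU index (for PPIs and CPU-level lines) and an interrupt number, using the KVM_ARM_IRQ_* encoding from the arm64 uapi header. A sketch that raises or lowers an SPI on an in-kernel vGIC, with vm_fd assumed to exist and SPI numbering starting at 32 to match the range checks above:

#include <sys/ioctl.h>
#include <linux/kvm.h>  /* pulls in the arm64 KVM_ARM_IRQ_* encoding */

static int set_spi_level(int vm_fd, unsigned int spi, int level)
{
        struct kvm_irq_level irq = {
                .irq   = (KVM_ARM_IRQ_TYPE_SPI << KVM_ARM_IRQ_TYPE_SHIFT) |
                         (spi & KVM_ARM_IRQ_NUM_MASK),
                .level = level,
        };

        return ioctl(vm_fd, KVM_IRQ_LINE, &irq);
}

/* Example usage: pulse SPI 33 with set_spi_level(vm_fd, 33, 1) then ..., 0). */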
1441 unsigned long features = init->features[0]; in kvm_vcpu_init_check_features()
1445 return -ENOENT; in kvm_vcpu_init_check_features()
1447 for (i = 1; i < ARRAY_SIZE(init->features); i++) { in kvm_vcpu_init_check_features()
1448 if (init->features[i]) in kvm_vcpu_init_check_features()
1449 return -ENOENT; in kvm_vcpu_init_check_features()
1453 return -EINVAL; in kvm_vcpu_init_check_features()
1461 return -EINVAL; in kvm_vcpu_init_check_features()
1467 if (kvm_has_mte(vcpu->kvm)) in kvm_vcpu_init_check_features()
1468 return -EINVAL; in kvm_vcpu_init_check_features()
1472 return -EINVAL; in kvm_vcpu_init_check_features()
1480 unsigned long features = init->features[0]; in kvm_vcpu_init_changed()
1482 return !bitmap_equal(vcpu->kvm->arch.vcpu_features, &features, in kvm_vcpu_init_changed()
1488 struct kvm *kvm = vcpu->kvm; in kvm_setup_vcpu()
1495 if (kvm_vcpu_has_pmu(vcpu) && !kvm->arch.arm_pmu) in kvm_setup_vcpu()
1508 unsigned long features = init->features[0]; in __kvm_vcpu_set_target()
1509 struct kvm *kvm = vcpu->kvm; in __kvm_vcpu_set_target()
1510 int ret = -EINVAL; in __kvm_vcpu_set_target()
1512 mutex_lock(&kvm->arch.config_lock); in __kvm_vcpu_set_target()
1514 if (test_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags) && in __kvm_vcpu_set_target()
1518 bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES); in __kvm_vcpu_set_target()
1527 set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags); in __kvm_vcpu_set_target()
1531 mutex_unlock(&kvm->arch.config_lock); in __kvm_vcpu_set_target()
1540 if (init->target != KVM_ARM_TARGET_GENERIC_V8 && in kvm_vcpu_set_target()
1541 init->target != kvm_target_cpu()) in kvm_vcpu_set_target()
1542 return -EINVAL; in kvm_vcpu_set_target()
1552 return -EINVAL; in kvm_vcpu_set_target()
1565 * Treat the power-off vCPU feature as ephemeral. Clear the bit to avoid in kvm_arch_vcpu_ioctl_vcpu_init()
1569 if (init->features[0] & BIT(KVM_ARM_VCPU_POWER_OFF)) { in kvm_arch_vcpu_ioctl_vcpu_init()
1570 init->features[0] &= ~BIT(KVM_ARM_VCPU_POWER_OFF); in kvm_arch_vcpu_ioctl_vcpu_init()
1584 * need to invalidate the I-cache though, as FWB does *not* in kvm_arch_vcpu_ioctl_vcpu_init()
1589 stage2_unmap_vm(vcpu->kvm); in kvm_arch_vcpu_ioctl_vcpu_init()
1597 * Handle the "start in power-off" case. in kvm_arch_vcpu_ioctl_vcpu_init()
1599 spin_lock(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_ioctl_vcpu_init()
1604 WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE); in kvm_arch_vcpu_ioctl_vcpu_init()
1606 spin_unlock(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_ioctl_vcpu_init()
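The feature validation and the "start in power-off" handling above are driven by the KVM_ARM_VCPU_INIT vCPU ioctl. The usual userspace sequence asks the VM for a preferred target first and then adds optional feature bits; a minimal sketch, with vm_fd/vcpu_fd assumed to be created elsewhere:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int init_vcpu(int vm_fd, int vcpu_fd)
{
        struct kvm_vcpu_init init;

        memset(&init, 0, sizeof(init));
        if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init))
                return -1;

        init.features[0] |= 1U << KVM_ARM_VCPU_PSCI_0_2;        /* PSCI 0.2 firmware interface */
        init.features[0] |= 1U << KVM_ARM_VCPU_POWER_OFF;       /* ephemeral, per the code above */

        return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}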
1614 int ret = -ENXIO; in kvm_arm_vcpu_set_attr()
1616 switch (attr->group) { in kvm_arm_vcpu_set_attr()
1628 int ret = -ENXIO; in kvm_arm_vcpu_get_attr()
1630 switch (attr->group) { in kvm_arm_vcpu_get_attr()
1642 int ret = -ENXIO; in kvm_arm_vcpu_has_attr()
1644 switch (attr->group) { in kvm_arm_vcpu_has_attr()
1667 for (i = 0; i < ARRAY_SIZE(events->reserved); i++) in kvm_arm_vcpu_set_events()
1668 if (events->reserved[i]) in kvm_arm_vcpu_set_events()
1669 return -EINVAL; in kvm_arm_vcpu_set_events()
1672 for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++) in kvm_arm_vcpu_set_events()
1673 if (events->exception.pad[i]) in kvm_arm_vcpu_set_events()
1674 return -EINVAL; in kvm_arm_vcpu_set_events()
1682 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
1691 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1702 r = -ENOEXEC; in kvm_arch_vcpu_ioctl()
1706 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1729 r = -ENOEXEC; in kvm_arch_vcpu_ioctl()
1733 r = -EPERM; in kvm_arch_vcpu_ioctl()
1737 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1744 r = -E2BIG; in kvm_arch_vcpu_ioctl()
1747 r = kvm_arm_copy_reg_indices(vcpu, user_list->reg); in kvm_arch_vcpu_ioctl()
1751 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1758 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1765 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1775 return -EINVAL; in kvm_arch_vcpu_ioctl()
1778 return -EFAULT; in kvm_arch_vcpu_ioctl()
1786 return -EFAULT; in kvm_arch_vcpu_ioctl()
1794 return -ENOEXEC; in kvm_arch_vcpu_ioctl()
1797 return -EFAULT; in kvm_arch_vcpu_ioctl()
1802 r = -EINVAL; in kvm_arch_vcpu_ioctl()
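The KVM_GET_REG_LIST branch above (note the -E2BIG return when the supplied count is too small) implements the usual two-call protocol: probe for the number of registers, then fetch the IDs. A userspace sketch, with vcpu_fd assumed and the caller owning the returned buffer:

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static struct kvm_reg_list *get_reg_list(int vcpu_fd)
{
        struct kvm_reg_list probe = { .n = 0 };
        struct kvm_reg_list *list;

        /* First call fails with E2BIG but writes back the required count. */
        ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

        list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
        if (!list)
                return NULL;

        list->n = probe.n;
        if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list)) {
                free(list);
                return NULL;
        }
        return list;
}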
1816 switch (FIELD_GET(KVM_ARM_DEVICE_ID_MASK, dev_addr->id)) { in kvm_vm_ioctl_set_device_addr()
1819 return -ENXIO; in kvm_vm_ioctl_set_device_addr()
1822 return -ENODEV; in kvm_vm_ioctl_set_device_addr()
1828 switch (attr->group) { in kvm_vm_has_attr()
1832 return -ENXIO; in kvm_vm_has_attr()
1838 switch (attr->group) { in kvm_vm_set_attr()
1842 return -ENXIO; in kvm_vm_set_attr()
1848 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
1856 return -ENXIO; in kvm_arch_vm_ioctl()
1857 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
1859 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
1866 return -EFAULT; in kvm_arch_vm_ioctl()
1875 return -EFAULT; in kvm_arch_vm_ioctl()
1883 return -EFAULT; in kvm_arch_vm_ioctl()
1890 return -EFAULT; in kvm_arch_vm_ioctl()
1895 return -EFAULT; in kvm_arch_vm_ioctl()
1901 return -EFAULT; in kvm_arch_vm_ioctl()
1909 return -EFAULT; in kvm_arch_vm_ioctl()
1913 return -EINVAL; in kvm_arch_vm_ioctl()
1922 for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) { in unlock_vcpus()
1924 mutex_unlock(&tmp_vcpu->mutex); in unlock_vcpus()
1930 lockdep_assert_held(&kvm->lock); in unlock_all_vcpus()
1932 unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1); in unlock_all_vcpus()
1941 lockdep_assert_held(&kvm->lock); in lock_all_vcpus()
1945 * core KVM code tries to grab the vcpu->mutex. in lock_all_vcpus()
1947 * By grabbing the vcpu->mutex of all VCPUs we ensure that no in lock_all_vcpus()
1951 if (!mutex_trylock(&tmp_vcpu->mutex)) { in lock_all_vcpus()
1952 unlock_vcpus(kvm, c - 1); in lock_all_vcpus()
1962 return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) - in nvhe_percpu_size()
2010 static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits) in cpu_prepare_hyp_mode() argument
2012 struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu); in cpu_prepare_hyp_mode()
2016 * Calculate the raw per-cpu offset without a translation from the in cpu_prepare_hyp_mode()
2018 * so that we can use adr_l to access per-cpu variables in EL2. in cpu_prepare_hyp_mode()
2021 params->tpidr_el2 = (unsigned long)kasan_reset_tag(per_cpu_ptr_nvhe_sym(__per_cpu_start, cpu)) - in cpu_prepare_hyp_mode()
2024 params->mair_el2 = read_sysreg(mair_el1); in cpu_prepare_hyp_mode()
2039 params->tcr_el2 = tcr; in cpu_prepare_hyp_mode()
2041 params->pgd_pa = kvm_mmu_get_httbr(); in cpu_prepare_hyp_mode()
2043 params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS; in cpu_prepare_hyp_mode()
2045 params->hcr_el2 = HCR_HOST_NVHE_FLAGS; in cpu_prepare_hyp_mode()
2047 params->hcr_el2 |= HCR_E2H; in cpu_prepare_hyp_mode()
2048 params->vttbr = params->vtcr = 0; in cpu_prepare_hyp_mode()
2082 * Disabling SSBD on a non-VHE system requires us to enable SSBS in cpu_init_hyp_mode()
2099 * depending on the kernel configuration and CPU present:
2101 * - If the CPU is affected by Spectre-v2, the hardening sequence is
2105 * - If the CPU also has the ARM64_SPECTRE_V3A cap, the slot
2109 * - If the CPU only has the ARM64_SPECTRE_V3A cap, then an
2114 * VHE, as we don't have hypervisor-specific mappings. If the system
2120 void *vector = hyp_spectre_vector_selector[data->slot]; in cpu_set_hyp_vector()
2125 kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot); in cpu_set_hyp_vector()
2207 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should in hyp_init_cpu_pm_notifier()
2208 * re-enable hyp. in hyp_init_cpu_pm_notifier()
2215 * so that the hyp will be re-enabled in hyp_init_cpu_pm_notifier()
2259 unsigned int cpu; in init_cpu_logical_map() local
2262 * Copy the MPIDR <-> logical CPU ID mapping to hyp. in init_cpu_logical_map()
2267 for_each_online_cpu(cpu) in init_cpu_logical_map()
2268 hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu); in init_cpu_logical_map()
2277 * If PSCI has not been initialized, protected KVM cannot install in init_psci_relay()
2281 kvm_err("Cannot initialize protected mode without PSCI\n"); in init_psci_relay()
2308 * Register CPU lower-power notifier in init_subsystems()
2320 case -ENODEV: in init_subsystems()
2321 case -ENXIO: in init_subsystems()
2333 * guest on non-cooperative hardware. in init_subsystems()
2345 err = -EINVAL; in init_subsystems()
2350 * Init HYP architected timer support in init_subsystems()
2377 int cpu; in teardown_hyp_mode() local
2380 for_each_possible_cpu(cpu) { in teardown_hyp_mode()
2381 free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT); in teardown_hyp_mode()
2382 free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order()); in teardown_hyp_mode()
2387 sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state; in teardown_hyp_mode()
2407 * prevent a later re-init attempt in kvm_arch_enable_virtualization_cpu(). in do_pkvm_init()
2420 * Although this is per-CPU, we make it global for simplicity, e.g., not in get_hyp_id_aa64pfr0_el1()
2423 * Unlike for non-protected VMs, userspace cannot override this for in get_hyp_id_aa64pfr0_el1()
2458 kvm_ksym_ref(__hyp_bss_end) - kvm_ksym_ref(__hyp_bss_start)); in kvm_hyp_init_symbols()
2481 int cpu; in init_pkvm_host_sve_state() local
2487 for_each_possible_cpu(cpu) { in init_pkvm_host_sve_state()
2491 return -ENOMEM; in init_pkvm_host_sve_state()
2493 per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = page_address(page); in init_pkvm_host_sve_state()
2506 * and the initialization process cannot fail.
2510 int cpu; in finalize_init_hyp_mode() local
2513 for_each_possible_cpu(cpu) { in finalize_init_hyp_mode()
2516 sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state; in finalize_init_hyp_mode()
2517 per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = in finalize_init_hyp_mode()
2526 int cpu; in pkvm_hyp_init_ptrauth() local
2528 for_each_possible_cpu(cpu) { in pkvm_hyp_init_ptrauth()
2529 hyp_ctxt = per_cpu_ptr_nvhe_sym(kvm_hyp_ctxt, cpu); in pkvm_hyp_init_ptrauth()
2530 hyp_ctxt->sys_regs[APIAKEYLO_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2531 hyp_ctxt->sys_regs[APIAKEYHI_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2532 hyp_ctxt->sys_regs[APIBKEYLO_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2533 hyp_ctxt->sys_regs[APIBKEYHI_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2534 hyp_ctxt->sys_regs[APDAKEYLO_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2535 hyp_ctxt->sys_regs[APDAKEYHI_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2536 hyp_ctxt->sys_regs[APDBKEYLO_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2537 hyp_ctxt->sys_regs[APDBKEYHI_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2538 hyp_ctxt->sys_regs[APGAKEYLO_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2539 hyp_ctxt->sys_regs[APGAKEYHI_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2543 /* Inits Hyp-mode on all online CPUs */
2547 int cpu; in init_hyp_mode() local
2548 int err = -ENOMEM; in init_hyp_mode()
2551 * The protected Hyp-mode cannot be initialized if the memory pool in init_hyp_mode()
2565 * Allocate stack pages for Hypervisor-mode in init_hyp_mode()
2567 for_each_possible_cpu(cpu) { in init_hyp_mode()
2570 stack_base = __get_free_pages(GFP_KERNEL, NVHE_STACK_SHIFT - PAGE_SHIFT); in init_hyp_mode()
2572 err = -ENOMEM; in init_hyp_mode()
2576 per_cpu(kvm_arm_hyp_stack_base, cpu) = stack_base; in init_hyp_mode()
2580 * Allocate and initialize pages for Hypervisor-mode percpu regions. in init_hyp_mode()
2582 for_each_possible_cpu(cpu) { in init_hyp_mode()
2588 err = -ENOMEM; in init_hyp_mode()
2594 kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu] = (unsigned long)page_addr; in init_hyp_mode()
2598 * Map the Hyp-code called directly from the host in init_hyp_mode()
2603 kvm_err("Cannot map world-switch code\n"); in init_hyp_mode()
2610 kvm_err("Cannot map .hyp.rodata section\n"); in init_hyp_mode()
2617 kvm_err("Cannot map rodata section\n"); in init_hyp_mode()
2629 kvm_err("Cannot map hyp bss section: %d\n", err); in init_hyp_mode()
2636 kvm_err("Cannot map bss section\n"); in init_hyp_mode()
2643 for_each_possible_cpu(cpu) { in init_hyp_mode()
2644 struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu); in init_hyp_mode()
2645 char *stack_base = (char *)per_cpu(kvm_arm_hyp_stack_base, cpu); in init_hyp_mode()
2647 err = create_hyp_stack(__pa(stack_base), &params->stack_hyp_va); in init_hyp_mode()
2649 kvm_err("Cannot map hyp stack\n"); in init_hyp_mode()
2659 params->stack_pa = __pa(stack_base); in init_hyp_mode()
2662 for_each_possible_cpu(cpu) { in init_hyp_mode()
2663 char *percpu_begin = (char *)kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu]; in init_hyp_mode()
2669 kvm_err("Cannot map hyp percpu region\n"); in init_hyp_mode()
2673 /* Prepare the CPU initialization parameters */ in init_hyp_mode()
2674 cpu_prepare_hyp_mode(cpu, hyp_va_bits); in init_hyp_mode()
2687 err = -ENODEV; in init_hyp_mode()
2719 data = rcu_dereference(kvm->arch.mpidr_data); in kvm_mpidr_to_vcpu()
2724 vcpu = kvm_get_vcpu(kvm, data->cmpidr_to_idx[idx]); in kvm_mpidr_to_vcpu()
2756 struct kvm_kernel_irq_routing_entry *irq_entry = &irqfd->irq_entry; in kvm_arch_irq_bypass_add_producer()
2759 * The only thing we have a chance of directly-injecting is LPIs. Maybe in kvm_arch_irq_bypass_add_producer()
2762 if (irq_entry->type != KVM_IRQ_ROUTING_MSI) in kvm_arch_irq_bypass_add_producer()
2765 return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq, in kvm_arch_irq_bypass_add_producer()
2766 &irqfd->irq_entry); in kvm_arch_irq_bypass_add_producer()
2773 struct kvm_kernel_irq_routing_entry *irq_entry = &irqfd->irq_entry; in kvm_arch_irq_bypass_del_producer()
2775 if (irq_entry->type != KVM_IRQ_ROUTING_MSI) in kvm_arch_irq_bypass_del_producer()
2778 kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq, in kvm_arch_irq_bypass_del_producer()
2779 &irqfd->irq_entry); in kvm_arch_irq_bypass_del_producer()
2787 kvm_arm_halt_guest(irqfd->kvm); in kvm_arch_irq_bypass_stop()
2795 kvm_arm_resume_guest(irqfd->kvm); in kvm_arch_irq_bypass_start()
2798 /* Initialize Hyp-mode and memory mappings on all CPUs */
2806 return -ENODEV; in kvm_arm_init()
2811 return -ENODEV; in kvm_arm_init()
2824 kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \ in kvm_arm_init()
2849 kvm_err("Cannot initialise vector slots\n"); in kvm_arm_init()
2896 return -EINVAL; in early_kvm_mode_cfg()
2904 pr_warn_once("KVM is not available. Ignoring kvm-arm.mode\n"); in early_kvm_mode_cfg()
2927 return -EINVAL; in early_kvm_mode_cfg()
2929 early_param("kvm-arm.mode", early_kvm_mode_cfg);
2934 return -EINVAL; in early_kvm_wfx_trap_policy_cfg()
2946 return -EINVAL; in early_kvm_wfx_trap_policy_cfg()
2953 early_param("kvm-arm.wfi_trap_policy", early_kvm_wfi_trap_policy_cfg);
2959 early_param("kvm-arm.wfe_trap_policy", early_kvm_wfe_trap_policy_cfg);