Lines matching full-text search terms "timer", "cannot", "wake", "cpu" (arch/x86/kvm/svm/avic.c)

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Kernel-based Virtual Machine driver for Linux
19 #include <linux/amd-iommu.h>
65 static_assert(__AVIC_GATAG(AVIC_VM_ID_MASK, AVIC_VCPU_IDX_MASK) == -1u);
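The static_assert at line 65 documents that the GA tag handed to the IOMMU packs the AVIC VM ID and the vCPU index into exactly 32 bits; avic_ga_log_notifier() (lines 232-234) later unpacks the same tag to find the VM and the vCPU to kick. A minimal standalone sketch of that packing, assuming a 9-bit vCPU index in the low bits and a 23-bit VM ID above it (the names and widths below are illustrative, inferred from the assert rather than copied from the kernel headers):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed field widths: 9-bit vCPU index, 23-bit VM ID (together: 32 bits). */
#define GATAG_VCPU_IDX_BITS   9
#define GATAG_VCPU_IDX_MASK   ((1u << GATAG_VCPU_IDX_BITS) - 1)
#define GATAG_VM_ID_MASK      (0xffffffffu >> GATAG_VCPU_IDX_BITS)

/* Pack a VM ID and vCPU index into one 32-bit tag handed to the IOMMU. */
static uint32_t make_ga_tag(uint32_t vm_id, uint32_t vcpu_idx)
{
	return ((vm_id & GATAG_VM_ID_MASK) << GATAG_VCPU_IDX_BITS) |
	       (vcpu_idx & GATAG_VCPU_IDX_MASK);
}

/* Unpack the fields again, as a GA log notifier would. */
static uint32_t ga_tag_to_vm_id(uint32_t tag)    { return tag >> GATAG_VCPU_IDX_BITS; }
static uint32_t ga_tag_to_vcpu_idx(uint32_t tag) { return tag & GATAG_VCPU_IDX_MASK; }

int main(void)
{
	uint32_t tag = make_ga_tag(42, 7);

	assert(ga_tag_to_vm_id(tag) == 42 && ga_tag_to_vcpu_idx(tag) == 7);
	/* All-ones fields must yield an all-ones tag, mirroring the static_assert. */
	assert(make_ga_tag(GATAG_VM_ID_MASK, GATAG_VCPU_IDX_MASK) == 0xffffffffu);
	printf("tag=%#x vm_id=%u vcpu_idx=%u\n", (unsigned)tag,
	       (unsigned)ga_tag_to_vm_id(tag), (unsigned)ga_tag_to_vcpu_idx(tag));
	return 0;
}

Because the two fields tile the entire 32-bit tag, feeding all-ones into both fields must produce -1u, which is what the kernel's static_assert verifies.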
67 #define AVIC_AUTO_MODE -1
72 *(int *)kp->arg = AVIC_AUTO_MODE; in avic_param_set()
133 * Note! Always intercept LVTT, as TSC-deadline timer mode in avic_set_x2apic_msr_interception()
134 * isn't virtualized by hardware, and the CPU will generate a in avic_set_x2apic_msr_interception()
148 if (intercept == svm->x2avic_msrs_intercepted) in avic_set_x2apic_msr_interception()
155 svm_set_intercept_for_msr(&svm->vcpu, x2avic_passthrough_msrs[i], in avic_set_x2apic_msr_interception()
158 svm->x2avic_msrs_intercepted = intercept; in avic_set_x2apic_msr_interception()
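The early return at line 148 is a state cache: x2avic_msrs_intercepted remembers what was last programmed into the MSR permission bitmap, so the passthrough list is only rewalked when the requested state actually changes, while LVTT stays intercepted unconditionally for the reason given at lines 133-134. A compilable sketch of that toggle pattern, with stand-in types and a few illustrative x2APIC MSR numbers (the real code calls svm_set_intercept_for_msr() on the x2avic_passthrough_msrs table):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the real vCPU state and MSR-bitmap helper. */
struct demo_vcpu { bool x2avic_msrs_intercepted; };

static void set_msr_intercept(struct demo_vcpu *v, uint32_t msr, bool intercept)
{
	printf("MSR %#x: intercept=%d\n", (unsigned)msr, intercept);	/* placeholder */
	(void)v;
}

/* A few x2APIC MSRs that can be passed through when x2AVIC is active (subset,
 * for illustration); LVTT is deliberately absent so it stays intercepted. */
static const uint32_t passthrough_msrs[] = { 0x802, 0x808, 0x80a, 0x80b };

static void set_x2apic_msr_interception(struct demo_vcpu *v, bool intercept)
{
	if (intercept == v->x2avic_msrs_intercepted)	/* nothing changed */
		return;

	for (size_t i = 0; i < sizeof(passthrough_msrs) / sizeof(passthrough_msrs[0]); i++)
		set_msr_intercept(v, passthrough_msrs[i], intercept);

	v->x2avic_msrs_intercepted = intercept;
}

int main(void)
{
	struct demo_vcpu v = { .x2avic_msrs_intercepted = true };

	set_x2apic_msr_interception(&v, false);	/* rewrites the bitmap */
	set_x2apic_msr_interception(&v, false);	/* no-op: state unchanged */
	return 0;
}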
163 struct vmcb *vmcb = svm->vmcb01.ptr; in avic_activate_vmcb()
165 vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK); in avic_activate_vmcb()
166 vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK; in avic_activate_vmcb()
168 vmcb->control.int_ctl |= AVIC_ENABLE_MASK; in avic_activate_vmcb()
171 * Note: KVM supports hybrid-AVIC mode, where KVM emulates x2APIC MSR in avic_activate_vmcb()
177 if (x2avic_enabled && apic_x2apic_mode(svm->vcpu.arch.apic)) { in avic_activate_vmcb()
178 vmcb->control.int_ctl |= X2APIC_MODE_MASK; in avic_activate_vmcb()
179 vmcb->control.avic_physical_id |= X2AVIC_MAX_PHYSICAL_ID; in avic_activate_vmcb()
184 * Flush the TLB, the guest may have inserted a non-APIC in avic_activate_vmcb()
187 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, &svm->vcpu); in avic_activate_vmcb()
189 /* For xAVIC and hybrid-xAVIC modes */ in avic_activate_vmcb()
190 vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID; in avic_activate_vmcb()
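avic_activate_vmcb() (lines 165-190) first clears both mode bits and the physical-ID limit, always sets AVIC_ENABLE_MASK, and only adds X2APIC_MODE_MASK plus the larger physical-ID limit when x2AVIC is available and the guest APIC is actually in x2APIC mode; otherwise the smaller xAVIC limit is used, which also covers the hybrid mode mentioned at line 171. A standalone sketch of that selection, with assumed bit positions and limits (the kernel's actual definitions come from <asm/svm.h>):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; the authoritative masks live in <asm/svm.h>. */
#define DEMO_AVIC_ENABLE	(1u << 31)
#define DEMO_X2APIC_MODE	(1u << 30)
#define DEMO_XAVIC_MAX_ID	0xffu	/* 8-bit physical APIC IDs  */
#define DEMO_X2AVIC_MAX_ID	0x1ffu	/* 9-bit physical APIC IDs  */

struct demo_vmcb {
	uint32_t int_ctl;
	uint64_t avic_physical_id;	/* low bits hold the max table index */
};

static void activate_avic(struct demo_vmcb *vmcb, bool x2avic_enabled, bool guest_x2apic)
{
	/* Start from a clean slate: clear both mode bits and the max index. */
	vmcb->int_ctl &= ~(DEMO_AVIC_ENABLE | DEMO_X2APIC_MODE);
	vmcb->avic_physical_id &= ~0x1ffull;

	vmcb->int_ctl |= DEMO_AVIC_ENABLE;

	if (x2avic_enabled && guest_x2apic) {
		vmcb->int_ctl |= DEMO_X2APIC_MODE;
		vmcb->avic_physical_id |= DEMO_X2AVIC_MAX_ID;
	} else {
		/* xAVIC, including hybrid mode where x2APIC MSRs are emulated. */
		vmcb->avic_physical_id |= DEMO_XAVIC_MAX_ID;
	}
}

int main(void)
{
	struct demo_vmcb vmcb = { 0 };

	activate_avic(&vmcb, true, true);
	printf("int_ctl=%#x max_index=%#llx\n", (unsigned)vmcb.int_ctl,
	       (unsigned long long)(vmcb.avic_physical_id & 0x1ff));
	return 0;
}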
198 struct vmcb *vmcb = svm->vmcb01.ptr; in avic_deactivate_vmcb()
200 vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK); in avic_deactivate_vmcb()
201 vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK; in avic_deactivate_vmcb()
207 if (is_guest_mode(&svm->vcpu) && in avic_deactivate_vmcb()
208 vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)) in avic_deactivate_vmcb()
232 if (kvm_svm->avic_vm_id != vm_id) in avic_ga_log_notifier()
234 vcpu = kvm_get_vcpu(&kvm_svm->kvm, vcpu_idx); in avic_ga_log_notifier()
258 free_page((unsigned long)kvm_svm->avic_logical_id_table); in avic_vm_destroy()
259 free_page((unsigned long)kvm_svm->avic_physical_id_table); in avic_vm_destroy()
262 hash_del(&kvm_svm->hnode); in avic_vm_destroy()
269 int err = -ENOMEM; in avic_vm_init()
277 kvm_svm->avic_physical_id_table = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT); in avic_vm_init()
278 if (!kvm_svm->avic_physical_id_table) in avic_vm_init()
281 kvm_svm->avic_logical_id_table = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT); in avic_vm_init()
282 if (!kvm_svm->avic_logical_id_table) in avic_vm_init()
288 if (vm_id == 0) { /* id is 1-based, zero is not okay */ in avic_vm_init()
295 if (k2->avic_vm_id == vm_id) in avic_vm_init()
299 kvm_svm->avic_vm_id = vm_id; in avic_vm_init()
300 hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id); in avic_vm_init()
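avic_vm_init() (lines 288-300) hands out a non-zero AVIC VM ID from a monotonically advancing counter; only after the counter has wrapped does it bother probing the svm_vm_data_hash table for a collision before installing the new VM. A self-contained sketch of that allocation pattern, using a flat array in place of the kernel hashtable and an assumed 23-bit ID space:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_VM_ID_MASK   0x7fffffu   /* assumed 23-bit VM ID space */
#define DEMO_MAX_VMS      8

static uint32_t live_vm_ids[DEMO_MAX_VMS];   /* stand-in for the VM hash table */
static uint32_t next_vm_id;
static bool wrapped;

static bool vm_id_in_use(uint32_t id)
{
	for (int i = 0; i < DEMO_MAX_VMS; i++)
		if (live_vm_ids[i] == id)
			return true;
	return false;
}

/* Allocate the next free, non-zero VM ID.  Collisions are only possible once
 * the counter has wrapped at least once, so the scan is skipped until then. */
static uint32_t alloc_vm_id(void)
{
	uint32_t id;

again:
	id = next_vm_id = (next_vm_id + 1) & DEMO_VM_ID_MASK;
	if (id == 0) {				/* IDs are 1-based */
		wrapped = true;
		goto again;
	}
	if (wrapped && vm_id_in_use(id))
		goto again;
	return id;
}

int main(void)
{
	live_vm_ids[0] = alloc_vm_id();
	live_vm_ids[1] = alloc_vm_id();
	printf("vm ids: %u %u\n", (unsigned)live_vm_ids[0], (unsigned)live_vm_ids[1]);
	return 0;
}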
312 return __sme_set(__pa(svm->vcpu.arch.apic->regs)); in avic_get_backing_page_address()
317 struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm); in avic_init_vmcb()
319 vmcb->control.avic_backing_page = avic_get_backing_page_address(svm); in avic_init_vmcb()
320 vmcb->control.avic_logical_id = __sme_set(__pa(kvm_svm->avic_logical_id_table)); in avic_init_vmcb()
321 vmcb->control.avic_physical_id = __sme_set(__pa(kvm_svm->avic_physical_id_table)); in avic_init_vmcb()
322 vmcb->control.avic_vapic_bar = APIC_DEFAULT_PHYS_BASE; in avic_init_vmcb()
324 if (kvm_apicv_activated(svm->vcpu.kvm)) in avic_init_vmcb()
332 struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); in avic_init_backing_page()
334 u32 id = vcpu->vcpu_id; in avic_init_backing_page()
346 kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_TOO_BIG); in avic_init_backing_page()
347 vcpu->arch.apic->apicv_active = false; in avic_init_backing_page()
354 if (WARN_ON_ONCE(!vcpu->arch.apic->regs)) in avic_init_backing_page()
355 return -EINVAL; in avic_init_backing_page()
357 if (kvm_apicv_activated(vcpu->kvm)) { in avic_init_backing_page()
366 ret = kvm_alloc_apic_access_page(vcpu->kvm); in avic_init_backing_page()
378 svm->avic_physical_id_entry = new_entry; in avic_init_backing_page()
383 * invalid entries, i.e. aren't guaranteed to generate a VM-Exit). in avic_init_backing_page()
385 WRITE_ONCE(kvm_svm->avic_physical_id_table[id], new_entry); in avic_init_backing_page()
399 int cpu = READ_ONCE(vcpu->cpu); in avic_ring_doorbell() local
401 if (cpu != get_cpu()) { in avic_ring_doorbell()
402 wrmsrq(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu)); in avic_ring_doorbell()
403 trace_kvm_avic_doorbell(vcpu->vcpu_id, kvm_cpu_get_apicid(cpu)); in avic_ring_doorbell()
411 vcpu->arch.apic->irr_pending = true; in avic_kick_vcpu()
450 * For x2APIC, the logical APIC ID is a read-only value that is in avic_kick_vcpu_by_logical_id()
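The comment at line 450 refers to the x2APIC logical ID being a read-only function of the physical APIC ID: architecturally, LDR bits 31:16 hold the cluster (id >> 4) and bits 15:0 hold a one-hot position within the cluster (1 << (id & 0xf)). A short worked example of that derivation:

#include <stdint.h>
#include <stdio.h>

/* x2APIC logical ID (LDR) is derived from the physical APIC ID:
 * bits 31:16 = cluster (id >> 4), bits 15:0 = one-hot bit (1 << (id & 0xf)). */
static uint32_t x2apic_logical_id(uint32_t x2apic_id)
{
	return ((x2apic_id >> 4) << 16) | (1u << (x2apic_id & 0xf));
}

int main(void)
{
	/* APIC ID 0x23 -> cluster 0x2, bit 3 within the cluster -> LDR 0x20008. */
	printf("ldr(0x23) = %#x\n", (unsigned)x2apic_logical_id(0x23));
	return 0;
}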
463 * A fast-path version of avic_kick_target_vcpus(), which attempts to match
475 return -EINVAL; in avic_kick_target_vcpus_fast()
485 return -EINVAL; in avic_kick_target_vcpus_fast()
487 return -EINVAL; in avic_kick_target_vcpus_fast()
490 return -EINVAL; in avic_kick_target_vcpus_fast()
519 avic_logical_id_table = kvm_svm->avic_logical_id_table; in avic_kick_target_vcpus_fast()
547 * Wake any target vCPUs that are blocking, i.e. waiting for a wake in avic_kick_target_vcpus()
562 u32 icrh = svm->vmcb->control.exit_info_1 >> 32; in avic_incomplete_ipi_interception()
563 u32 icrl = svm->vmcb->control.exit_info_1; in avic_incomplete_ipi_interception()
564 u32 id = svm->vmcb->control.exit_info_2 >> 32; in avic_incomplete_ipi_interception()
565 u32 index = svm->vmcb->control.exit_info_2 & 0x1FF; in avic_incomplete_ipi_interception()
566 struct kvm_lapic *apic = vcpu->arch.apic; in avic_incomplete_ipi_interception()
568 trace_kvm_avic_incomplete_ipi(vcpu->vcpu_id, icrh, icrl, id, index); in avic_incomplete_ipi_interception()
575 * only virtualizes Fixed, Edge-Triggered INTRs, and falls over in avic_incomplete_ipi_interception()
596 avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh, index); in avic_incomplete_ipi_interception()
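For AVIC_INCOMPLETE_IPI exits, hardware packs the guest's ICR value into exit_info_1 and the source APIC ID plus physical-ID-table index into exit_info_2, which is exactly how lines 562-565 decode them. A tiny standalone decoder for that layout (the example ICR value below is made up):

#include <stdint.h>
#include <stdio.h>

/* Field layout as decoded above: ICR in exit_info_1, source id and table
 * index in exit_info_2 (the index occupies the low 9 bits). */
struct incomplete_ipi {
	uint32_t icrh, icrl, id, index;
};

static struct incomplete_ipi decode_incomplete_ipi(uint64_t exit_info_1,
						   uint64_t exit_info_2)
{
	struct incomplete_ipi ipi = {
		.icrh  = (uint32_t)(exit_info_1 >> 32),
		.icrl  = (uint32_t)exit_info_1,
		.id    = (uint32_t)(exit_info_2 >> 32),
		.index = (uint32_t)(exit_info_2 & 0x1ff),
	};
	return ipi;
}

int main(void)
{
	/* Example: destination APIC ID 3 in ICR[63:56], fixed vector 0xd1 in ICR[7:0]. */
	struct incomplete_ipi ipi = decode_incomplete_ipi(0x03000000000040d1ull, 0);

	printf("icrh=%#x icrl=%#x id=%u index=%u\n",
	       (unsigned)ipi.icrh, (unsigned)ipi.icrl,
	       (unsigned)ipi.id, (unsigned)ipi.index);
	return 0;
}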
620 struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); in avic_get_logical_id_entry()
641 return &kvm_svm->avic_logical_id_table[index]; in avic_get_logical_id_entry()
649 flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT; in avic_ldr_write()
664 bool flat = svm->dfr_reg == APIC_DFR_FLAT; in avic_invalidate_logical_id_entry()
668 if (apic_x2apic_mode(vcpu->arch.apic)) in avic_invalidate_logical_id_entry()
671 entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat); in avic_invalidate_logical_id_entry()
679 u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR); in avic_handle_ldr_update()
680 u32 id = kvm_xapic_id(vcpu->arch.apic); in avic_handle_ldr_update()
683 if (apic_x2apic_mode(vcpu->arch.apic)) in avic_handle_ldr_update()
686 if (ldr == svm->ldr_reg) in avic_handle_ldr_update()
691 svm->ldr_reg = ldr; in avic_handle_ldr_update()
698 u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR); in avic_handle_dfr_update()
700 if (svm->dfr_reg == dfr) in avic_handle_dfr_update()
704 svm->dfr_reg = dfr; in avic_handle_dfr_update()
709 u32 offset = to_svm(vcpu)->vmcb->control.exit_info_1 & in avic_unaccel_trap_write()
720 /* Ignore writes to Read Remote Data, it's read-only. */ in avic_unaccel_trap_write()
763 u32 offset = svm->vmcb->control.exit_info_1 & in avic_unaccelerated_access_interception()
765 u32 vector = svm->vmcb->control.exit_info_2 & in avic_unaccelerated_access_interception()
767 bool write = (svm->vmcb->control.exit_info_1 >> 32) & in avic_unaccelerated_access_interception()
771 trace_kvm_avic_unaccelerated_access(vcpu->vcpu_id, offset, in avic_unaccelerated_access_interception()
788 struct kvm_vcpu *vcpu = &svm->vcpu; in avic_init_vcpu()
790 INIT_LIST_HEAD(&svm->ir_list); in avic_init_vcpu()
791 spin_lock_init(&svm->ir_list_lock); in avic_init_vcpu()
793 if (!enable_apicv || !irqchip_in_kernel(vcpu->kvm)) in avic_init_vcpu()
800 svm->dfr_reg = APIC_DFR_FLAT; in avic_init_vcpu()
813 struct kvm_vcpu *vcpu = irqfd->irq_bypass_vcpu; in svm_ir_list_del()
819 spin_lock_irqsave(&to_svm(vcpu)->ir_list_lock, flags); in svm_ir_list_del()
820 list_del(&irqfd->vcpu_list); in svm_ir_list_del()
821 spin_unlock_irqrestore(&to_svm(vcpu)->ir_list_lock, flags); in svm_ir_list_del()
842 .ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id, in avic_pi_update_irte()
843 vcpu->vcpu_idx), in avic_pi_update_irte()
858 guard(spinlock_irqsave)(&svm->ir_list_lock); in avic_pi_update_irte()
866 entry = svm->avic_physical_id_entry; in avic_pi_update_irte()
868 pi_data.cpu = entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK; in avic_pi_update_irte()
870 pi_data.cpu = -1; in avic_pi_update_irte()
880 * for the IRTE, which KVM needs to keep the IRTE up-to-date, in avic_pi_update_irte()
885 return -EIO; in avic_pi_update_irte()
888 irqfd->irq_bypass_data = pi_data.ir_data; in avic_pi_update_irte()
889 list_add(&irqfd->vcpu_list, &svm->ir_list); in avic_pi_update_irte()
917 * in all associated IRTEs so that KVM can wake the vCPU if an IRQ is
923 static void avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, in avic_update_iommu_vcpu_affinity() argument
930 lockdep_assert_held(&svm->ir_list_lock); in avic_update_iommu_vcpu_affinity()
933 * Here, we go through the per-vcpu ir_list to update all existing in avic_update_iommu_vcpu_affinity()
936 if (list_empty(&svm->ir_list)) in avic_update_iommu_vcpu_affinity()
939 list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) { in avic_update_iommu_vcpu_affinity()
940 void *data = irqfd->irq_bypass_data; in avic_update_iommu_vcpu_affinity()
943 WARN_ON_ONCE(amd_iommu_update_ga(data, cpu, ga_log_intr)); in avic_update_iommu_vcpu_affinity()
944 else if (cpu >= 0) in avic_update_iommu_vcpu_affinity()
945 WARN_ON_ONCE(amd_iommu_activate_guest_mode(data, cpu, ga_log_intr)); in avic_update_iommu_vcpu_affinity()
951 static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu, in __avic_vcpu_load() argument
954 struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); in __avic_vcpu_load()
955 int h_physical_id = kvm_cpu_get_apicid(cpu); in __avic_vcpu_load()
965 if (WARN_ON_ONCE(vcpu->vcpu_id * sizeof(entry) >= PAGE_SIZE)) in __avic_vcpu_load()
969 * Grab the per-vCPU interrupt remapping lock even if the VM doesn't in __avic_vcpu_load()
972 * up-to-date entry information, or that this task will wait until in __avic_vcpu_load()
975 spin_lock_irqsave(&svm->ir_list_lock, flags); in __avic_vcpu_load()
977 entry = svm->avic_physical_id_entry; in __avic_vcpu_load()
985 svm->avic_physical_id_entry = entry; in __avic_vcpu_load()
989 * actual Physical ID table, so that the CPU never sees IsRunning=1. in __avic_vcpu_load()
990 * Keep the APIC ID up-to-date in the entry to minimize the chances of in __avic_vcpu_load()
996 WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry); in __avic_vcpu_load()
1000 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in __avic_vcpu_load()
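__avic_vcpu_load() (lines 975-1000) refreshes the vCPU's cached 64-bit physical-ID-table entry under ir_list_lock: the host physical APIC ID field is rewritten for the new pCPU and IsRunning is set, and in some configurations the IsRunning flag is deliberately withheld from the table the CPU actually reads (lines 989-990). A standalone sketch of how such an entry is composed; the bit positions below are assumptions for illustration, the authoritative masks being the AVIC_PHYSICAL_ID_ENTRY_* definitions in <asm/svm.h>:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout of one physical APIC ID table entry (illustrative only):
 *   bits 11:0   host physical APIC ID
 *   bits 51:12  guest APIC backing page (host physical address >> 12)
 *   bit  62     IsRunning
 *   bit  63     Valid
 */
#define DEMO_HOST_APIC_ID_MASK   0xfffull
#define DEMO_BACKING_PAGE_MASK   (((1ull << 40) - 1) << 12)
#define DEMO_IS_RUNNING          (1ull << 62)
#define DEMO_VALID               (1ull << 63)

static uint64_t entry_on_load(uint64_t entry, uint32_t host_apic_id, int running)
{
	/* Point the entry at the new pCPU and (maybe) mark the vCPU running. */
	entry &= ~DEMO_HOST_APIC_ID_MASK;
	entry |= host_apic_id & DEMO_HOST_APIC_ID_MASK;
	if (running)
		entry |= DEMO_IS_RUNNING;
	else
		entry &= ~DEMO_IS_RUNNING;
	return entry;
}

static uint64_t entry_on_put(uint64_t entry)
{
	/* On put, only IsRunning is cleared; the backing page and APIC ID stay. */
	return entry & ~DEMO_IS_RUNNING;
}

int main(void)
{
	/* Entry created at vCPU init: backing page address plus the Valid bit. */
	uint64_t entry = (0x1234000ull & DEMO_BACKING_PAGE_MASK) | DEMO_VALID;

	entry = entry_on_load(entry, 0x24, 1);	/* pCPU APIC ID 0x24, mark running */
	printf("loaded entry = %#llx\n", (unsigned long long)entry);
	entry = entry_on_put(entry);
	printf("put entry    = %#llx\n", (unsigned long long)entry);
	return 0;
}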
1003 void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in avic_vcpu_load() argument
1007 * is being scheduled in after being preempted. The CPU entries in the in avic_vcpu_load()
1009 * If the vCPU was migrated, its new CPU value will be stuffed when the in avic_vcpu_load()
1015 __avic_vcpu_load(vcpu, cpu, AVIC_START_RUNNING); in avic_vcpu_load()
1020 struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); in __avic_vcpu_put()
1023 u64 entry = svm->avic_physical_id_entry; in __avic_vcpu_put()
1027 if (WARN_ON_ONCE(vcpu->vcpu_id * sizeof(entry) >= PAGE_SIZE)) in __avic_vcpu_put()
1031 * Take and hold the per-vCPU interrupt remapping lock while updating in __avic_vcpu_put()
1034 * either svm_ir_list_add() will consume up-to-date entry information, in __avic_vcpu_put()
1038 spin_lock_irqsave(&svm->ir_list_lock, flags); in __avic_vcpu_put()
1040 avic_update_iommu_vcpu_affinity(vcpu, -1, action); in __avic_vcpu_put()
1046 * hardware is at least restricted to a CPU associated with the vCPU. in __avic_vcpu_put()
1051 WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry); in __avic_vcpu_put()
1055 * it's a synthetic flag that usurps an unused should-be-zero bit. in __avic_vcpu_put()
1060 svm->avic_physical_id_entry = entry; in __avic_vcpu_put()
1062 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in __avic_vcpu_put()
1074 u64 entry = to_svm(vcpu)->avic_physical_id_entry; in avic_vcpu_put()
1101 struct vmcb *vmcb = svm->vmcb01.ptr; in avic_refresh_virtual_apic_mode()
1112 * accordingly before re-activating. in avic_refresh_virtual_apic_mode()
1133 __avic_vcpu_load(vcpu, vcpu->cpu, AVIC_ACTIVATE); in avic_refresh_apicv_exec_ctrl()
1157 * used to signal running vCPUs cannot be blocked, i.e. will perturb the in avic_vcpu_blocking()
1158 * CPU and cause noisy neighbor problems if the VM is sending interrupts in avic_vcpu_blocking()
1169 avic_vcpu_load(vcpu, vcpu->cpu); in avic_vcpu_unblocking()
1192 pr_warn(FW_BUG "Cannot enable x2AVIC, AVIC is unsupported\n"); in avic_want_avic_enabled()
1198 pr_warn("AVIC disabled: missing HvInUseWrAllowed on SNP-enabled system\n"); in avic_want_avic_enabled()
1215 * - The module param avic enables both xAPIC and x2APIC mode.
1216 * - The hypervisor can support both xAVIC and x2AVIC in the same guest.
1217 * - The mode can be switched at run-time.
1236 * due to erratum 1235, which results in missed VM-Exits on the sender in avic_hardware_setup()
1237 * and thus missed wake events for blocking vCPUs due to the CPU in avic_hardware_setup()