1 // SPDX-License-Identifier: GPL-2.0-only
3 * Kernel-based Virtual Machine driver for Linux
19 #include <linux/amd-iommu.h>
60 static_assert(__AVIC_GATAG(AVIC_VM_ID_MASK, AVIC_VCPU_ID_MASK) == -1u);
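/*
 * Illustrative sketch, not taken from the driver: how a GA tag can pack the
 * VM ID and vCPU ID into one 32-bit value.  The field widths below assume a
 * 9-bit vCPU ID field (matching AVIC_PHYSICAL_MAX_INDEX_MASK); the
 * static_assert above only holds when the two fields tile all 32 bits.
 */
#define EX_VCPU_ID_BITS		9
#define EX_VCPU_ID_MASK		((1u << EX_VCPU_ID_BITS) - 1)	/* 0x1ff */
#define EX_VM_ID_MASK		(~0u >> EX_VCPU_ID_BITS)	/* remaining 23 bits */
#define EX_GATAG(vm, vcpu)	((((vm) & EX_VM_ID_MASK) << EX_VCPU_ID_BITS) | \
				 ((vcpu) & EX_VCPU_ID_MASK))
/* EX_GATAG(EX_VM_ID_MASK, EX_VCPU_ID_MASK) == 0xffffffff, mirroring the assert above. */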
81 struct list_head node; /* Used by SVM for per-vcpu ir_list */
87 struct vmcb *vmcb = svm->vmcb01.ptr; in avic_activate_vmcb()
89 vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK); in avic_activate_vmcb()
90 vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK; in avic_activate_vmcb()
92 vmcb->control.int_ctl |= AVIC_ENABLE_MASK; in avic_activate_vmcb()
95 * Note: KVM supports hybrid-AVIC mode, where KVM emulates x2APIC MSR in avic_activate_vmcb()
101 if (x2avic_enabled && apic_x2apic_mode(svm->vcpu.arch.apic)) { in avic_activate_vmcb()
102 vmcb->control.int_ctl |= X2APIC_MODE_MASK; in avic_activate_vmcb()
103 vmcb->control.avic_physical_id |= X2AVIC_MAX_PHYSICAL_ID; in avic_activate_vmcb()
108 * Flush the TLB, the guest may have inserted a non-APIC in avic_activate_vmcb()
111 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, &svm->vcpu); in avic_activate_vmcb()
113 /* For xAVIC and hybrid-xAVIC modes */ in avic_activate_vmcb()
114 vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID; in avic_activate_vmcb()
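/*
 * Assumed background for the two assignments above: the low bits of
 * avic_physical_id hold the highest valid index of the Physical APIC ID
 * table, and X2AVIC_MAX_PHYSICAL_ID is larger than AVIC_MAX_PHYSICAL_ID
 * because x2AVIC can address more vCPUs than legacy xAVIC.
 */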
122 struct vmcb *vmcb = svm->vmcb01.ptr; in avic_deactivate_vmcb()
124 vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK); in avic_deactivate_vmcb()
125 vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK; in avic_deactivate_vmcb()
131 if (is_guest_mode(&svm->vcpu) && in avic_deactivate_vmcb()
132 vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)) in avic_deactivate_vmcb()
156 if (kvm_svm->avic_vm_id != vm_id) in avic_ga_log_notifier()
158 vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id); in avic_ga_log_notifier()
182 if (kvm_svm->avic_logical_id_table_page) in avic_vm_destroy()
183 __free_page(kvm_svm->avic_logical_id_table_page); in avic_vm_destroy()
184 if (kvm_svm->avic_physical_id_table_page) in avic_vm_destroy()
185 __free_page(kvm_svm->avic_physical_id_table_page); in avic_vm_destroy()
188 hash_del(&kvm_svm->hnode); in avic_vm_destroy()
195 int err = -ENOMEM; in avic_vm_init()
210 kvm_svm->avic_physical_id_table_page = p_page; in avic_vm_init()
217 kvm_svm->avic_logical_id_table_page = l_page; in avic_vm_init()
222 if (vm_id == 0) { /* id is 1-based, zero is not okay */ in avic_vm_init()
229 if (k2->avic_vm_id == vm_id) in avic_vm_init()
233 kvm_svm->avic_vm_id = vm_id; in avic_vm_init()
234 hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id); in avic_vm_init()
246 struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm); in avic_init_vmcb()
247 phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page)); in avic_init_vmcb()
248 phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic_logical_id_table_page)); in avic_init_vmcb()
249 phys_addr_t ppa = __sme_set(page_to_phys(kvm_svm->avic_physical_id_table_page)); in avic_init_vmcb()
251 vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK; in avic_init_vmcb()
252 vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK; in avic_init_vmcb()
253 vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK; in avic_init_vmcb()
254 vmcb->control.avic_vapic_bar = APIC_DEFAULT_PHYS_BASE & VMCB_AVIC_APIC_BAR_MASK; in avic_init_vmcb()
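/*
 * Assumed background: avic_backing_page is the vCPU's virtual APIC register
 * page, while the logical and physical ID tables are per-VM structures that
 * hardware walks to translate a target APIC ID into a backing page and
 * running state when it delivers a virtual interrupt.
 */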
256 if (kvm_apicv_activated(svm->vcpu.kvm)) in avic_init_vmcb()
263 unsigned int index) in avic_get_physical_id_entry() argument
266 struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); in avic_get_physical_id_entry()
268 if ((!x2avic_enabled && index > AVIC_MAX_PHYSICAL_ID) || in avic_get_physical_id_entry()
269 (index > X2AVIC_MAX_PHYSICAL_ID)) in avic_get_physical_id_entry()
272 avic_physical_id_table = page_address(kvm_svm->avic_physical_id_table_page); in avic_get_physical_id_entry()
274 return &avic_physical_id_table[index]; in avic_get_physical_id_entry()
280 int id = vcpu->vcpu_id; in avic_init_backing_page()
285 return -EINVAL; in avic_init_backing_page()
287 if (!vcpu->arch.apic->regs) in avic_init_backing_page()
288 return -EINVAL; in avic_init_backing_page()
290 if (kvm_apicv_activated(vcpu->kvm)) { in avic_init_backing_page()
299 ret = kvm_alloc_apic_access_page(vcpu->kvm); in avic_init_backing_page()
304 svm->avic_backing_page = virt_to_page(vcpu->arch.apic->regs); in avic_init_backing_page()
309 return -EINVAL; in avic_init_backing_page()
311 new_entry = __sme_set((page_to_phys(svm->avic_backing_page) & in avic_init_backing_page()
316 svm->avic_physical_id_cache = entry; in avic_init_backing_page()
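/*
 * Rough layout of the Physical APIC ID table entry built above (field
 * positions are assumptions, not quoted from this file):
 *
 *	backing page address	- host physical address of the APIC page
 *	host physical APIC ID	- which pCPU the vCPU currently runs on
 *	IS_RUNNING		- set by avic_vcpu_load(), cleared by avic_vcpu_put()
 *	VALID			- set here once the backing page is known
 */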
330 int cpu = READ_ONCE(vcpu->cpu); in avic_ring_doorbell()
334 trace_kvm_avic_doorbell(vcpu->vcpu_id, kvm_cpu_get_apicid(cpu)); in avic_ring_doorbell()
342 vcpu->arch.apic->irr_pending = true; in avic_kick_vcpu()
381 * For x2APIC, the logical APIC ID is a read-only value that is in avic_kick_vcpu_by_logical_id()
394 * A fast-path version of avic_kick_target_vcpus(), which attempts to match
398 u32 icrl, u32 icrh, u32 index) in avic_kick_target_vcpus_fast() argument
406 return -EINVAL; in avic_kick_target_vcpus_fast()
416 return -EINVAL; in avic_kick_target_vcpus_fast()
418 return -EINVAL; in avic_kick_target_vcpus_fast()
420 if (WARN_ON_ONCE(dest != index)) in avic_kick_target_vcpus_fast()
421 return -EINVAL; in avic_kick_target_vcpus_fast()
427 u32 cluster; in avic_kick_target_vcpus_fast() local
430 /* 16 bit dest mask, 16 bit cluster id */ in avic_kick_target_vcpus_fast()
432 cluster = (dest >> 16) << 4; in avic_kick_target_vcpus_fast()
436 cluster = 0; in avic_kick_target_vcpus_fast()
438 /* 4 bit dest mask, 4 bit cluster id */ in avic_kick_target_vcpus_fast()
440 cluster = (dest >> 4) << 2; in avic_kick_target_vcpus_fast()
443 /* Nothing to do if there are no destinations in the cluster. */ in avic_kick_target_vcpus_fast()
450 avic_logical_id_table = page_address(kvm_svm->avic_logical_id_table_page); in avic_kick_target_vcpus_fast()
459 cluster + i, icrl); in avic_kick_target_vcpus_fast()
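/*
 * Worked example of the decode above (values chosen for illustration):
 *
 *   xAPIC cluster mode, dest = 0x34: cluster id 3, 4-bit mask 0x4
 *	-> cluster = (0x34 >> 4) << 2 = 12, bit 2 set
 *	-> logical ID table index = 12 + 2 = 14
 *
 *   x2APIC mode, dest = 0x00020005: cluster id 2, 16-bit mask 0x0005
 *	-> cluster = (0x00020005 >> 16) << 4 = 32, bits 0 and 2 set
 *	-> logical ID table indices 32 and 34
 *
 *   Flat mode keeps cluster = 0, so the 8-bit mask indexes entries 0-7.
 */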
466 u32 icrl, u32 icrh, u32 index) in avic_kick_target_vcpus() argument
472 if (!avic_kick_target_vcpus_fast(kvm, source, icrl, icrh, index)) in avic_kick_target_vcpus()
475 trace_kvm_avic_kick_vcpu_slowpath(icrh, icrl, index); in avic_kick_target_vcpus()
493 u32 icrh = svm->vmcb->control.exit_info_1 >> 32; in avic_incomplete_ipi_interception()
494 u32 icrl = svm->vmcb->control.exit_info_1; in avic_incomplete_ipi_interception()
495 u32 id = svm->vmcb->control.exit_info_2 >> 32; in avic_incomplete_ipi_interception()
496 u32 index = svm->vmcb->control.exit_info_2 & 0x1FF; in avic_incomplete_ipi_interception() local
497 struct kvm_lapic *apic = vcpu->arch.apic; in avic_incomplete_ipi_interception()
499 trace_kvm_avic_incomplete_ipi(vcpu->vcpu_id, icrh, icrl, id, index); in avic_incomplete_ipi_interception()
506 * only virtualizes Fixed, Edge-Triggered INTRs, and falls over in avic_incomplete_ipi_interception()
527 avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh, index); in avic_incomplete_ipi_interception()
551 struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); in avic_get_logical_id_entry()
553 u32 cluster, index; in avic_get_logical_id_entry() local
558 cluster = 0; in avic_get_logical_id_entry()
560 cluster = (ldr >> 4); in avic_get_logical_id_entry()
561 if (cluster >= 0xf) in avic_get_logical_id_entry()
568 index = __ffs(ldr); in avic_get_logical_id_entry()
569 if (WARN_ON_ONCE(index > 7)) in avic_get_logical_id_entry()
571 index += (cluster << 2); in avic_get_logical_id_entry()
573 logical_apic_id_table = (u32 *) page_address(kvm_svm->avic_logical_id_table_page); in avic_get_logical_id_entry()
575 return &logical_apic_id_table[index]; in avic_get_logical_id_entry()
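/*
 * Example of the index math above: in cluster mode an LDR of 0x21 (cluster
 * 2, logical ID bit 0) yields index = __ffs(0x1) + (2 << 2) = 8, while in
 * flat mode an LDR with only bit 3 set maps to index 3.  Each xAPIC cluster
 * thus occupies four consecutive entries of the logical APIC ID table.
 */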
583 flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT; in avic_ldr_write()
598 bool flat = svm->dfr_reg == APIC_DFR_FLAT; in avic_invalidate_logical_id_entry()
602 if (apic_x2apic_mode(vcpu->arch.apic)) in avic_invalidate_logical_id_entry()
605 entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat); in avic_invalidate_logical_id_entry()
613 u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR); in avic_handle_ldr_update()
614 u32 id = kvm_xapic_id(vcpu->arch.apic); in avic_handle_ldr_update()
617 if (apic_x2apic_mode(vcpu->arch.apic)) in avic_handle_ldr_update()
620 if (ldr == svm->ldr_reg) in avic_handle_ldr_update()
625 svm->ldr_reg = ldr; in avic_handle_ldr_update()
632 u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR); in avic_handle_dfr_update()
634 if (svm->dfr_reg == dfr) in avic_handle_dfr_update()
638 svm->dfr_reg = dfr; in avic_handle_dfr_update()
643 u32 offset = to_svm(vcpu)->vmcb->control.exit_info_1 & in avic_unaccel_trap_write()
654 /* Ignore writes to Read Remote Data; it's read-only. */ in avic_unaccel_trap_write()
697 u32 offset = svm->vmcb->control.exit_info_1 & in avic_unaccelerated_access_interception()
699 u32 vector = svm->vmcb->control.exit_info_2 & in avic_unaccelerated_access_interception()
701 bool write = (svm->vmcb->control.exit_info_1 >> 32) & in avic_unaccelerated_access_interception()
705 trace_kvm_avic_unaccelerated_access(vcpu->vcpu_id, offset, in avic_unaccelerated_access_interception()
722 struct kvm_vcpu *vcpu = &svm->vcpu; in avic_init_vcpu()
724 if (!enable_apicv || !irqchip_in_kernel(vcpu->kvm)) in avic_init_vcpu()
731 INIT_LIST_HEAD(&svm->ir_list); in avic_init_vcpu()
732 spin_lock_init(&svm->ir_list_lock); in avic_init_vcpu()
733 svm->dfr_reg = APIC_DFR_FLAT; in avic_init_vcpu()
751 if (!kvm_arch_has_assigned_device(vcpu->kvm)) in avic_set_pi_irte_mode()
755 * Here, we go through the per-vcpu ir_list to update all existing in avic_set_pi_irte_mode()
758 spin_lock_irqsave(&svm->ir_list_lock, flags); in avic_set_pi_irte_mode()
760 if (list_empty(&svm->ir_list)) in avic_set_pi_irte_mode()
763 list_for_each_entry(ir, &svm->ir_list, node) { in avic_set_pi_irte_mode()
765 ret = amd_iommu_activate_guest_mode(ir->data); in avic_set_pi_irte_mode()
767 ret = amd_iommu_deactivate_guest_mode(ir->data); in avic_set_pi_irte_mode()
772 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in avic_set_pi_irte_mode()
781 spin_lock_irqsave(&svm->ir_list_lock, flags); in svm_ir_list_del()
782 list_for_each_entry(cur, &svm->ir_list, node) { in svm_ir_list_del()
783 if (cur->data != pi->ir_data) in svm_ir_list_del()
785 list_del(&cur->node); in svm_ir_list_del()
789 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in svm_ir_list_del()
800 * In some cases, the existing irte is updated and re-set, in svm_ir_list_add()
804 if (pi->ir_data && (pi->prev_ga_tag != 0)) { in svm_ir_list_add()
805 struct kvm *kvm = svm->vcpu.kvm; in svm_ir_list_add()
806 u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag); in svm_ir_list_add()
811 ret = -EINVAL; in svm_ir_list_add()
821 * add to the per-vcpu ir_list. in svm_ir_list_add()
825 ret = -ENOMEM; in svm_ir_list_add()
828 ir->data = pi->ir_data; in svm_ir_list_add()
830 spin_lock_irqsave(&svm->ir_list_lock, flags); in svm_ir_list_add()
838 entry = READ_ONCE(*(svm->avic_physical_id_cache)); in svm_ir_list_add()
841 true, pi->ir_data); in svm_ir_list_add()
843 list_add(&ir->node, &svm->ir_list); in svm_ir_list_add()
844 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in svm_ir_list_add()
855 * For lowest-priority interrupts, we only support
858 * irqbalance to make the interrupts single-CPU.
873 return -1; in get_pi_vcpu_info()
879 vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page)); in get_pi_vcpu_info()
880 vcpu_info->vector = irq.vector; in get_pi_vcpu_info()
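/*
 * Assumed context: unlike VMX posted interrupts, AVIC has no separate
 * posted-interrupt descriptor, so pi_desc_addr above points the IOMMU
 * straight at the vCPU's AVIC backing page.
 */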
886 * avic_pi_update_irte - set IRTE for Posted-Interrupts
908 idx = srcu_read_lock(&kvm->irq_srcu); in avic_pi_update_irte()
909 irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); in avic_pi_update_irte()
911 if (guest_irq >= irq_rt->nr_rt_entries || in avic_pi_update_irte()
912 hlist_empty(&irq_rt->map[guest_irq])) { in avic_pi_update_irte()
914 guest_irq, irq_rt->nr_rt_entries); in avic_pi_update_irte()
918 hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) { in avic_pi_update_irte()
922 if (e->type != KVM_IRQ_ROUTING_MSI) in avic_pi_update_irte()
933 kvm_vcpu_apicv_active(&svm->vcpu)) { in avic_pi_update_irte()
937 pi.base = __sme_set(page_to_phys(svm->avic_backing_page) & in avic_pi_update_irte()
939 pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id, in avic_pi_update_irte()
940 svm->vcpu.vcpu_id); in avic_pi_update_irte()
948 * interrupt information in a per-vcpu ir_list so that in avic_pi_update_irte()
960 * - Tell IOMMU to use legacy mode for this interrupt. in avic_pi_update_irte()
961 * - Retrieve ga_tag of prior interrupt remapping data. in avic_pi_update_irte()
970 * was cached. If so, we need to clean up the per-vcpu in avic_pi_update_irte()
984 trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id, in avic_pi_update_irte()
985 e->gsi, vcpu_info.vector, in avic_pi_update_irte()
997 srcu_read_unlock(&kvm->irq_srcu, idx); in avic_pi_update_irte()
1008 lockdep_assert_held(&svm->ir_list_lock); in avic_update_iommu_vcpu_affinity()
1010 if (!kvm_arch_has_assigned_device(vcpu->kvm)) in avic_update_iommu_vcpu_affinity()
1014 * Here, we go through the per-vcpu ir_list to update all existing in avic_update_iommu_vcpu_affinity()
1017 if (list_empty(&svm->ir_list)) in avic_update_iommu_vcpu_affinity()
1020 list_for_each_entry(ir, &svm->ir_list, node) { in avic_update_iommu_vcpu_affinity()
1021 ret = amd_iommu_update_ga(cpu, r, ir->data); in avic_update_iommu_vcpu_affinity()
1051 * Grab the per-vCPU interrupt remapping lock even if the VM doesn't in avic_vcpu_load()
1054 * up-to-date entry information, or that this task will wait until in avic_vcpu_load()
1057 spin_lock_irqsave(&svm->ir_list_lock, flags); in avic_vcpu_load()
1059 entry = READ_ONCE(*(svm->avic_physical_id_cache)); in avic_vcpu_load()
1066 WRITE_ONCE(*(svm->avic_physical_id_cache), entry); in avic_vcpu_load()
1069 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in avic_vcpu_load()
1087 entry = READ_ONCE(*(svm->avic_physical_id_cache)); in avic_vcpu_put()
1094 * Take and hold the per-vCPU interrupt remapping lock while updating in avic_vcpu_put()
1097 * either svm_ir_list_add() will consume up-to-date entry information, in avic_vcpu_put()
1101 spin_lock_irqsave(&svm->ir_list_lock, flags); in avic_vcpu_put()
1103 avic_update_iommu_vcpu_affinity(vcpu, -1, 0); in avic_vcpu_put()
1106 WRITE_ONCE(*(svm->avic_physical_id_cache), entry); in avic_vcpu_put()
1108 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in avic_vcpu_put()
1115 struct vmcb *vmcb = svm->vmcb01.ptr; in avic_refresh_virtual_apic_mode()
1126 * accordingly before re-activating. in avic_refresh_virtual_apic_mode()
1146 avic_vcpu_load(vcpu, vcpu->cpu); in avic_refresh_apicv_exec_ctrl()
1179 avic_vcpu_load(vcpu, vcpu->cpu); in avic_vcpu_unblocking()
1184 * - The module param avic enables both xAPIC and x2APIC modes.
1185 * - The hypervisor can support both xAVIC and x2AVIC in the same guest.
1186 * - The mode can be switched at run-time.
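/*
 * Minimal sketch of how the notes above could be applied at setup time;
 * this is an assumption-laden illustration, not the driver's actual
 * avic_hardware_setup().  Plain AVIC is treated as a prerequisite, and
 * x2AVIC support is only recorded when the CPU reports both feature bits.
 */
static bool example_avic_setup(bool avic_module_param)
{
	if (!avic_module_param || !boot_cpu_has(X86_FEATURE_AVIC))
		return false;

	/* x2AVIC is an add-on; xAPIC-only guests still get plain xAVIC. */
	x2avic_enabled = boot_cpu_has(X86_FEATURE_X2AVIC);

	return true;
}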