Lines Matching +full:pci-iommu

1 // SPDX-License-Identifier: GPL-2.0
3 #define pr_fmt(fmt) "DMAR-IR: " fmt
11 #include <linux/pci.h>
13 #include <linux/irqchip/irq-msi-lib.h>
22 #include <asm/pci-direct.h>
25 #include "iommu.h"
27 #include "../iommu-pages.h"
30 struct intel_iommu *iommu; member
32 unsigned int bus; /* PCI bus number */
33 unsigned int devfn; /* PCI devfn number */
37 struct intel_iommu *iommu; member
44 struct intel_iommu *iommu; member
69 * ->dmar_global_lock
70 * ->irq_2_ir_lock
71 * ->qi->q_lock
72 * ->iommu->register_lock
75 * in a single-threaded environment with interrupts disabled, so no need to take
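/*
 * Editor's note: an illustrative, self-contained sketch (not kernel code)
 * of the lock ordering documented above, using pthread mutexes as
 * stand-ins for the kernel locks. The only point it makes is that the
 * outer lock is always taken before the inner one, never the reverse.
 */
#include <pthread.h>

static pthread_mutex_t dmar_global_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t irq_2_ir_lock = PTHREAD_MUTEX_INITIALIZER;

static void update_irte_example(void)
{
	pthread_mutex_lock(&dmar_global_lock);	/* outermost */
	pthread_mutex_lock(&irq_2_ir_lock);	/* nested inside it */
	/* ... touch the IRTE here ... */
	pthread_mutex_unlock(&irq_2_ir_lock);
	pthread_mutex_unlock(&dmar_global_lock);
}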
81 static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
85 static bool ir_pre_enabled(struct intel_iommu *iommu) in ir_pre_enabled() argument
87 return (iommu->flags & VTD_FLAG_IRQ_REMAP_PRE_ENABLED); in ir_pre_enabled()
90 static void clear_ir_pre_enabled(struct intel_iommu *iommu) in clear_ir_pre_enabled() argument
92 iommu->flags &= ~VTD_FLAG_IRQ_REMAP_PRE_ENABLED; in clear_ir_pre_enabled()
95 static void init_ir_status(struct intel_iommu *iommu) in init_ir_status() argument
99 gsts = readl(iommu->reg + DMAR_GSTS_REG); in init_ir_status()
101 iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED; in init_ir_status()
104 static int alloc_irte(struct intel_iommu *iommu, in alloc_irte() argument
107 struct ir_table *table = iommu->ir_table; in alloc_irte()
113 return -1; in alloc_irte()
120 if (mask > ecap_max_handle_mask(iommu->ecap)) { in alloc_irte()
123 ecap_max_handle_mask(iommu->ecap)); in alloc_irte()
124 return -1; in alloc_irte()
128 index = bitmap_find_free_region(table->bitmap, in alloc_irte()
131 pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id); in alloc_irte()
133 irq_iommu->iommu = iommu; in alloc_irte()
134 irq_iommu->irte_index = index; in alloc_irte()
135 irq_iommu->sub_handle = 0; in alloc_irte()
136 irq_iommu->irte_mask = mask; in alloc_irte()
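/*
 * Editor's sketch: how a multi-vector request maps to an IRTE region.
 * bitmap_find_free_region(bitmap, size, order) reserves 2^order
 * contiguous, naturally aligned entries, so the requested count is
 * first rounded up to a power of two and 'mask' is its log2.
 * Standalone C, helper name is hypothetical.
 */
#include <stdio.h>

static unsigned int irte_mask_for(unsigned int count)
{
	unsigned int mask = 0;

	while ((1u << mask) < count)	/* round up to a power of two */
		mask++;
	return mask;
}

int main(void)
{
	/* e.g. a device asking for 5 MSI vectors reserves 8 IRTEs */
	printf("count=5 -> mask=%u -> %u entries\n",
	       irte_mask_for(5), 1u << irte_mask_for(5));
	return 0;
}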
143 static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask) in qi_flush_iec() argument
153 return qi_submit_sync(iommu, &desc, 1, 0); in qi_flush_iec()
159 struct intel_iommu *iommu; in modify_irte() local
165 return -1; in modify_irte()
169 iommu = irq_iommu->iommu; in modify_irte()
171 index = irq_iommu->irte_index + irq_iommu->sub_handle; in modify_irte()
172 irte = &iommu->ir_table->base[index]; in modify_irte()
174 if ((irte->pst == 1) || (irte_modified->pst == 1)) { in modify_irte()
176 * We use cmpxchg16b to atomically update the 128-bit IRTE, in modify_irte()
181 u128 old = irte->irte; in modify_irte()
182 WARN_ON(!try_cmpxchg128(&irte->irte, &old, irte_modified->irte)); in modify_irte()
184 WRITE_ONCE(irte->low, irte_modified->low); in modify_irte()
185 WRITE_ONCE(irte->high, irte_modified->high); in modify_irte()
187 __iommu_flush_cache(iommu, irte, sizeof(*irte)); in modify_irte()
189 rc = qi_flush_iec(iommu, index, 0); in modify_irte()
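/*
 * Editor's sketch of the 128-bit update above: on x86-64 a 16-byte
 * aligned object can be swapped atomically with cmpxchg16b, which GCC
 * and Clang expose through __atomic_compare_exchange_n on __int128
 * (build with -mcx16). This mirrors what try_cmpxchg128() does; it is
 * not the kernel implementation.
 */
#include <stdio.h>

int main(void)
{
	_Alignas(16) unsigned __int128 irte = 0;
	unsigned __int128 old = irte;
	unsigned __int128 new = ((unsigned __int128)1 << 64) | 0x42;

	if (__atomic_compare_exchange_n(&irte, &old, new, 0,
					__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
		puts("128-bit IRTE updated in one shot");
	return 0;
}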
201 if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu) in map_hpet_to_iommu()
202 return ir_hpet[i].iommu; in map_hpet_to_iommu()
212 if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu) in map_ioapic_to_iommu()
213 return ir_ioapic[i].iommu; in map_ioapic_to_iommu()
222 return drhd ? drhd->iommu->ir_domain : NULL; in map_dev_to_ir()
228 struct intel_iommu *iommu; in clear_entries() local
231 if (irq_iommu->sub_handle) in clear_entries()
234 iommu = irq_iommu->iommu; in clear_entries()
235 index = irq_iommu->irte_index; in clear_entries()
237 start = iommu->ir_table->base + index; in clear_entries()
238 end = start + (1 << irq_iommu->irte_mask); in clear_entries()
241 WRITE_ONCE(entry->low, 0); in clear_entries()
242 WRITE_ONCE(entry->high, 0); in clear_entries()
244 bitmap_release_region(iommu->ir_table->bitmap, index, in clear_entries()
245 irq_iommu->irte_mask); in clear_entries()
247 return qi_flush_iec(iommu, index, irq_iommu->irte_mask); in clear_entries()
255 #define SVT_VERIFY_BUS 0x2 /* verify bus of request-id */
258 * source-id qualifier
260 #define SQ_ALL_16 0x0 /* verify all 16 bits of request-id */
280 irte->svt = svt; in set_irte_sid()
281 irte->sq = sq; in set_irte_sid()
282 irte->sid = sid; in set_irte_sid()
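/*
 * Editor's sketch: the source-id ("SID") stored in the IRTE is the PCI
 * requester-id, bus number in the high byte and devfn in the low byte,
 * i.e. what the kernel's PCI_DEVID() macro computes. With
 * SVT_VERIFY_SID_SQ + SQ_ALL_16 the hardware compares all 16 bits of
 * the requester-id against this value. Standalone illustration; the
 * macro name below is hypothetical.
 */
#include <stdio.h>

#define EXAMPLE_PCI_DEVID(bus, devfn)	((((unsigned int)(bus)) << 8) | (devfn))

int main(void)
{
	/* device 00:1f.6 -> devfn = (0x1f << 3) | 6 = 0xfe */
	unsigned int sid = EXAMPLE_PCI_DEVID(0x00, (0x1f << 3) | 6);

	printf("sid = 0x%04x\n", sid);	/* prints 0x00fe */
	return 0;
}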
287 * this IRTE must have a requester-id whose bus number is between or equal
303 return -1; in set_ioapic_sid()
306 if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) { in set_ioapic_sid()
313 pr_warn("Failed to set source-id of IOAPIC (%d)\n", apic); in set_ioapic_sid()
314 return -1; in set_ioapic_sid()
328 return -1; in set_hpet_sid()
331 if (ir_hpet[i].iommu && ir_hpet[i].id == id) { in set_hpet_sid()
338 pr_warn("Failed to set source-id of HPET block (%d)\n", id); in set_hpet_sid()
339 return -1; in set_hpet_sid()
363 if (data->count == 0 || PCI_BUS_NUM(alias) == PCI_BUS_NUM(data->alias)) in set_msi_sid_cb()
364 data->busmatch_count++; in set_msi_sid_cb()
366 data->pdev = pdev; in set_msi_sid_cb()
367 data->alias = alias; in set_msi_sid_cb()
368 data->count++; in set_msi_sid_cb()
378 return -1; in set_msi_sid()
385 * DMA alias provides us with a PCI device and alias. The only case in set_msi_sid()
387 * device is the case of a PCIe-to-PCI bridge, where the alias is for in set_msi_sid()
402 if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number) in set_msi_sid()
404 dev->bus->number); in set_msi_sid()
406 set_irte_verify_bus(irte, dev->bus->number, dev->bus->number); in set_msi_sid()
407 else if (data.pdev->bus->number != dev->bus->number) in set_msi_sid()
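/*
 * Editor's sketch: in the bridge case above the exact requester-id is
 * unknowable, so the SID field is reused as a bus range via
 * SVT_VERIFY_BUS: start bus in the high byte, end bus in the low byte,
 * and hardware accepts any request whose bus number falls inside the
 * range. Standalone C, helper name hypothetical.
 */
#include <stdio.h>

static unsigned short example_bus_range_sid(unsigned char start_bus,
					    unsigned char end_bus)
{
	return ((unsigned short)start_bus << 8) | end_bus;
}

int main(void)
{
	/* a PCIe-to-PCI bridge forwarding for buses 0x04..0x04 */
	printf("sid = 0x%04x\n", example_bus_range_sid(0x04, 0x04));
	return 0;
}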
416 static int iommu_load_old_irte(struct intel_iommu *iommu) in iommu_load_old_irte() argument
424 /* Check whether the old ir-table has the same size as ours */ in iommu_load_old_irte()
425 irta = dmar_readq(iommu->reg + DMAR_IRTA_REG); in iommu_load_old_irte()
428 return -EINVAL; in iommu_load_old_irte()
436 return -ENOMEM; in iommu_load_old_irte()
439 memcpy(iommu->ir_table->base, old_ir_table, size); in iommu_load_old_irte()
441 __iommu_flush_cache(iommu, iommu->ir_table->base, size); in iommu_load_old_irte()
448 if (iommu->ir_table->base[i].present) in iommu_load_old_irte()
449 bitmap_set(iommu->ir_table->bitmap, i, 1); in iommu_load_old_irte()
458 static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode) in iommu_set_irq_remapping() argument
464 addr = virt_to_phys((void *)iommu->ir_table->base); in iommu_set_irq_remapping()
466 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_set_irq_remapping()
468 dmar_writeq(iommu->reg + DMAR_IRTA_REG, in iommu_set_irq_remapping()
471 /* Set interrupt-remapping table pointer */ in iommu_set_irq_remapping()
472 writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG); in iommu_set_irq_remapping()
474 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_set_irq_remapping()
476 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_set_irq_remapping()
482 if (!cap_esirtps(iommu->cap)) in iommu_set_irq_remapping()
483 qi_global_iec(iommu); in iommu_set_irq_remapping()
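/*
 * Editor's sketch of the DMAR_IRTA_REG write above: the register packs
 * the table's physical address (4K aligned, bits 63:12) with the
 * extended-interrupt-mode bit (EIME, bit 11) and a size field S in
 * bits 3:0, where the table holds 2^(S+1) entries; S = 0xf selects the
 * full 65536-entry table. Standalone C, values illustrative.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t addr = 0x12340000;	/* 4K-aligned table address */
	uint64_t eime = 1ull << 11;	/* x2apic (32-bit dest id) mode */
	uint64_t size = 0xf;		/* 2^(15+1) = 65536 entries */

	printf("IRTA = 0x%llx\n", (unsigned long long)(addr | eime | size));
	return 0;
}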
486 static void iommu_enable_irq_remapping(struct intel_iommu *iommu) in iommu_enable_irq_remapping() argument
491 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_enable_irq_remapping()
493 /* Enable interrupt-remapping */ in iommu_enable_irq_remapping()
494 iommu->gcmd |= DMA_GCMD_IRE; in iommu_enable_irq_remapping()
495 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_irq_remapping()
496 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_enable_irq_remapping()
499 /* Block compatibility-format MSIs */ in iommu_enable_irq_remapping()
501 iommu->gcmd &= ~DMA_GCMD_CFI; in iommu_enable_irq_remapping()
502 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_irq_remapping()
503 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_enable_irq_remapping()
514 "Compatibility-format IRQs enabled despite intr remapping;\n" in iommu_enable_irq_remapping()
517 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_enable_irq_remapping()
520 static int intel_setup_irq_remapping(struct intel_iommu *iommu) in intel_setup_irq_remapping() argument
527 .host_data = iommu, in intel_setup_irq_remapping()
533 if (iommu->ir_table) in intel_setup_irq_remapping()
538 return -ENOMEM; in intel_setup_irq_remapping()
540 /* 1MB - maximum possible interrupt remapping table size */ in intel_setup_irq_remapping()
542 iommu_alloc_pages_node_sz(iommu->node, GFP_KERNEL, SZ_1M); in intel_setup_irq_remapping()
544 pr_err("IR%d: failed to allocate 1M of pages\n", iommu->seq_id); in intel_setup_irq_remapping()
550 pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id); in intel_setup_irq_remapping()
554 info.fwnode = irq_domain_alloc_named_id_fwnode("INTEL-IR", iommu->seq_id); in intel_setup_irq_remapping()
558 iommu->ir_domain = msi_create_parent_irq_domain(&info, &dmar_msi_parent_ops); in intel_setup_irq_remapping()
559 if (!iommu->ir_domain) { in intel_setup_irq_remapping()
560 pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id); in intel_setup_irq_remapping()
564 ir_table->base = ir_table_base; in intel_setup_irq_remapping()
565 ir_table->bitmap = bitmap; in intel_setup_irq_remapping()
566 iommu->ir_table = ir_table; in intel_setup_irq_remapping()
572 if (!iommu->qi) { in intel_setup_irq_remapping()
576 dmar_fault(-1, iommu); in intel_setup_irq_remapping()
577 dmar_disable_qi(iommu); in intel_setup_irq_remapping()
579 if (dmar_enable_qi(iommu)) { in intel_setup_irq_remapping()
585 init_ir_status(iommu); in intel_setup_irq_remapping()
587 if (ir_pre_enabled(iommu)) { in intel_setup_irq_remapping()
590 iommu->name); in intel_setup_irq_remapping()
591 clear_ir_pre_enabled(iommu); in intel_setup_irq_remapping()
592 iommu_disable_irq_remapping(iommu); in intel_setup_irq_remapping()
593 } else if (iommu_load_old_irte(iommu)) in intel_setup_irq_remapping()
595 iommu->name); in intel_setup_irq_remapping()
598 iommu->name); in intel_setup_irq_remapping()
601 iommu_set_irq_remapping(iommu, eim_mode); in intel_setup_irq_remapping()
606 irq_domain_remove(iommu->ir_domain); in intel_setup_irq_remapping()
607 iommu->ir_domain = NULL; in intel_setup_irq_remapping()
617 iommu->ir_table = NULL; in intel_setup_irq_remapping()
619 return -ENOMEM; in intel_setup_irq_remapping()
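/*
 * Editor's note on the SZ_1M allocation above: each IRTE is 128 bits
 * (16 bytes) and the table supports up to 65536 entries, so the
 * maximum table size is 65536 * 16 = 1 MiB, and the allocation bitmap
 * needs one bit per entry, i.e. 8 KiB. A standalone compile-time check
 * (constants mirror sizeof(struct irte) and INTR_REMAP_TABLE_ENTRIES):
 */
#include <assert.h>

#define EXAMPLE_IRTE_SIZE	16
#define EXAMPLE_NR_IRTES	65536

static_assert(EXAMPLE_IRTE_SIZE * EXAMPLE_NR_IRTES == 1024 * 1024,
	      "maximum IRT size is 1 MiB");
static_assert(EXAMPLE_NR_IRTES / 8 == 8192,
	      "allocation bitmap is 8 KiB");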
622 static void intel_teardown_irq_remapping(struct intel_iommu *iommu) in intel_teardown_irq_remapping() argument
626 if (iommu && iommu->ir_table) { in intel_teardown_irq_remapping()
627 if (iommu->ir_domain) { in intel_teardown_irq_remapping()
628 fn = iommu->ir_domain->fwnode; in intel_teardown_irq_remapping()
630 irq_domain_remove(iommu->ir_domain); in intel_teardown_irq_remapping()
632 iommu->ir_domain = NULL; in intel_teardown_irq_remapping()
634 iommu_free_pages(iommu->ir_table->base); in intel_teardown_irq_remapping()
635 bitmap_free(iommu->ir_table->bitmap); in intel_teardown_irq_remapping()
636 kfree(iommu->ir_table); in intel_teardown_irq_remapping()
637 iommu->ir_table = NULL; in intel_teardown_irq_remapping()
644 static void iommu_disable_irq_remapping(struct intel_iommu *iommu) in iommu_disable_irq_remapping() argument
649 if (!ecap_ir_support(iommu->ecap)) in iommu_disable_irq_remapping()
654 * interrupt-remapping. in iommu_disable_irq_remapping()
656 if (!cap_esirtps(iommu->cap)) in iommu_disable_irq_remapping()
657 qi_global_iec(iommu); in iommu_disable_irq_remapping()
659 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_disable_irq_remapping()
661 sts = readl(iommu->reg + DMAR_GSTS_REG); in iommu_disable_irq_remapping()
665 iommu->gcmd &= ~DMA_GCMD_IRE; in iommu_disable_irq_remapping()
666 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_disable_irq_remapping()
668 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_disable_irq_remapping()
672 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_disable_irq_remapping()
681 return dmar->flags & DMAR_X2APIC_OPT_OUT; in dmar_x2apic_optout()
687 struct intel_iommu *iommu; in intel_cleanup_irq_remapping() local
689 for_each_iommu(iommu, drhd) { in intel_cleanup_irq_remapping()
690 if (ecap_ir_support(iommu->ecap)) { in intel_cleanup_irq_remapping()
691 iommu_disable_irq_remapping(iommu); in intel_cleanup_irq_remapping()
692 intel_teardown_irq_remapping(iommu); in intel_cleanup_irq_remapping()
697 pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n"); in intel_cleanup_irq_remapping()
703 struct intel_iommu *iommu; in intel_prepare_irq_remapping() local
713 return -ENODEV; in intel_prepare_irq_remapping()
717 return -ENODEV; in intel_prepare_irq_remapping()
720 return -ENODEV; in intel_prepare_irq_remapping()
728 for_each_iommu(iommu, drhd) in intel_prepare_irq_remapping()
729 if (!ecap_ir_support(iommu->ecap)) in intel_prepare_irq_remapping()
741 for_each_iommu(iommu, drhd) { in intel_prepare_irq_remapping()
742 if (eim && !ecap_eim_support(iommu->ecap)) { in intel_prepare_irq_remapping()
743 pr_info("%s does not support EIM\n", iommu->name); in intel_prepare_irq_remapping()
750 pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n"); in intel_prepare_irq_remapping()
753 for_each_iommu(iommu, drhd) { in intel_prepare_irq_remapping()
754 if (intel_setup_irq_remapping(iommu)) { in intel_prepare_irq_remapping()
756 iommu->name); in intel_prepare_irq_remapping()
765 return -ENODEV; in intel_prepare_irq_remapping()
769 * Set Posted-Interrupts capability.
774 struct intel_iommu *iommu; in set_irq_posting_cap() local
779 * 64-bit boundary, we need to use cmpxchg16b to atomically update in set_irq_posting_cap()
780 * it. We only expose posted-interrupt when X86_FEATURE_CX16 in set_irq_posting_cap()
788 for_each_iommu(iommu, drhd) in set_irq_posting_cap()
789 if (!cap_pi_support(iommu->cap)) { in set_irq_posting_cap()
800 struct intel_iommu *iommu; in intel_enable_irq_remapping() local
804 * Setup Interrupt-remapping for all the DRHD's now. in intel_enable_irq_remapping()
806 for_each_iommu(iommu, drhd) { in intel_enable_irq_remapping()
807 if (!ir_pre_enabled(iommu)) in intel_enable_irq_remapping()
808 iommu_enable_irq_remapping(iommu); in intel_enable_irq_remapping()
825 return -1; in intel_enable_irq_remapping()
829 struct intel_iommu *iommu, in ir_parse_one_hpet_scope() argument
834 int count, free = -1; in ir_parse_one_hpet_scope()
836 bus = scope->bus; in ir_parse_one_hpet_scope()
838 count = (scope->length - sizeof(struct acpi_dmar_device_scope)) in ir_parse_one_hpet_scope()
841 while (--count > 0) { in ir_parse_one_hpet_scope()
843 * Access PCI directly due to the PCI in ir_parse_one_hpet_scope()
846 bus = read_pci_config_byte(bus, path->device, path->function, in ir_parse_one_hpet_scope()
852 if (ir_hpet[count].iommu == iommu && in ir_parse_one_hpet_scope()
853 ir_hpet[count].id == scope->enumeration_id) in ir_parse_one_hpet_scope()
855 else if (ir_hpet[count].iommu == NULL && free == -1) in ir_parse_one_hpet_scope()
858 if (free == -1) { in ir_parse_one_hpet_scope()
860 return -ENOSPC; in ir_parse_one_hpet_scope()
863 ir_hpet[free].iommu = iommu; in ir_parse_one_hpet_scope()
864 ir_hpet[free].id = scope->enumeration_id; in ir_parse_one_hpet_scope()
866 ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function); in ir_parse_one_hpet_scope()
868 scope->enumeration_id, drhd->address); in ir_parse_one_hpet_scope()
874 struct intel_iommu *iommu, in ir_parse_one_ioapic_scope() argument
879 int count, free = -1; in ir_parse_one_ioapic_scope()
881 bus = scope->bus; in ir_parse_one_ioapic_scope()
883 count = (scope->length - sizeof(struct acpi_dmar_device_scope)) in ir_parse_one_ioapic_scope()
886 while (--count > 0) { in ir_parse_one_ioapic_scope()
888 * Access PCI directly due to the PCI in ir_parse_one_ioapic_scope()
891 bus = read_pci_config_byte(bus, path->device, path->function, in ir_parse_one_ioapic_scope()
897 if (ir_ioapic[count].iommu == iommu && in ir_parse_one_ioapic_scope()
898 ir_ioapic[count].id == scope->enumeration_id) in ir_parse_one_ioapic_scope()
900 else if (ir_ioapic[count].iommu == NULL && free == -1) in ir_parse_one_ioapic_scope()
903 if (free == -1) { in ir_parse_one_ioapic_scope()
905 return -ENOSPC; in ir_parse_one_ioapic_scope()
909 ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function); in ir_parse_one_ioapic_scope()
910 ir_ioapic[free].iommu = iommu; in ir_parse_one_ioapic_scope()
911 ir_ioapic[free].id = scope->enumeration_id; in ir_parse_one_ioapic_scope()
912 pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n", in ir_parse_one_ioapic_scope()
913 scope->enumeration_id, drhd->address, iommu->seq_id); in ir_parse_one_ioapic_scope()
919 struct intel_iommu *iommu) in ir_parse_ioapic_hpet_scope() argument
928 end = ((void *)drhd) + header->length; in ir_parse_ioapic_hpet_scope()
932 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) in ir_parse_ioapic_hpet_scope()
933 ret = ir_parse_one_ioapic_scope(scope, iommu, drhd); in ir_parse_ioapic_hpet_scope()
934 else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) in ir_parse_ioapic_hpet_scope()
935 ret = ir_parse_one_hpet_scope(scope, iommu, drhd); in ir_parse_ioapic_hpet_scope()
936 start += scope->length; in ir_parse_ioapic_hpet_scope()
942 static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu) in ir_remove_ioapic_hpet_scope() argument
947 if (ir_hpet[i].iommu == iommu) in ir_remove_ioapic_hpet_scope()
948 ir_hpet[i].iommu = NULL; in ir_remove_ioapic_hpet_scope()
951 if (ir_ioapic[i].iommu == iommu) in ir_remove_ioapic_hpet_scope()
952 ir_ioapic[i].iommu = NULL; in ir_remove_ioapic_hpet_scope()
956 * Finds the association between IOAPICs and their Interrupt-remapping
962 struct intel_iommu *iommu; in parse_ioapics_under_ir() local
966 for_each_iommu(iommu, drhd) { in parse_ioapics_under_ir()
969 if (!ecap_ir_support(iommu->ecap)) in parse_ioapics_under_ir()
972 ret = ir_parse_ioapic_hpet_scope(drhd->hdr, iommu); in parse_ioapics_under_ir()
980 return -ENODEV; in parse_ioapics_under_ir()
985 pr_err(FW_BUG "ioapic %d has no mapping iommu, " in parse_ioapics_under_ir()
988 return -1; in parse_ioapics_under_ir()
1013 struct intel_iommu *iommu = NULL; in disable_irq_remapping() local
1016 * Disable Interrupt-remapping for all the DRHD's now. in disable_irq_remapping()
1018 for_each_iommu(iommu, drhd) { in disable_irq_remapping()
1019 if (!ecap_ir_support(iommu->ecap)) in disable_irq_remapping()
1022 iommu_disable_irq_remapping(iommu); in disable_irq_remapping()
1026 * Clear Posted-Interrupts capability. in disable_irq_remapping()
1036 struct intel_iommu *iommu = NULL; in reenable_irq_remapping() local
1038 for_each_iommu(iommu, drhd) in reenable_irq_remapping()
1039 if (iommu->qi) in reenable_irq_remapping()
1040 dmar_reenable_qi(iommu); in reenable_irq_remapping()
1043 * Setup Interrupt-remapping for all the DRHD's now. in reenable_irq_remapping()
1045 for_each_iommu(iommu, drhd) { in reenable_irq_remapping()
1046 if (!ecap_ir_support(iommu->ecap)) in reenable_irq_remapping()
1049 /* Set up interrupt remapping for iommu. */ in reenable_irq_remapping()
1050 iommu_set_irq_remapping(iommu, eim); in reenable_irq_remapping()
1051 iommu_enable_irq_remapping(iommu); in reenable_irq_remapping()
1066 return -1; in reenable_irq_remapping()
1074 * already handled by a non-default PCI/MSI interrupt domain. This protects
1079 if (!irq_remapping_enabled || !pci_dev_has_default_msi_parent_domain(info->dev)) in intel_irq_remap_add_device()
1082 dev_set_msi_domain(&info->dev->dev, map_dev_to_ir(info->dev)); in intel_irq_remap_add_device()
1089 irte->present = 1; in prepare_irte()
1090 irte->dst_mode = apic->dest_mode_logical; in prepare_irte()
1092 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the in prepare_irte()
1093 * actual level or edge trigger will be setup in the IO-APIC in prepare_irte()
1095 * For more details, see the comments (in io_apic.c) explaining IO-APIC in prepare_irte()
1096 * irq migration in the presence of interrupt-remapping. in prepare_irte()
1098 irte->trigger_mode = 0; in prepare_irte()
1099 irte->dlvry_mode = APIC_DELIVERY_MODE_FIXED; in prepare_irte()
1100 irte->vector = vector; in prepare_irte()
1101 irte->dest_id = IRTE_DEST(dest); in prepare_irte()
1102 irte->redir_hint = 1; in prepare_irte()
1109 irte->present = 1; in prepare_irte_posted()
1110 irte->p_pst = 1; in prepare_irte_posted()
1135 struct intel_ir_data *ir_data = irqd->chip_data; in intel_ir_reconfigure_irte_posted()
1136 struct irte *irte = &ir_data->irte_entry; in intel_ir_reconfigure_irte_posted()
1143 pr_warn("Failed to setup IRQ %d for posted mode", irqd->irq); in intel_ir_reconfigure_irte_posted()
1152 irte_pi.pda_l = (pid_addr >> (32 - PDA_LOW_BIT)) & ~(-1UL << PDA_LOW_BIT); in intel_ir_reconfigure_irte_posted()
1153 irte_pi.pda_h = (pid_addr >> 32) & ~(-1UL << PDA_HIGH_BIT); in intel_ir_reconfigure_irte_posted()
1155 modify_irte(&ir_data->irq_2_iommu, &irte_pi); in intel_ir_reconfigure_irte_posted()
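/*
 * Editor's sketch of the PDA split above: the posted-interrupt
 * descriptor is 64-byte aligned, so its address bits 5:0 are zero;
 * pda_l carries address bits 31:6 (PDA_LOW_BIT = 26 bits) and pda_h
 * carries bits 63:32 (PDA_HIGH_BIT = 32 bits). Standalone round-trip
 * check with an illustrative address:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pid_addr = 0x123456780ull & ~0x3full; /* 64-byte aligned */
	uint64_t pda_l = (pid_addr >> 6) & ((1ull << 26) - 1);
	uint64_t pda_h = pid_addr >> 32;
	uint64_t back = (pda_h << 32) | (pda_l << 6);

	printf("%s\n", back == pid_addr ? "round-trip ok" : "mismatch");
	return 0;
}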
1164 struct intel_ir_data *ir_data = irqd->chip_data; in __intel_ir_reconfigure_irte()
1170 if (ir_data->irq_2_iommu.posted_vcpu && !force_host) in __intel_ir_reconfigure_irte()
1173 ir_data->irq_2_iommu.posted_vcpu = false; in __intel_ir_reconfigure_irte()
1175 if (ir_data->irq_2_iommu.posted_msi) in __intel_ir_reconfigure_irte()
1178 modify_irte(&ir_data->irq_2_iommu, &ir_data->irte_entry); in __intel_ir_reconfigure_irte()
1183 struct intel_ir_data *ir_data = irqd->chip_data; in intel_ir_reconfigure_irte()
1184 struct irte *irte = &ir_data->irte_entry; in intel_ir_reconfigure_irte()
1191 irte->vector = cfg->vector; in intel_ir_reconfigure_irte()
1192 irte->dest_id = IRTE_DEST(cfg->dest_apicid); in intel_ir_reconfigure_irte()
1198 * Migrate the IO-APIC irq in the presence of intr-remapping.
1203 * For level triggered, we eliminate the io-apic RTE modification (with the
1204 * updated vector information), by using a virtual vector (io-apic pin number).
1206 * the interrupt-remapping table entry.
1209 * is used to migrate MSI irq's in the presence of interrupt-remapping.
1215 struct irq_data *parent = data->parent_data; in intel_ir_set_affinity()
1219 ret = parent->chip->irq_set_affinity(parent, mask, force); in intel_ir_set_affinity()
1237 struct intel_ir_data *ir_data = irq_data->chip_data; in intel_ir_compose_msi_msg()
1239 *msg = ir_data->msi_entry; in intel_ir_compose_msi_msg()
1244 struct intel_ir_data *ir_data = data->chip_data; in intel_ir_set_vcpu_affinity()
1261 dmar_copy_shared_irte(&irte_pi, &ir_data->irte_entry); in intel_ir_set_vcpu_affinity()
1266 irte_pi.p_vector = pi_data->vector; in intel_ir_set_vcpu_affinity()
1267 irte_pi.pda_l = (pi_data->pi_desc_addr >> in intel_ir_set_vcpu_affinity()
1268 (32 - PDA_LOW_BIT)) & ~(-1UL << PDA_LOW_BIT); in intel_ir_set_vcpu_affinity()
1269 irte_pi.pda_h = (pi_data->pi_desc_addr >> 32) & in intel_ir_set_vcpu_affinity()
1270 ~(-1UL << PDA_HIGH_BIT); in intel_ir_set_vcpu_affinity()
1272 ir_data->irq_2_iommu.posted_vcpu = true; in intel_ir_set_vcpu_affinity()
1273 modify_irte(&ir_data->irq_2_iommu, &irte_pi); in intel_ir_set_vcpu_affinity()
1280 .name = "INTEL-IR",
1324 .name = "INTEL-IR-POST",
1335 msg->arch_addr_lo.dmar_base_address = X86_MSI_BASE_ADDRESS_LOW; in fill_msi_msg()
1336 msg->arch_addr_lo.dmar_subhandle_valid = true; in fill_msi_msg()
1337 msg->arch_addr_lo.dmar_format = true; in fill_msi_msg()
1338 msg->arch_addr_lo.dmar_index_0_14 = index & 0x7FFF; in fill_msi_msg()
1339 msg->arch_addr_lo.dmar_index_15 = !!(index & 0x8000); in fill_msi_msg()
1341 msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH; in fill_msi_msg()
1343 msg->arch_data.dmar_subhandle = subhandle; in fill_msi_msg()
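/*
 * Editor's sketch of the remappable MSI layout filled in above: the
 * address keeps 0xFEE in bits 31:20, handle bits 14:0 in bits 19:5,
 * the "remappable format" flag in bit 4, subhandle-valid (SHV) in
 * bit 3 and handle bit 15 in bit 2; the subhandle itself travels in
 * the MSI data register. Standalone composition, helper name
 * hypothetical:
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t example_ir_msi_addr(uint16_t index)
{
	return (0xFEEu << 20) |
	       ((uint32_t)(index & 0x7FFF) << 5) |
	       (1u << 4) |			/* remappable format */
	       (1u << 3) |			/* subhandle valid */
	       (((index >> 15) & 1u) << 2);	/* handle bit 15 */
}

int main(void)
{
	printf("addr_lo = 0x%08x\n", example_ir_msi_addr(0x1234));
	return 0;
}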
1351 struct irte *irte = &data->irte_entry; in intel_irq_remapping_prepare_irte()
1353 prepare_irte(irte, irq_cfg->vector, irq_cfg->dest_apicid); in intel_irq_remapping_prepare_irte()
1355 switch (info->type) { in intel_irq_remapping_prepare_irte()
1357 /* Set source-id of interrupt request */ in intel_irq_remapping_prepare_irte()
1358 set_ioapic_sid(irte, info->devid); in intel_irq_remapping_prepare_irte()
1360 info->devid, irte->present, irte->fpd, irte->dst_mode, in intel_irq_remapping_prepare_irte()
1361 irte->redir_hint, irte->trigger_mode, irte->dlvry_mode, in intel_irq_remapping_prepare_irte()
1362 irte->avail, irte->vector, irte->dest_id, irte->sid, in intel_irq_remapping_prepare_irte()
1363 irte->sq, irte->svt); in intel_irq_remapping_prepare_irte()
1364 sub_handle = info->ioapic.pin; in intel_irq_remapping_prepare_irte()
1367 set_hpet_sid(irte, info->devid); in intel_irq_remapping_prepare_irte()
1373 data->irq_2_iommu.posted_msi = 1; in intel_irq_remapping_prepare_irte()
1377 pci_real_dma_dev(msi_desc_to_pci_dev(info->desc))); in intel_irq_remapping_prepare_irte()
1383 fill_msi_msg(&data->msi_entry, index, sub_handle); in intel_irq_remapping_prepare_irte()
1396 if (irq_data && irq_data->chip_data) { in intel_free_irq_resources()
1397 data = irq_data->chip_data; in intel_free_irq_resources()
1398 irq_iommu = &data->irq_2_iommu; in intel_free_irq_resources()
1412 struct intel_iommu *iommu = domain->host_data; in intel_irq_remapping_alloc() local
1419 if (!info || !iommu) in intel_irq_remapping_alloc()
1420 return -EINVAL; in intel_irq_remapping_alloc()
1421 if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI) in intel_irq_remapping_alloc()
1422 return -EINVAL; in intel_irq_remapping_alloc()
1428 ret = -ENOMEM; in intel_irq_remapping_alloc()
1433 index = alloc_irte(iommu, &data->irq_2_iommu, nr_irqs); in intel_irq_remapping_alloc()
1446 ret = -EINVAL; in intel_irq_remapping_alloc()
1455 ird->irq_2_iommu = data->irq_2_iommu; in intel_irq_remapping_alloc()
1456 ird->irq_2_iommu.sub_handle = i; in intel_irq_remapping_alloc()
1461 irq_data->hwirq = (index << 16) + i; in intel_irq_remapping_alloc()
1462 irq_data->chip_data = ird; in intel_irq_remapping_alloc()
1464 ((info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI) || in intel_irq_remapping_alloc()
1465 (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX))) in intel_irq_remapping_alloc()
1466 irq_data->chip = &intel_ir_chip_post_msi; in intel_irq_remapping_alloc()
1468 irq_data->chip = &intel_ir_chip; in intel_irq_remapping_alloc()
1497 struct intel_ir_data *data = irq_data->chip_data; in intel_irq_remapping_deactivate()
1500 WARN_ON_ONCE(data->irq_2_iommu.posted_vcpu); in intel_irq_remapping_deactivate()
1501 data->irq_2_iommu.posted_vcpu = false; in intel_irq_remapping_deactivate()
1504 modify_irte(&data->irq_2_iommu, &entry); in intel_irq_remapping_deactivate()
1511 struct intel_iommu *iommu = NULL; in intel_irq_remapping_select() local
1514 iommu = map_ioapic_to_iommu(fwspec->param[0]); in intel_irq_remapping_select()
1516 iommu = map_hpet_to_iommu(fwspec->param[0]); in intel_irq_remapping_select()
1518 return iommu && d == iommu->ir_domain; in intel_irq_remapping_select()
1533 .prefix = "IR-",
1540 static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu) in dmar_ir_add() argument
1545 if (eim && !ecap_eim_support(iommu->ecap)) { in dmar_ir_add()
1547 iommu->reg_phys, iommu->ecap); in dmar_ir_add()
1548 return -ENODEV; in dmar_ir_add()
1551 if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) { in dmar_ir_add()
1553 iommu->reg_phys); in dmar_ir_add()
1554 return -ENODEV; in dmar_ir_add()
1557 /* TODO: check all IOAPICs are covered by IOMMU */ in dmar_ir_add()
1559 /* Setup Interrupt-remapping now. */ in dmar_ir_add()
1560 ret = intel_setup_irq_remapping(iommu); in dmar_ir_add()
1563 iommu->name); in dmar_ir_add()
1564 intel_teardown_irq_remapping(iommu); in dmar_ir_add()
1565 ir_remove_ioapic_hpet_scope(iommu); in dmar_ir_add()
1567 iommu_enable_irq_remapping(iommu); in dmar_ir_add()
1576 struct intel_iommu *iommu = dmaru->iommu; in dmar_ir_hotplug() local
1580 if (iommu == NULL) in dmar_ir_hotplug()
1581 return -EINVAL; in dmar_ir_hotplug()
1582 if (!ecap_ir_support(iommu->ecap)) in dmar_ir_hotplug()
1585 !cap_pi_support(iommu->cap)) in dmar_ir_hotplug()
1586 return -EBUSY; in dmar_ir_hotplug()
1589 if (!iommu->ir_table) in dmar_ir_hotplug()
1590 ret = dmar_ir_add(dmaru, iommu); in dmar_ir_hotplug()
1592 if (iommu->ir_table) { in dmar_ir_hotplug()
1593 if (!bitmap_empty(iommu->ir_table->bitmap, in dmar_ir_hotplug()
1595 ret = -EBUSY; in dmar_ir_hotplug()
1597 iommu_disable_irq_remapping(iommu); in dmar_ir_hotplug()
1598 intel_teardown_irq_remapping(iommu); in dmar_ir_hotplug()
1599 ir_remove_ioapic_hpet_scope(iommu); in dmar_ir_hotplug()