Lines Matching full:iommu
24 #include "iommu.h"
26 #include "../iommu-pages.h"
35 struct intel_iommu *iommu; member
42 struct intel_iommu *iommu; member
49 struct intel_iommu *iommu; member
77 * ->iommu->register_lock
86 static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
90 static bool ir_pre_enabled(struct intel_iommu *iommu) in ir_pre_enabled() argument
92 return (iommu->flags & VTD_FLAG_IRQ_REMAP_PRE_ENABLED); in ir_pre_enabled()
95 static void clear_ir_pre_enabled(struct intel_iommu *iommu) in clear_ir_pre_enabled() argument
97 iommu->flags &= ~VTD_FLAG_IRQ_REMAP_PRE_ENABLED; in clear_ir_pre_enabled()
100 static void init_ir_status(struct intel_iommu *iommu) in init_ir_status() argument
104 gsts = readl(iommu->reg + DMAR_GSTS_REG); in init_ir_status()
106 iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED; in init_ir_status()
109 static int alloc_irte(struct intel_iommu *iommu, in alloc_irte() argument
112 struct ir_table *table = iommu->ir_table; in alloc_irte()
125 if (mask > ecap_max_handle_mask(iommu->ecap)) { in alloc_irte()
128 ecap_max_handle_mask(iommu->ecap)); in alloc_irte()
136 pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id); in alloc_irte()
138 irq_iommu->iommu = iommu; in alloc_irte()
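
The alloc_irte() matches above cover how a request for several contiguous vectors gets an order-aligned block of IRTEs out of the ir_table bitmap. Below is a minimal, self-contained userspace sketch of that allocation idea; it is not kernel code, and TABLE_ENTRIES, used[] and toy_find_free_region() are invented names (the real driver uses bitmap_find_free_region() on ir_table->bitmap).

#include <stdio.h>
#include <stdbool.h>

#define TABLE_ENTRIES 64             /* toy table; the real IRT can hold up to 65536 entries */

static bool used[TABLE_ENTRIES];     /* stands in for ir_table->bitmap */

/* Find and claim a free, naturally aligned run of (1 << order) entries. */
static int toy_find_free_region(int order)
{
	int size = 1 << order;

	for (int start = 0; start + size <= TABLE_ENTRIES; start += size) {
		bool avail = true;

		for (int i = start; i < start + size; i++)
			if (used[i])
				avail = false;
		if (!avail)
			continue;
		for (int i = start; i < start + size; i++)
			used[i] = true;
		return start;        /* index of the first IRTE in the block */
	}
	return -1;                   /* mirrors the "can't allocate an IRTE" warning */
}

int main(void)
{
	printf("single IRTE at index %d\n", toy_find_free_region(0));
	printf("block of 4 IRTEs at index %d\n", toy_find_free_region(2));
	return 0;
}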
149 static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask) in qi_flush_iec() argument
159 return qi_submit_sync(iommu, &desc, 1, 0); in qi_flush_iec()
165 struct intel_iommu *iommu; in modify_irte() local
175 iommu = irq_iommu->iommu; in modify_irte()
178 irte = &iommu->ir_table->base[index]; in modify_irte()
193 __iommu_flush_cache(iommu, irte, sizeof(*irte)); in modify_irte()
195 rc = qi_flush_iec(iommu, index, 0); in modify_irte()
197 /* Update iommu mode according to the IRTE mode */ in modify_irte()
209 if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu) in map_hpet_to_iommu()
210 return ir_hpet[i].iommu; in map_hpet_to_iommu()
220 if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu) in map_ioapic_to_iommu()
221 return ir_ioapic[i].iommu; in map_ioapic_to_iommu()
230 return drhd ? drhd->iommu->ir_domain : NULL; in map_dev_to_ir()
236 struct intel_iommu *iommu; in clear_entries() local
242 iommu = irq_iommu->iommu; in clear_entries()
245 start = iommu->ir_table->base + index; in clear_entries()
252 bitmap_release_region(iommu->ir_table->bitmap, index, in clear_entries()
255 return qi_flush_iec(iommu, index, irq_iommu->irte_mask); in clear_entries()
314 if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) { in set_ioapic_sid()
339 if (ir_hpet[i].iommu && ir_hpet[i].id == id) { in set_hpet_sid()
424 static int iommu_load_old_irte(struct intel_iommu *iommu) in iommu_load_old_irte() argument
433 irta = dmar_readq(iommu->reg + DMAR_IRTA_REG); in iommu_load_old_irte()
447 memcpy(iommu->ir_table->base, old_ir_table, size); in iommu_load_old_irte()
449 __iommu_flush_cache(iommu, iommu->ir_table->base, size); in iommu_load_old_irte()
456 if (iommu->ir_table->base[i].present) in iommu_load_old_irte()
457 bitmap_set(iommu->ir_table->bitmap, i, 1); in iommu_load_old_irte()
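
The iommu_load_old_irte() hits show the kexec/kdump path: the table set up by the previous kernel is copied over, and every entry whose present bit is set is re-reserved in the bitmap so new allocations cannot clobber an interrupt that is still live. A rough, plain-C sketch of those two steps follows; toy_irte and the arrays are invented stand-ins for struct irte and the real table/bitmap.

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

#define ENTRIES 8

struct toy_irte { unsigned present : 1; unsigned vector : 8; };

int main(void)
{
	struct toy_irte old_table[ENTRIES] = { [2] = { 1, 0x31 }, [5] = { 1, 0x41 } };
	struct toy_irte new_table[ENTRIES];
	bool allocated[ENTRIES] = { false };

	/* Step 1: inherit the previous kernel's table verbatim (the memcpy). */
	memcpy(new_table, old_table, sizeof(new_table));

	/* Step 2: rebuild the allocation bitmap from the 'present' bits. */
	for (int i = 0; i < ENTRIES; i++)
		if (new_table[i].present)
			allocated[i] = true;

	for (int i = 0; i < ENTRIES; i++)
		if (allocated[i])
			printf("entry %d stays reserved (vector 0x%x)\n",
			       i, new_table[i].vector);
	return 0;
}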
466 static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode) in iommu_set_irq_remapping() argument
472 addr = virt_to_phys((void *)iommu->ir_table->base); in iommu_set_irq_remapping()
474 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_set_irq_remapping()
476 dmar_writeq(iommu->reg + DMAR_IRTA_REG, in iommu_set_irq_remapping()
480 writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG); in iommu_set_irq_remapping()
482 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_set_irq_remapping()
484 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_set_irq_remapping()
490 if (!cap_esirtps(iommu->cap)) in iommu_set_irq_remapping()
491 qi_global_iec(iommu); in iommu_set_irq_remapping()
494 static void iommu_enable_irq_remapping(struct intel_iommu *iommu) in iommu_enable_irq_remapping() argument
499 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_enable_irq_remapping()
502 iommu->gcmd |= DMA_GCMD_IRE; in iommu_enable_irq_remapping()
503 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_irq_remapping()
504 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_enable_irq_remapping()
509 iommu->gcmd &= ~DMA_GCMD_CFI; in iommu_enable_irq_remapping()
510 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_irq_remapping()
511 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_enable_irq_remapping()
525 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_enable_irq_remapping()
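
The iommu_set_irq_remapping() and iommu_enable_irq_remapping() matches follow the usual VT-d register handshake: write the table address to DMAR_IRTA_REG, latch it with the one-shot SIRTP command bit, then set IRE, each time polling DMAR_GSTS_REG (IOMMU_WAIT_OP) until the hardware reports the new state. The sketch below simulates that handshake with plain variables; toy_regs, toy_write_gcmd() and wait_for() are inventions, and a real device acknowledges asynchronously rather than instantly.

#include <stdio.h>
#include <stdint.h>

#define CMD_SIRTP (1u << 24)   /* "set interrupt remap table pointer" (one-shot) */
#define CMD_IRE   (1u << 25)   /* "interrupt remapping enable" */
#define STS_IRTPS (1u << 24)
#define STS_IRES  (1u << 25)

struct toy_regs { uint64_t irta; uint32_t gcmd, gsts; };

/* Toy device: acknowledges commands immediately by mirroring them into GSTS. */
static void toy_write_gcmd(struct toy_regs *r, uint32_t val)
{
	r->gcmd = val;
	r->gsts |= val;         /* real hardware sets the status bits asynchronously */
}

static void wait_for(struct toy_regs *r, uint32_t bit)
{
	while (!(r->gsts & bit))
		;               /* IOMMU_WAIT_OP() also has a timeout path */
}

int main(void)
{
	struct toy_regs regs = { 0 };
	uint32_t gcmd = 0;

	regs.irta = 0x12345000 | 0xf;            /* table address | size field */
	toy_write_gcmd(&regs, gcmd | CMD_SIRTP); /* latch the table pointer; SIRTP not cached */
	wait_for(&regs, STS_IRTPS);

	gcmd |= CMD_IRE;                         /* now switch remapping on */
	toy_write_gcmd(&regs, gcmd);
	wait_for(&regs, STS_IRES);

	printf("remapping enabled, gsts=0x%x\n", regs.gsts);
	return 0;
}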
528 static int intel_setup_irq_remapping(struct intel_iommu *iommu) in intel_setup_irq_remapping() argument
535 if (iommu->ir_table) in intel_setup_irq_remapping()
542 ir_table_base = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, in intel_setup_irq_remapping()
546 iommu->seq_id, INTR_REMAP_PAGE_ORDER); in intel_setup_irq_remapping()
552 pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id); in intel_setup_irq_remapping()
556 fn = irq_domain_alloc_named_id_fwnode("INTEL-IR", iommu->seq_id); in intel_setup_irq_remapping()
560 iommu->ir_domain = in intel_setup_irq_remapping()
564 iommu); in intel_setup_irq_remapping()
565 if (!iommu->ir_domain) { in intel_setup_irq_remapping()
566 pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id); in intel_setup_irq_remapping()
570 irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_DMAR); in intel_setup_irq_remapping()
571 iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT | in intel_setup_irq_remapping()
573 iommu->ir_domain->msi_parent_ops = &dmar_msi_parent_ops; in intel_setup_irq_remapping()
577 iommu->ir_table = ir_table; in intel_setup_irq_remapping()
583 if (!iommu->qi) { in intel_setup_irq_remapping()
587 dmar_fault(-1, iommu); in intel_setup_irq_remapping()
588 dmar_disable_qi(iommu); in intel_setup_irq_remapping()
590 if (dmar_enable_qi(iommu)) { in intel_setup_irq_remapping()
596 init_ir_status(iommu); in intel_setup_irq_remapping()
598 if (ir_pre_enabled(iommu)) { in intel_setup_irq_remapping()
601 iommu->name); in intel_setup_irq_remapping()
602 clear_ir_pre_enabled(iommu); in intel_setup_irq_remapping()
603 iommu_disable_irq_remapping(iommu); in intel_setup_irq_remapping()
604 } else if (iommu_load_old_irte(iommu)) in intel_setup_irq_remapping()
606 iommu->name); in intel_setup_irq_remapping()
609 iommu->name); in intel_setup_irq_remapping()
612 iommu_set_irq_remapping(iommu, eim_mode); in intel_setup_irq_remapping()
617 irq_domain_remove(iommu->ir_domain); in intel_setup_irq_remapping()
618 iommu->ir_domain = NULL; in intel_setup_irq_remapping()
628 iommu->ir_table = NULL; in intel_setup_irq_remapping()
633 static void intel_teardown_irq_remapping(struct intel_iommu *iommu) in intel_teardown_irq_remapping() argument
637 if (iommu && iommu->ir_table) { in intel_teardown_irq_remapping()
638 if (iommu->ir_domain) { in intel_teardown_irq_remapping()
639 fn = iommu->ir_domain->fwnode; in intel_teardown_irq_remapping()
641 irq_domain_remove(iommu->ir_domain); in intel_teardown_irq_remapping()
643 iommu->ir_domain = NULL; in intel_teardown_irq_remapping()
645 iommu_free_pages(iommu->ir_table->base, INTR_REMAP_PAGE_ORDER); in intel_teardown_irq_remapping()
646 bitmap_free(iommu->ir_table->bitmap); in intel_teardown_irq_remapping()
647 kfree(iommu->ir_table); in intel_teardown_irq_remapping()
648 iommu->ir_table = NULL; in intel_teardown_irq_remapping()
655 static void iommu_disable_irq_remapping(struct intel_iommu *iommu) in iommu_disable_irq_remapping() argument
660 if (!ecap_ir_support(iommu->ecap)) in iommu_disable_irq_remapping()
667 if (!cap_esirtps(iommu->cap)) in iommu_disable_irq_remapping()
668 qi_global_iec(iommu); in iommu_disable_irq_remapping()
670 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_disable_irq_remapping()
672 sts = readl(iommu->reg + DMAR_GSTS_REG); in iommu_disable_irq_remapping()
676 iommu->gcmd &= ~DMA_GCMD_IRE; in iommu_disable_irq_remapping()
677 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_disable_irq_remapping()
679 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_disable_irq_remapping()
683 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_disable_irq_remapping()
698 struct intel_iommu *iommu; in intel_cleanup_irq_remapping() local
700 for_each_iommu(iommu, drhd) { in intel_cleanup_irq_remapping()
701 if (ecap_ir_support(iommu->ecap)) { in intel_cleanup_irq_remapping()
702 iommu_disable_irq_remapping(iommu); in intel_cleanup_irq_remapping()
703 intel_teardown_irq_remapping(iommu); in intel_cleanup_irq_remapping()
714 struct intel_iommu *iommu; in intel_prepare_irq_remapping() local
742 for_each_iommu(iommu, drhd) in intel_prepare_irq_remapping()
743 if (!ecap_ir_support(iommu->ecap)) in intel_prepare_irq_remapping()
755 for_each_iommu(iommu, drhd) { in intel_prepare_irq_remapping()
756 if (eim && !ecap_eim_support(iommu->ecap)) { in intel_prepare_irq_remapping()
757 pr_info("%s does not support EIM\n", iommu->name); in intel_prepare_irq_remapping()
767 for_each_iommu(iommu, drhd) { in intel_prepare_irq_remapping()
768 if (intel_setup_irq_remapping(iommu)) { in intel_prepare_irq_remapping()
770 iommu->name); in intel_prepare_irq_remapping()
788 struct intel_iommu *iommu; in set_irq_posting_cap() local
802 for_each_iommu(iommu, drhd) in set_irq_posting_cap()
803 if (!cap_pi_support(iommu->cap)) { in set_irq_posting_cap()
814 struct intel_iommu *iommu; in intel_enable_irq_remapping() local
820 for_each_iommu(iommu, drhd) { in intel_enable_irq_remapping()
821 if (!ir_pre_enabled(iommu)) in intel_enable_irq_remapping()
822 iommu_enable_irq_remapping(iommu); in intel_enable_irq_remapping()
843 struct intel_iommu *iommu, in ir_parse_one_hpet_scope() argument
866 if (ir_hpet[count].iommu == iommu && in ir_parse_one_hpet_scope()
869 else if (ir_hpet[count].iommu == NULL && free == -1) in ir_parse_one_hpet_scope()
877 ir_hpet[free].iommu = iommu; in ir_parse_one_hpet_scope()
888 struct intel_iommu *iommu, in ir_parse_one_ioapic_scope() argument
911 if (ir_ioapic[count].iommu == iommu && in ir_parse_one_ioapic_scope()
914 else if (ir_ioapic[count].iommu == NULL && free == -1) in ir_parse_one_ioapic_scope()
924 ir_ioapic[free].iommu = iommu; in ir_parse_one_ioapic_scope()
926 pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n", in ir_parse_one_ioapic_scope()
927 scope->enumeration_id, drhd->address, iommu->seq_id); in ir_parse_one_ioapic_scope()
933 struct intel_iommu *iommu) in ir_parse_ioapic_hpet_scope() argument
947 ret = ir_parse_one_ioapic_scope(scope, iommu, drhd); in ir_parse_ioapic_hpet_scope()
949 ret = ir_parse_one_hpet_scope(scope, iommu, drhd); in ir_parse_ioapic_hpet_scope()
956 static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu) in ir_remove_ioapic_hpet_scope() argument
961 if (ir_hpet[i].iommu == iommu) in ir_remove_ioapic_hpet_scope()
962 ir_hpet[i].iommu = NULL; in ir_remove_ioapic_hpet_scope()
965 if (ir_ioapic[i].iommu == iommu) in ir_remove_ioapic_hpet_scope()
966 ir_ioapic[i].iommu = NULL; in ir_remove_ioapic_hpet_scope()
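
The scope-parsing hits (ir_parse_one_ioapic_scope(), ir_parse_one_hpet_scope(), ir_remove_ioapic_hpet_scope()) populate and tear down the ir_ioapic[]/ir_hpet[] tables that map_ioapic_to_iommu() and map_hpet_to_iommu() later consult. Here is a toy version of the IOAPIC side only; toy_iommu, toy_ioapic_map and the helper names are hypothetical, and the real tables hold struct intel_iommu pointers recorded from the ACPI DMAR device scopes.

#include <stdio.h>
#include <stddef.h>

#define MAX_IO_APICS 8

struct toy_iommu { int seq_id; };

struct toy_ioapic_map {
	int id;                     /* IOAPIC enumeration id from the DMAR scope */
	struct toy_iommu *iommu;    /* NULL means "slot free / not covered" */
};

static struct toy_ioapic_map ir_map[MAX_IO_APICS];

/* Record that 'apic' is remapped by 'iommu' (what scope parsing does). */
static int toy_register_ioapic(int apic, struct toy_iommu *iommu)
{
	for (int i = 0; i < MAX_IO_APICS; i++) {
		if (!ir_map[i].iommu) {
			ir_map[i].id = apic;
			ir_map[i].iommu = iommu;
			return 0;
		}
	}
	return -1;                  /* no free slot */
}

/* What map_ioapic_to_iommu() does: id -> owning IOMMU, or NULL. */
static struct toy_iommu *toy_map_ioapic(int apic)
{
	for (int i = 0; i < MAX_IO_APICS; i++)
		if (ir_map[i].iommu && ir_map[i].id == apic)
			return ir_map[i].iommu;
	return NULL;
}

int main(void)
{
	struct toy_iommu dmar0 = { .seq_id = 0 };
	struct toy_iommu *owner;

	toy_register_ioapic(2, &dmar0);
	owner = toy_map_ioapic(2);
	printf("IOAPIC 2 -> IOMMU %d\n", owner ? owner->seq_id : -1);
	return 0;
}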
976 struct intel_iommu *iommu; in parse_ioapics_under_ir() local
980 for_each_iommu(iommu, drhd) { in parse_ioapics_under_ir()
983 if (!ecap_ir_support(iommu->ecap)) in parse_ioapics_under_ir()
986 ret = ir_parse_ioapic_hpet_scope(drhd->hdr, iommu); in parse_ioapics_under_ir()
999 pr_err(FW_BUG "ioapic %d has no mapping iommu, " in parse_ioapics_under_ir()
1027 struct intel_iommu *iommu = NULL; in disable_irq_remapping() local
1032 for_each_iommu(iommu, drhd) { in disable_irq_remapping()
1033 if (!ecap_ir_support(iommu->ecap)) in disable_irq_remapping()
1036 iommu_disable_irq_remapping(iommu); in disable_irq_remapping()
1050 struct intel_iommu *iommu = NULL; in reenable_irq_remapping() local
1052 for_each_iommu(iommu, drhd) in reenable_irq_remapping()
1053 if (iommu->qi) in reenable_irq_remapping()
1054 dmar_reenable_qi(iommu); in reenable_irq_remapping()
1059 for_each_iommu(iommu, drhd) { in reenable_irq_remapping()
1060 if (!ecap_ir_support(iommu->ecap)) in reenable_irq_remapping()
1063 /* Set up interrupt remapping for iommu.*/ in reenable_irq_remapping()
1064 iommu_set_irq_remapping(iommu, eim); in reenable_irq_remapping()
1065 iommu_enable_irq_remapping(iommu); in reenable_irq_remapping()
1408 struct intel_iommu *iommu = domain->host_data; in intel_irq_remapping_alloc() local
1415 if (!info || !iommu) in intel_irq_remapping_alloc()
1429 index = alloc_irte(iommu, &data->irq_2_iommu, nr_irqs); in intel_irq_remapping_alloc()
1505 struct intel_iommu *iommu = NULL; in intel_irq_remapping_select() local
1508 iommu = map_ioapic_to_iommu(fwspec->param[0]); in intel_irq_remapping_select()
1510 iommu = map_hpet_to_iommu(fwspec->param[0]); in intel_irq_remapping_select()
1512 return iommu && d == iommu->ir_domain; in intel_irq_remapping_select()
1532 static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu) in dmar_ir_add() argument
1537 ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_IRQR, iommu); in dmar_ir_add()
1541 if (eim && !ecap_eim_support(iommu->ecap)) { in dmar_ir_add()
1543 iommu->reg_phys, iommu->ecap); in dmar_ir_add()
1547 if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) { in dmar_ir_add()
1549 iommu->reg_phys); in dmar_ir_add()
1553 /* TODO: check all IOAPICs are covered by IOMMU */ in dmar_ir_add()
1556 ret = intel_setup_irq_remapping(iommu); in dmar_ir_add()
1559 iommu->name); in dmar_ir_add()
1560 intel_teardown_irq_remapping(iommu); in dmar_ir_add()
1561 ir_remove_ioapic_hpet_scope(iommu); in dmar_ir_add()
1563 iommu_enable_irq_remapping(iommu); in dmar_ir_add()
1572 struct intel_iommu *iommu = dmaru->iommu; in dmar_ir_hotplug() local
1576 if (iommu == NULL) in dmar_ir_hotplug()
1578 if (!ecap_ir_support(iommu->ecap)) in dmar_ir_hotplug()
1581 !cap_pi_support(iommu->cap)) in dmar_ir_hotplug()
1585 if (!iommu->ir_table) in dmar_ir_hotplug()
1586 ret = dmar_ir_add(dmaru, iommu); in dmar_ir_hotplug()
1588 if (iommu->ir_table) { in dmar_ir_hotplug()
1589 if (!bitmap_empty(iommu->ir_table->bitmap, in dmar_ir_hotplug()
1593 iommu_disable_irq_remapping(iommu); in dmar_ir_hotplug()
1594 intel_teardown_irq_remapping(iommu); in dmar_ir_hotplug()
1595 ir_remove_ioapic_hpet_scope(iommu); in dmar_ir_hotplug()
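
Finally, the dmar_ir_hotplug() matches at the end decide what happens when a DRHD unit is hot-added or hot-removed: set up a remapping table if the unit supports IR and has none yet, and refuse removal with -EBUSY while any IRTE in its table is still allocated. A compressed sketch of that decision, with invented toy_unit fields standing in for the real capability and table checks:

#include <stdio.h>
#include <stdbool.h>

struct toy_unit {
	bool ir_supported;      /* ecap_ir_support() */
	bool has_table;         /* iommu->ir_table != NULL */
	bool table_in_use;      /* !bitmap_empty(ir_table->bitmap, ...) */
};

static int toy_ir_hotplug(struct toy_unit *u, bool insert)
{
	if (!u->ir_supported)
		return 0;               /* nothing to do for this unit */

	if (insert) {
		if (!u->has_table)
			u->has_table = true;    /* intel_setup_irq_remapping() + enable */
		return 0;
	}

	/* removal */
	if (u->has_table) {
		if (u->table_in_use)
			return -16;             /* -EBUSY: interrupts still remapped here */
		u->has_table = false;           /* disable + teardown + drop scopes */
	}
	return 0;
}

int main(void)
{
	struct toy_unit u = { .ir_supported = true };

	printf("add:    %d\n", toy_ir_hotplug(&u, true));
	u.table_in_use = true;
	printf("remove: %d (busy)\n", toy_ir_hotplug(&u, false));
	return 0;
}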