Lines matching full:iommu (whole-word matches of "iommu") in the Intel VT-d DMAR code
28 #include <linux/iommu.h>
33 #include "iommu.h"
35 #include "../iommu-pages.h"
68 static void free_iommu(struct intel_iommu *iommu);
462 if (dmaru->iommu) in dmar_free_drhd()
463 free_iommu(dmaru->iommu); in dmar_free_drhd()
502 drhd->iommu->node = node; in dmar_parse_one_rhsa()
767 pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n", in dmar_acpi_insert_dev_scope()
940 x86_init.iommu.iommu_init = intel_iommu_init; in detect_intel_iommu()
953 static void unmap_iommu(struct intel_iommu *iommu) in unmap_iommu() argument
955 iounmap(iommu->reg); in unmap_iommu()
956 release_mem_region(iommu->reg_phys, iommu->reg_size); in unmap_iommu()
960 * map_iommu: map the iommu's registers
961 * @iommu: the iommu to map
964 * Memory map the iommu's registers. Start w/ a single page, and
967 static int map_iommu(struct intel_iommu *iommu, struct dmar_drhd_unit *drhd) in map_iommu() argument
972 iommu->reg_phys = phys_addr; in map_iommu()
973 iommu->reg_size = drhd->reg_size; in map_iommu()
975 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) { in map_iommu()
981 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size); in map_iommu()
982 if (!iommu->reg) { in map_iommu()
988 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG); in map_iommu()
989 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); in map_iommu()
991 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) { in map_iommu()
998 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), in map_iommu()
999 cap_max_fault_reg_offset(iommu->cap)); in map_iommu()
1001 if (map_size > iommu->reg_size) { in map_iommu()
1002 iounmap(iommu->reg); in map_iommu()
1003 release_mem_region(iommu->reg_phys, iommu->reg_size); in map_iommu()
1004 iommu->reg_size = map_size; in map_iommu()
1005 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, in map_iommu()
1006 iommu->name)) { in map_iommu()
1011 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size); in map_iommu()
1012 if (!iommu->reg) { in map_iommu()
1019 if (cap_ecmds(iommu->cap)) { in map_iommu()
1023 iommu->ecmdcap[i] = dmar_readq(iommu->reg + DMAR_ECCAP_REG + in map_iommu()
1032 iounmap(iommu->reg); in map_iommu()
1034 release_mem_region(iommu->reg_phys, iommu->reg_size); in map_iommu()
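The map_iommu() fragments above show a two-step mapping strategy: claim and map a single page first, read CAP/ECAP through that minimal window, and only grow the mapping if the capability-derived register offsets (max IOTLB / fault-record offsets) fall beyond it. Below is a minimal sketch of that pattern; regs_needed_size() is a hypothetical stand-in for the max_t() calculation over ecap_max_iotlb_offset()/cap_max_fault_reg_offset(), the CAP/ECAP offsets are taken from the VT-d register map, and the driver itself uses its own dmar_readq() rather than plain readq().

#include <linux/io.h>
#include <linux/ioport.h>

/* Hypothetical stand-in for the cap/ecap max-offset calculation. */
static resource_size_t regs_needed_size(u64 cap, u64 ecap)
{
	return 4096;			/* placeholder */
}

/* Sketch of the map_iommu() pattern: map one page, then grow if needed. */
static void __iomem *map_dmar_regs(phys_addr_t phys, resource_size_t *size,
				   const char *name)
{
	void __iomem *regs;
	resource_size_t want;
	u64 cap, ecap;

	if (!request_mem_region(phys, *size, name))
		return NULL;
	regs = ioremap(phys, *size);
	if (!regs)
		goto release;

	cap  = readq(regs + 0x08);	/* DMAR_CAP_REG  */
	ecap = readq(regs + 0x10);	/* DMAR_ECAP_REG */
	if (cap == (u64)-1 && ecap == (u64)-1)
		goto unmap;		/* hardware is not responding */

	want = regs_needed_size(cap, ecap);
	if (want > *size) {
		/* remap with a window that covers every register */
		iounmap(regs);
		release_mem_region(phys, *size);
		*size = want;
		if (!request_mem_region(phys, *size, name))
			return NULL;
		regs = ioremap(phys, *size);
		if (!regs)
			goto release;
	}
	return regs;

unmap:
	iounmap(regs);
release:
	release_mem_region(phys, *size);
	return NULL;
}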
1041 struct intel_iommu *iommu; in alloc_iommu() local
1052 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); in alloc_iommu()
1053 if (!iommu) in alloc_iommu()
1056 iommu->seq_id = ida_alloc_range(&dmar_seq_ids, 0, in alloc_iommu()
1058 if (iommu->seq_id < 0) { in alloc_iommu()
1060 err = iommu->seq_id; in alloc_iommu()
1063 sprintf(iommu->name, "dmar%d", iommu->seq_id); in alloc_iommu()
1065 err = map_iommu(iommu, drhd); in alloc_iommu()
1067 pr_err("Failed to map %s\n", iommu->name); in alloc_iommu()
1071 if (!cap_sagaw(iommu->cap) && in alloc_iommu()
1072 (!ecap_smts(iommu->ecap) || ecap_slts(iommu->ecap))) { in alloc_iommu()
1074 iommu->name); in alloc_iommu()
1079 agaw = iommu_calculate_agaw(iommu); in alloc_iommu()
1081 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n", in alloc_iommu()
1082 iommu->seq_id); in alloc_iommu()
1087 msagaw = iommu_calculate_max_sagaw(iommu); in alloc_iommu()
1089 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n", in alloc_iommu()
1090 iommu->seq_id); in alloc_iommu()
1095 iommu->agaw = agaw; in alloc_iommu()
1096 iommu->msagaw = msagaw; in alloc_iommu()
1097 iommu->segment = drhd->segment; in alloc_iommu()
1098 iommu->device_rbtree = RB_ROOT; in alloc_iommu()
1099 spin_lock_init(&iommu->device_rbtree_lock); in alloc_iommu()
1100 mutex_init(&iommu->iopf_lock); in alloc_iommu()
1101 iommu->node = NUMA_NO_NODE; in alloc_iommu()
1103 ver = readl(iommu->reg + DMAR_VER_REG); in alloc_iommu()
1105 iommu->name, in alloc_iommu()
1108 (unsigned long long)iommu->cap, in alloc_iommu()
1109 (unsigned long long)iommu->ecap); in alloc_iommu()
1112 sts = readl(iommu->reg + DMAR_GSTS_REG); in alloc_iommu()
1114 iommu->gcmd |= DMA_GCMD_IRE; in alloc_iommu()
1116 iommu->gcmd |= DMA_GCMD_TE; in alloc_iommu()
1118 iommu->gcmd |= DMA_GCMD_QIE; in alloc_iommu()
1120 if (alloc_iommu_pmu(iommu)) in alloc_iommu()
1121 pr_debug("Cannot alloc PMU for iommu (seq_id = %d)\n", iommu->seq_id); in alloc_iommu()
1123 raw_spin_lock_init(&iommu->register_lock); in alloc_iommu()
1129 if (pasid_supported(iommu)) in alloc_iommu()
1130 iommu->iommu.max_pasids = 2UL << ecap_pss(iommu->ecap); in alloc_iommu()
1138 err = iommu_device_sysfs_add(&iommu->iommu, NULL, in alloc_iommu()
1140 "%s", iommu->name); in alloc_iommu()
1144 err = iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL); in alloc_iommu()
1148 iommu_pmu_register(iommu); in alloc_iommu()
1151 drhd->iommu = iommu; in alloc_iommu()
1152 iommu->drhd = drhd; in alloc_iommu()
1157 iommu_device_sysfs_remove(&iommu->iommu); in alloc_iommu()
1159 free_iommu_pmu(iommu); in alloc_iommu()
1160 unmap_iommu(iommu); in alloc_iommu()
1162 ida_free(&dmar_seq_ids, iommu->seq_id); in alloc_iommu()
1164 kfree(iommu); in alloc_iommu()
1168 static void free_iommu(struct intel_iommu *iommu) in free_iommu() argument
1170 if (intel_iommu_enabled && !iommu->drhd->ignored) { in free_iommu()
1171 iommu_pmu_unregister(iommu); in free_iommu()
1172 iommu_device_unregister(&iommu->iommu); in free_iommu()
1173 iommu_device_sysfs_remove(&iommu->iommu); in free_iommu()
1176 free_iommu_pmu(iommu); in free_iommu()
1178 if (iommu->irq) { in free_iommu()
1179 if (iommu->pr_irq) { in free_iommu()
1180 free_irq(iommu->pr_irq, iommu); in free_iommu()
1181 dmar_free_hwirq(iommu->pr_irq); in free_iommu()
1182 iommu->pr_irq = 0; in free_iommu()
1184 free_irq(iommu->irq, iommu); in free_iommu()
1185 dmar_free_hwirq(iommu->irq); in free_iommu()
1186 iommu->irq = 0; in free_iommu()
1189 if (iommu->qi) { in free_iommu()
1190 iommu_free_page(iommu->qi->desc); in free_iommu()
1191 kfree(iommu->qi->desc_status); in free_iommu()
1192 kfree(iommu->qi); in free_iommu()
1195 if (iommu->reg) in free_iommu()
1196 unmap_iommu(iommu); in free_iommu()
1198 ida_free(&dmar_seq_ids, iommu->seq_id); in free_iommu()
1199 kfree(iommu); in free_iommu()
1239 static void qi_dump_fault(struct intel_iommu *iommu, u32 fault) in qi_dump_fault() argument
1241 unsigned int head = dmar_readl(iommu->reg + DMAR_IQH_REG); in qi_dump_fault()
1242 u64 iqe_err = dmar_readq(iommu->reg + DMAR_IQER_REG); in qi_dump_fault()
1243 struct qi_desc *desc = iommu->qi->desc + head; in qi_dump_fault()
1260 head = ((head >> qi_shift(iommu)) + QI_LENGTH - 1) % QI_LENGTH; in qi_dump_fault()
1261 head <<= qi_shift(iommu); in qi_dump_fault()
1262 desc = iommu->qi->desc + head; in qi_dump_fault()
1270 static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index) in qi_check_fault() argument
1276 struct q_inval *qi = iommu->qi; in qi_check_fault()
1277 int shift = qi_shift(iommu); in qi_check_fault()
1282 fault = readl(iommu->reg + DMAR_FSTS_REG); in qi_check_fault()
1284 qi_dump_fault(iommu, fault); in qi_check_fault()
1292 head = readl(iommu->reg + DMAR_IQH_REG); in qi_check_fault()
1303 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG); in qi_check_fault()
1314 head = readl(iommu->reg + DMAR_IQH_REG); in qi_check_fault()
1317 tail = readl(iommu->reg + DMAR_IQT_REG); in qi_check_fault()
1324 iqe_err = dmar_readq(iommu->reg + DMAR_IQER_REG); in qi_check_fault()
1327 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG); in qi_check_fault()
1344 dev = device_rbtree_find(iommu, ite_sid); in qi_check_fault()
1354 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG); in qi_check_fault()
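The qi_check_fault() fragments above show the shape of queued-invalidation error handling: read the fault status register and, for each error class, recover and then acknowledge it by writing the status bit back (the bits are write-1-to-clear). A minimal sketch, assuming the driver-internal names visible in the listing (struct intel_iommu, DMAR_FSTS_REG, DMA_FSTS_*); the actual recovery work (restarting wait descriptors, locating the offending device after a time-out) is elided.

/* Hedged sketch of the FSTS error classes handled in qi_check_fault(). */
static void qi_fault_sketch(struct intel_iommu *iommu)
{
	u32 fsts = readl(iommu->reg + DMAR_FSTS_REG);

	if (fsts & DMA_FSTS_IQE) {
		/* Invalidation Queue Error: a malformed descriptor was fetched. */
		writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
	}
	if (fsts & DMA_FSTS_ITE) {
		/* Invalidation Time-out Error: a device-TLB flush never completed. */
		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
	}
	if (fsts & DMA_FSTS_ICE) {
		/* Invalidation Completion Error reported by an ATS-capable device. */
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
	}
}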
1368 int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc, in qi_submit_sync() argument
1371 struct q_inval *qi = iommu->qi; in qi_submit_sync()
1388 dmar_latency_enabled(iommu, DMAR_LATENCY_INV_IOTLB)) in qi_submit_sync()
1392 dmar_latency_enabled(iommu, DMAR_LATENCY_INV_DEVTLB)) in qi_submit_sync()
1396 dmar_latency_enabled(iommu, DMAR_LATENCY_INV_IEC)) in qi_submit_sync()
1416 shift = qi_shift(iommu); in qi_submit_sync()
1422 trace_qi_submit(iommu, desc[i].qw0, desc[i].qw1, in qi_submit_sync()
1445 writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG); in qi_submit_sync()
1455 rc = qi_check_fault(iommu, index, wait_index); in qi_submit_sync()
1482 dmar_latency_update(iommu, DMAR_LATENCY_INV_IOTLB, in qi_submit_sync()
1486 dmar_latency_update(iommu, DMAR_LATENCY_INV_DEVTLB, in qi_submit_sync()
1490 dmar_latency_update(iommu, DMAR_LATENCY_INV_IEC, in qi_submit_sync()
1499 void qi_global_iec(struct intel_iommu *iommu) in qi_global_iec() argument
1509 qi_submit_sync(iommu, &desc, 1, 0); in qi_global_iec()
1512 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm, in qi_flush_context() argument
1523 qi_submit_sync(iommu, &desc, 1, 0); in qi_flush_context()
1526 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, in qi_flush_iotlb() argument
1531 qi_desc_iotlb(iommu, did, addr, size_order, type, &desc); in qi_flush_iotlb()
1532 qi_submit_sync(iommu, &desc, 1, 0); in qi_flush_iotlb()
1535 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, in qi_flush_dev_iotlb() argument
1546 if (!(iommu->gcmd & DMA_GCMD_TE)) in qi_flush_dev_iotlb()
1550 qi_submit_sync(iommu, &desc, 1, 0); in qi_flush_dev_iotlb()
1554 void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr, in qi_flush_piotlb() argument
1570 qi_submit_sync(iommu, &desc, 1, 0); in qi_flush_piotlb()
1574 void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid, in qi_flush_dev_iotlb_pasid() argument
1585 if (!(iommu->gcmd & DMA_GCMD_TE)) in qi_flush_dev_iotlb_pasid()
1591 qi_submit_sync(iommu, &desc, 1, 0); in qi_flush_dev_iotlb_pasid()
1594 void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, in qi_flush_pasid_cache() argument
1601 qi_submit_sync(iommu, &desc, 1, 0); in qi_flush_pasid_cache()
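Each qi_flush_*() helper above follows the same pattern: fill a four-quadword struct qi_desc and pass it to qi_submit_sync() as a single synchronous descriptor (count 1, no option flags). A hedged sketch of that shape; encode_example_desc() is a hypothetical encoder, since the real descriptors pack the type, domain-id, PASID, address and granularity into qw0..qw3 as defined by the VT-d spec.

/* Sketch of the common qi_flush_*() pattern: build one descriptor, submit it. */
static void qi_flush_example(struct intel_iommu *iommu, u16 did)
{
	struct qi_desc desc = {};

	/*
	 * qw0 typically carries the descriptor type and domain-id;
	 * qw1..qw3 carry address/PASID/granularity fields.  The encoding
	 * below is a placeholder, not a real descriptor layout.
	 */
	desc.qw0 = encode_example_desc(did);	/* hypothetical helper */

	/* one descriptor, wait for completion, no option flags */
	qi_submit_sync(iommu, &desc, 1, 0);
}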
1607 void dmar_disable_qi(struct intel_iommu *iommu) in dmar_disable_qi() argument
1613 if (!ecap_qis(iommu->ecap)) in dmar_disable_qi()
1616 raw_spin_lock_irqsave(&iommu->register_lock, flags); in dmar_disable_qi()
1618 sts = readl(iommu->reg + DMAR_GSTS_REG); in dmar_disable_qi()
1625 while ((readl(iommu->reg + DMAR_IQT_REG) != in dmar_disable_qi()
1626 readl(iommu->reg + DMAR_IQH_REG)) && in dmar_disable_qi()
1630 iommu->gcmd &= ~DMA_GCMD_QIE; in dmar_disable_qi()
1631 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in dmar_disable_qi()
1633 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, in dmar_disable_qi()
1636 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in dmar_disable_qi()
1642 static void __dmar_enable_qi(struct intel_iommu *iommu) in __dmar_enable_qi() argument
1646 struct q_inval *qi = iommu->qi; in __dmar_enable_qi()
1656 if (ecap_smts(iommu->ecap)) in __dmar_enable_qi()
1659 raw_spin_lock_irqsave(&iommu->register_lock, flags); in __dmar_enable_qi()
1662 writel(0, iommu->reg + DMAR_IQT_REG); in __dmar_enable_qi()
1664 dmar_writeq(iommu->reg + DMAR_IQA_REG, val); in __dmar_enable_qi()
1666 iommu->gcmd |= DMA_GCMD_QIE; in __dmar_enable_qi()
1667 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in __dmar_enable_qi()
1670 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts); in __dmar_enable_qi()
1672 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in __dmar_enable_qi()
1680 int dmar_enable_qi(struct intel_iommu *iommu) in dmar_enable_qi() argument
1686 if (!ecap_qis(iommu->ecap)) in dmar_enable_qi()
1692 if (iommu->qi) in dmar_enable_qi()
1695 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC); in dmar_enable_qi()
1696 if (!iommu->qi) in dmar_enable_qi()
1699 qi = iommu->qi; in dmar_enable_qi()
1705 order = ecap_smts(iommu->ecap) ? 1 : 0; in dmar_enable_qi()
1706 desc = iommu_alloc_pages_node(iommu->node, GFP_ATOMIC, order); in dmar_enable_qi()
1709 iommu->qi = NULL; in dmar_enable_qi()
1719 iommu->qi = NULL; in dmar_enable_qi()
1725 __dmar_enable_qi(iommu); in dmar_enable_qi()
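Together, the dmar_enable_qi() and __dmar_enable_qi() fragments outline the hardware enable sequence: allocate the descriptor page(s) (a higher order when scalable mode's 256-bit descriptors are in use), zero the tail pointer, program the queue address register (plus a descriptor-width bit in scalable mode, elided here), set the QIE bit in the global command register, and poll global status until QIES is set. A minimal sketch reusing the register and bit names from the listing; the open-coded poll stands in for the driver's IOMMU_WAIT_OP() macro and all locking is omitted.

/* Hedged sketch of the enable sequence in __dmar_enable_qi(). */
static void enable_qi_sketch(struct intel_iommu *iommu, u64 desc_base)
{
	u32 sts;

	writel(0, iommu->reg + DMAR_IQT_REG);		/* empty queue: tail = 0 */
	dmar_writeq(iommu->reg + DMAR_IQA_REG, desc_base);

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	do {						/* stand-in for IOMMU_WAIT_OP() */
		cpu_relax();
		sts = readl(iommu->reg + DMAR_GSTS_REG);
	} while (!(sts & DMA_GSTS_QIES));
}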
1730 /* iommu interrupt handling. Most of it is MSI-like. */
1845 static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq) in dmar_msi_reg() argument
1847 if (iommu->irq == irq) in dmar_msi_reg()
1849 else if (iommu->pr_irq == irq) in dmar_msi_reg()
1851 else if (iommu->perf_irq == irq) in dmar_msi_reg()
1859 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); in dmar_msi_unmask() local
1860 int reg = dmar_msi_reg(iommu, data->irq); in dmar_msi_unmask()
1864 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_msi_unmask()
1865 writel(0, iommu->reg + reg); in dmar_msi_unmask()
1867 readl(iommu->reg + reg); in dmar_msi_unmask()
1868 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_msi_unmask()
1873 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); in dmar_msi_mask() local
1874 int reg = dmar_msi_reg(iommu, data->irq); in dmar_msi_mask()
1878 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_msi_mask()
1879 writel(DMA_FECTL_IM, iommu->reg + reg); in dmar_msi_mask()
1881 readl(iommu->reg + reg); in dmar_msi_mask()
1882 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_msi_mask()
1887 struct intel_iommu *iommu = irq_get_handler_data(irq); in dmar_msi_write() local
1888 int reg = dmar_msi_reg(iommu, irq); in dmar_msi_write()
1891 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_msi_write()
1892 writel(msg->data, iommu->reg + reg + 4); in dmar_msi_write()
1893 writel(msg->address_lo, iommu->reg + reg + 8); in dmar_msi_write()
1894 writel(msg->address_hi, iommu->reg + reg + 12); in dmar_msi_write()
1895 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_msi_write()
1900 struct intel_iommu *iommu = irq_get_handler_data(irq); in dmar_msi_read() local
1901 int reg = dmar_msi_reg(iommu, irq); in dmar_msi_read()
1904 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_msi_read()
1905 msg->data = readl(iommu->reg + reg + 4); in dmar_msi_read()
1906 msg->address_lo = readl(iommu->reg + reg + 8); in dmar_msi_read()
1907 msg->address_hi = readl(iommu->reg + reg + 12); in dmar_msi_read()
1908 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_msi_read()
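The dmar_msi_*() helpers above all address one small register block: dmar_msi_reg() selects the base (fault event, page request, or perfmon block, depending on which IRQ fired), and the mask/unmask/read/write helpers then use fixed offsets within it. Read off the code, the layout is: control word at +0 (writing DMA_FECTL_IM masks the interrupt, writing 0 unmasks it), MSI data at +4, MSI address low at +8, MSI address high at +12. The enum below only restates those offsets; its names are illustrative, not the driver's.

/* Offsets within a DMAR MSI-style event register block (illustrative names). */
enum dmar_msi_block_offset {
	MSI_BLK_CTL	= 0x0,	/* control; writing DMA_FECTL_IM masks the IRQ */
	MSI_BLK_DATA	= 0x4,	/* MSI data word */
	MSI_BLK_ADDR_LO	= 0x8,	/* MSI address, low 32 bits */
	MSI_BLK_ADDR_HI	= 0xc,	/* MSI address, high 32 bits */
};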
1911 static int dmar_fault_do_one(struct intel_iommu *iommu, int type, in dmar_fault_do_one() argument
1942 dmar_fault_dump_ptes(iommu, source_id, addr, pasid); in dmar_fault_do_one()
1950 struct intel_iommu *iommu = dev_id; in dmar_fault() local
1958 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_fault()
1959 fault_status = readl(iommu->reg + DMAR_FSTS_REG); in dmar_fault()
1968 reg = cap_fault_reg_offset(iommu->cap); in dmar_fault()
1981 data = readl(iommu->reg + reg + in dmar_fault()
1991 data = readl(iommu->reg + reg + in dmar_fault()
1996 guest_addr = dmar_readq(iommu->reg + reg + in dmar_fault()
2002 writel(DMA_FRCD_F, iommu->reg + reg + in dmar_fault()
2005 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_fault()
2009 dmar_fault_do_one(iommu, type, fault_reason, in dmar_fault()
2014 if (fault_index >= cap_num_fault_regs(iommu->cap)) in dmar_fault()
2016 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_fault()
2020 iommu->reg + DMAR_FSTS_REG); in dmar_fault()
2023 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_fault()
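The dmar_fault() fragments sketch the fault-record loop: read the fault status register, locate the first fault recording register via cap_fault_reg_offset(), walk the ring of records, report each one, acknowledge it by writing DMA_FRCD_F back (write-1-to-clear), wrap the index at cap_num_fault_regs(), and finally clear the summary bits in the status register. A hedged outline of that loop follows; the 16-byte record stride and the +12 offset of the valid/clear bit come from the VT-d register map rather than from the listing itself, and the per-record decoding (source-id, reason, address, PASID) is elided.

/* Hedged outline of the dmar_fault() record-processing loop. */
static void fault_loop_sketch(struct intel_iommu *iommu)
{
	int reg = cap_fault_reg_offset(iommu->cap);
	int nr  = cap_num_fault_regs(iommu->cap);
	int idx = 0;	/* real code starts from the index encoded in FSTS */
	u32 data;

	for (;;) {
		/* the high dword of each 16-byte record holds the valid (F) bit */
		data = readl(iommu->reg + reg + idx * 16 + 12);
		if (!(data & DMA_FRCD_F))
			break;			/* no more pending fault records */

		/* ... decode and report source-id, reason, address, PASID ... */

		/* acknowledge the record: the F bit is write-1-to-clear */
		writel(DMA_FRCD_F, iommu->reg + reg + idx * 16 + 12);

		if (++idx >= nr)
			idx = 0;		/* the fault-record ring wraps around */
	}

	/* ... then clear the overflow/pending summary bits in DMAR_FSTS_REG ... */
}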
2027 int dmar_set_interrupt(struct intel_iommu *iommu) in dmar_set_interrupt() argument
2034 if (iommu->irq) in dmar_set_interrupt()
2037 irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu); in dmar_set_interrupt()
2039 iommu->irq = irq; in dmar_set_interrupt()
2045 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu); in dmar_set_interrupt()
2054 struct intel_iommu *iommu; in enable_drhd_fault_handling() local
2059 for_each_iommu(iommu, drhd) { in enable_drhd_fault_handling()
2063 if (iommu->irq || iommu->node != cpu_to_node(cpu)) in enable_drhd_fault_handling()
2066 ret = dmar_set_interrupt(iommu); in enable_drhd_fault_handling()
2077 dmar_fault(iommu->irq, iommu); in enable_drhd_fault_handling()
2078 fault_status = readl(iommu->reg + DMAR_FSTS_REG); in enable_drhd_fault_handling()
2079 writel(fault_status, iommu->reg + DMAR_FSTS_REG); in enable_drhd_fault_handling()
2088 int dmar_reenable_qi(struct intel_iommu *iommu) in dmar_reenable_qi() argument
2090 if (!ecap_qis(iommu->ecap)) in dmar_reenable_qi()
2093 if (!iommu->qi) in dmar_reenable_qi()
2099 dmar_disable_qi(iommu); in dmar_reenable_qi()
2105 __dmar_enable_qi(iommu); in dmar_reenable_qi()