/linux/drivers/iommu/

sun50i-iommu.c
  101  struct iommu_device iommu;                               /* member */
  125  struct sun50i_iommu *iommu;                              /* member */
  138  static u32 iommu_read(struct sun50i_iommu *iommu, u32 offset)
  140          return readl(iommu->base + offset);
  143  static void iommu_write(struct sun50i_iommu *iommu, u32 offset, u32 value)
  145          writel(value, iommu->base + offset);
  294  struct sun50i_iommu *iommu = sun50i_domain->iommu;       /* in sun50i_table_flush() */
  298  dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE);
  301  static void sun50i_iommu_zap_iova(struct sun50i_iommu *iommu,
  307          iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_REG, iova);
  …

rockchip-iommu.c
  117  struct iommu_device iommu;                               /* member */
  124  struct rk_iommu *iommu;                                  /* member */
  347  static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
  351          for (i = 0; i < iommu->num_mmu; i++)
  352                  writel(command, iommu->bases[i] + RK_MMU_COMMAND);
  359  static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
  368          for (i = 0; i < iommu->num_mmu; i++) {
  372                  rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
  376  static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
  381          for (i = 0; i < iommu->num_mmu; i++)
  …

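The sun50i and rockchip excerpts show a common driver idiom: MMIO register access is wrapped in tiny read/write helpers, and rockchip additionally fans each command out to every MMU instance behind one handle. A minimal sketch of the pattern; the demo_* names and the DEMO_MMU_COMMAND offset are made up, not taken from either driver:

    #include <linux/io.h>
    #include <linux/types.h>

    #define DEMO_MMU_COMMAND 0x08           /* made-up register offset */

    struct demo_iommu {
            void __iomem **bases;           /* one MMIO window per MMU instance */
            int num_mmu;
    };

    /* Read/write helpers keep all MMIO access in one place. */
    static u32 demo_read(void __iomem *base, u32 offset)
    {
            return readl(base + offset);
    }

    static void demo_write(void __iomem *base, u32 offset, u32 value)
    {
            writel(value, base + offset);
    }

    /* Commands are fanned out to every instance, as rk_iommu_command() does. */
    static void demo_command(struct demo_iommu *iommu, u32 command)
    {
            int i;

            for (i = 0; i < iommu->num_mmu; i++)
                    demo_write(iommu->bases[i], DEMO_MMU_COMMAND, command);
    }
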
msm_iommu.c
   54  static int __enable_clocks(struct msm_iommu_dev *iommu)
   58          ret = clk_enable(iommu->pclk);
   62          if (iommu->clk) {
   63                  ret = clk_enable(iommu->clk);
   65                          clk_disable(iommu->pclk);
   71  static void __disable_clocks(struct msm_iommu_dev *iommu)
   73          if (iommu->clk)
   74                  clk_disable(iommu->clk);
   75          clk_disable(iommu->pclk);
  120  struct msm_iommu_dev *iommu = NULL;                      /* in __flush_iotlb() */
  …

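__enable_clocks() above shows the usual enable-with-rollback pairing: clocks come up in order, a failure part-way undoes what already succeeded, and the disable path runs in reverse. A minimal sketch under those assumptions (struct demo_dev and its fields are illustrative):

    #include <linux/clk.h>

    struct demo_dev {
            struct clk *pclk;       /* interface clock, always present */
            struct clk *clk;        /* core clock, optional */
    };

    static int demo_enable_clocks(struct demo_dev *dev)
    {
            int ret;

            ret = clk_enable(dev->pclk);
            if (ret)
                    return ret;

            if (dev->clk) {
                    ret = clk_enable(dev->clk);
                    if (ret)
                            clk_disable(dev->pclk); /* roll back the partial enable */
            }

            return ret;
    }

    static void demo_disable_clocks(struct demo_dev *dev)
    {
            /* Disable in reverse order of enabling. */
            if (dev->clk)
                    clk_disable(dev->clk);
            clk_disable(dev->pclk);
    }
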
Makefile
    7  obj-$(CONFIG_IOMMU_API) += iommu.o
    8  obj-$(CONFIG_IOMMU_SUPPORT) += iommu-pages.o
    9  obj-$(CONFIG_IOMMU_API) += iommu-traces.o
   10  obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
   11  obj-$(CONFIG_IOMMU_DEBUGFS) += iommu-debugfs.o
   12  obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o
   25  obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
   26  obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
   27  obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
   28  obj-$(CONFIG_SUN50I_IOMMU) += sun50i-iommu.o
   …

/linux/arch/sparc/kernel/

iommu.c
   52  struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);  /* in iommu_flushall() */
   53          if (iommu->iommu_flushinv) {
   54                  iommu_write(iommu->iommu_flushinv, ~(u64)0);
   59                  tag = iommu->iommu_tags;
   66          (void) iommu_read(iommu->write_complete_reg);
   80  #define IOPTE_IS_DUMMY(iommu, iopte) \
   81          ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
   83  static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
   88          val |= iommu->dummy_page_pa;
   93  int iommu_table_init(struct iommu *iommu, int tsbsize,
   …

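The sparc code above marks unused IOPTEs by pointing them at a preallocated dummy page (IOPTE_IS_DUMMY / iopte_make_dummy), so a free entry is recognizable by the page address it holds and stray DMA lands on a known page. A minimal sketch of the idea with simplified types (u64 PTEs and a made-up address mask instead of sparc's iopte_t):

    #include <linux/types.h>

    #define DEMO_PTE_PAGE_MASK      0x00000000ffffe000ULL   /* illustrative mask */

    struct demo_iommu {
            u64 dummy_page_pa;      /* physical address of an always-present dummy page */
    };

    /* An entry is "free" iff it points at the dummy page. */
    static bool demo_pte_is_dummy(struct demo_iommu *iommu, u64 *pte)
    {
            return (*pte & DEMO_PTE_PAGE_MASK) == iommu->dummy_page_pa;
    }

    static void demo_pte_make_dummy(struct demo_iommu *iommu, u64 *pte)
    {
            u64 val = *pte;

            val &= ~DEMO_PTE_PAGE_MASK;     /* drop the old page address */
            val |= iommu->dummy_page_pa;    /* point at the dummy page instead */
            *pte = val;
    }
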
iommu-common.c
   19  static inline bool need_flush(struct iommu_map_table *iommu)
   21          return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
   24  static inline void set_flush(struct iommu_map_table *iommu)
   26          iommu->flags |= IOMMU_NEED_FLUSH;
   29  static inline void clear_flush(struct iommu_map_table *iommu)
   31          iommu->flags &= ~IOMMU_NEED_FLUSH;
   52  void iommu_tbl_pool_init(struct iommu_map_table *iommu,
   60          struct iommu_pool *p = &(iommu->large_pool);
   64          iommu->nr_pools = IOMMU_NR_POOLS;
   66          iommu->nr_pools = npools;
   …

sbus.c
   63  struct iommu *iommu = dev->archdata.iommu;               /* in sbus_set_sbus64() */
   78          cfg_reg = iommu->write_complete_reg;
  213  struct iommu *iommu = op->dev.archdata.iommu;            /* in sbus_build_irq() */
  214  unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
  275  struct iommu *iommu = op->dev.archdata.iommu;            /* in sysio_ue_handler() */
  276  unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
  349  struct iommu *iommu = op->dev.archdata.iommu;            /* in sysio_ce_handler() */
  350  unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
  428  struct iommu *iommu = op->dev.archdata.iommu;            /* in sysio_sbus_error_handler() */
  433          reg_base = iommu->write_complete_reg - 0x2000UL;
  …

pci_sun4v.c
   78  static inline bool iommu_use_atu(struct iommu *iommu, u64 mask)
   80          return iommu->atu && mask > DMA_BIT_MASK(32);
  102          if (!iommu_use_atu(pbm->iommu, mask)) {          /* in iommu_batch_flush() */
  119                  iotsb_num = pbm->iommu->atu->iotsb->iotsb_num;
  188  struct iommu *iommu;                                     /* in dma_4v_alloc_coherent() */
  213          iommu = dev->archdata.iommu;
  215          if (!iommu_use_atu(iommu, mask))
  216                  tbl = &iommu->tbl;
  218                  tbl = &iommu->atu->tbl;
  327  struct iommu *iommu;                                     /* in dma_4v_free_coherent() */
  …

/linux/drivers/iommu/intel/

pasid.c
   63          dir = iommu_alloc_pages_node_sz(info->iommu->node, GFP_KERNEL,  /* in intel_pasid_alloc_table() */
   74          if (!ecap_coherent(info->iommu->ecap))
  151          entries = iommu_alloc_pages_node_sz(info->iommu->node,          /* in intel_pasid_get_entry() */
  168          if (!ecap_coherent(info->iommu->ecap)) {
  196  pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
  207          qi_submit_sync(iommu, &desc, 1, 0);
  211  devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
  235          qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
  237          qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);
  240  void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
  …

iommu.c
   53  #define rwbf_required(iommu) (rwbf_quirk || cap_rwbf((iommu)->cap))
  124  struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid)
  130          spin_lock_irqsave(&iommu->device_rbtree_lock, flags);
  131          node = rb_find(&rid, &iommu->device_rbtree, device_rid_cmp_key);
  134          spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags);
  139  static int device_rbtree_insert(struct intel_iommu *iommu,
  145          spin_lock_irqsave(&iommu->device_rbtree_lock, flags);
  146          curr = rb_find_add(&info->node, &iommu->device_rbtree, device_rid_cmp);
  147          spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags);
  156  struct intel_iommu *iommu = info->iommu;                 /* in device_rbtree_remove() */
  …

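device_rbtree_find() and device_rbtree_insert() above use the generic rb_find()/rb_find_add() helpers from <linux/rbtree.h>, with the tree protected by a spinlock. A minimal sketch of the same pattern for a hypothetical table keyed by a u16 id (all demo_* names are illustrative):

    #include <linux/errno.h>
    #include <linux/rbtree.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct demo_entry {
            struct rb_node node;
            u16 rid;
    };

    /* key-vs-node compare, used for lookup */
    static int demo_rid_cmp_key(const void *key, const struct rb_node *node)
    {
            struct demo_entry *e = rb_entry(node, struct demo_entry, node);
            const u16 *rid = key;

            return *rid < e->rid ? -1 : *rid > e->rid;
    }

    /* node-vs-node compare, used for insertion */
    static int demo_rid_cmp(struct rb_node *lhs, const struct rb_node *rhs)
    {
            struct demo_entry *l = rb_entry(lhs, struct demo_entry, node);

            return demo_rid_cmp_key(&l->rid, rhs);
    }

    static DEFINE_SPINLOCK(demo_lock);
    static struct rb_root demo_tree = RB_ROOT;

    static struct demo_entry *demo_find(u16 rid)
    {
            struct rb_node *node;
            unsigned long flags;

            spin_lock_irqsave(&demo_lock, flags);
            node = rb_find(&rid, &demo_tree, demo_rid_cmp_key);
            spin_unlock_irqrestore(&demo_lock, flags);

            return node ? rb_entry(node, struct demo_entry, node) : NULL;
    }

    /* Returns 0 on success, -EEXIST if the rid is already present. */
    static int demo_insert(struct demo_entry *e)
    {
            struct rb_node *curr;
            unsigned long flags;

            spin_lock_irqsave(&demo_lock, flags);
            curr = rb_find_add(&e->node, &demo_tree, demo_rid_cmp);
            spin_unlock_irqrestore(&demo_lock, flags);

            return curr ? -EEXIST : 0;
    }
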
dmar.c
   68  static void free_iommu(struct intel_iommu *iommu);
  462          if (dmaru->iommu)                                /* in dmar_free_drhd() */
  463                  free_iommu(dmaru->iommu);
  502          drhd->iommu->node = node;                        /* in dmar_parse_one_rhsa() */
  939          x86_init.iommu.iommu_init = intel_iommu_init;    /* in detect_intel_iommu() */
  950  static void unmap_iommu(struct intel_iommu *iommu)
  952          iounmap(iommu->reg);
  953          release_mem_region(iommu->reg_phys, iommu->reg_size);
  964  static int map_iommu(struct intel_iommu *iommu, struct dmar_drhd_unit *drhd)
  969          iommu->reg_phys = phys_addr;
  …

cache.c
   24  struct intel_iommu *iommu, struct device *dev,           /* cache_tage_match() parameters */
   34          return tag->iommu == iommu;
   47  struct intel_iommu *iommu = info->iommu;                 /* in cache_tag_assign() */
   57          tag->iommu = iommu;
   65          tag->dev = iommu->iommu.dev;
   70          if (cache_tage_match(temp, did, iommu, dev, pasid, type)) {
   77          if (temp->iommu == iommu)
   98  struct intel_iommu *iommu = info->iommu;                 /* in cache_tag_unassign() */
  104          if (cache_tage_match(tag, did, iommu, dev, pasid, type)) {
  202  struct intel_iommu *iommu = info->iommu;                 /* in domain_get_id_for_dev() */
  …

perfmon.c
  327  struct intel_iommu *iommu = iommu_pmu->iommu;            /* in iommu_pmu_start() */
  356          ecmd_submit_sync(iommu, DMA_ECMD_ENABLE, hwc->idx, 0);
  364  struct intel_iommu *iommu = iommu_pmu->iommu;            /* in iommu_pmu_stop() */
  368          ecmd_submit_sync(iommu, DMA_ECMD_DISABLE, hwc->idx, 0);
  476  struct intel_iommu *iommu = iommu_pmu->iommu;            /* in iommu_pmu_enable() */
  478          ecmd_submit_sync(iommu, DMA_ECMD_UNFREEZE, 0, 0);
  484  struct intel_iommu *iommu = iommu_pmu->iommu;            /* in iommu_pmu_disable() */
  486          ecmd_submit_sync(iommu, DMA_ECMD_FREEZE, 0, 0);
  519  struct intel_iommu *iommu = dev_id;                      /* in iommu_pmu_irq_handler() */
  521          if (!dmar_readl(iommu->reg + DMAR_PERFINTRSTS_REG))
  …

trace.h
   22          TP_PROTO(struct intel_iommu *iommu, u64 qw0, u64 qw1, u64 qw2, u64 qw3),
   24          TP_ARGS(iommu, qw0, qw1, qw2, qw3),
   31                  __string(iommu, iommu->name)
   35                  __assign_str(iommu);
   53                  __get_str(iommu),
   59          TP_PROTO(struct intel_iommu *iommu, struct device *dev,
   63          TP_ARGS(iommu, dev, dw0, dw1, dw2, dw3, seq),
   71                  __string(iommu, iommu->name)
   82                  __assign_str(iommu);
   87                  __get_str(iommu), __get_str(dev), __entry->seq,
  …

iommu.h
  363  #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
  367          sts = op(iommu->reg + offset); \
  472  #define qi_shift(iommu) (DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap))
  518          void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
  520          void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
  536  #define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap))
  537  #define pasid_supported(iommu) (sm_supported(iommu) && \
  538          ecap_pasid((iommu)->ecap))
  539  #define ssads_supported(iommu) (sm_supported(iommu) && \
  540          ecap_slads((iommu)->ecap) && \
  …

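IOMMU_WAIT_OP above is a register-polling macro: it re-reads a status register until a condition holds, giving up after a timeout. The kernel also offers this loop as a ready-made helper; a minimal sketch using readl_poll_timeout() from <linux/iopoll.h>, with made-up register and bit names:

    #include <linux/bits.h>
    #include <linux/io.h>
    #include <linux/iopoll.h>

    #define DEMO_STATUS_REG   0x34          /* made-up offset */
    #define DEMO_STATUS_READY BIT(0)        /* made-up ready bit */

    /*
     * Wait for the device to set its ready bit, checking every 1 us and
     * returning -ETIMEDOUT after 10 ms. This is the same read/test loop
     * IOMMU_WAIT_OP open-codes (that macro additionally panics on timeout).
     */
    static int demo_wait_ready(void __iomem *reg_base)
    {
            u32 sts;

            return readl_poll_timeout(reg_base + DEMO_STATUS_REG, sts,
                                      sts & DEMO_STATUS_READY, 1, 10000);
    }
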
/linux/drivers/iommu/riscv/

iommu.c
   44          iommu_get_iommu_dev(dev, struct riscv_iommu_device, iommu)
   70  static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu,
   76          addr = iommu_alloc_pages_node_sz(dev_to_node(iommu->dev),
   91          devres_add(iommu->dev, devres);
   96  static void riscv_iommu_free_pages(struct riscv_iommu_device *iommu, void *addr)
  100          devres_release(iommu->dev, riscv_iommu_devres_pages_release,
  131  static int riscv_iommu_queue_alloc(struct riscv_iommu_device *iommu,
  142          riscv_iommu_writeq(iommu, queue->qbr, RISCV_IOMMU_QUEUE_LOG2SZ_FIELD);
  143          qb = riscv_iommu_readq(iommu, queue->qbr);
  163          queue->base = devm_ioremap(iommu->dev, queue->phys, queue_size);
  …

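riscv_iommu_get_pages()/riscv_iommu_free_pages() above tie allocations to device lifetime with devres: a release callback registered via devres_add() runs automatically on unbind, and devres_release() triggers it early for one allocation. A minimal sketch of that custom-devres pattern (demo_* names are illustrative):

    #include <linux/device.h>
    #include <linux/slab.h>

    struct demo_pages {
            void *addr;
    };

    /* Runs automatically when the device is unbound (or via devres_release()). */
    static void demo_pages_release(struct device *dev, void *res)
    {
            struct demo_pages *p = res;

            kfree(p->addr);
    }

    static void *demo_get_buffer(struct device *dev, size_t size)
    {
            struct demo_pages *p;
            void *addr;

            p = devres_alloc(demo_pages_release, sizeof(*p), GFP_KERNEL);
            if (!p)
                    return NULL;

            addr = kzalloc(size, GFP_KERNEL);
            if (!addr) {
                    devres_free(p);
                    return NULL;
            }

            p->addr = addr;
            devres_add(dev, p);     /* now owned by the device */
            return addr;
    }

    static int demo_match(struct device *dev, void *res, void *match_data)
    {
            struct demo_pages *p = res;

            return p->addr == match_data;
    }

    /* Free one buffer early, before the device goes away. */
    static void demo_put_buffer(struct device *dev, void *addr)
    {
            devres_release(dev, demo_pages_release, demo_match, addr);
    }
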
/linux/tools/testing/selftests/vfio/lib/

iommu.c
   70  int __iommu_hva2iova(struct iommu *iommu, void *vaddr, iova_t *iova)
   74          list_for_each_entry(region, &iommu->dma_regions, link) {
   90  iova_t iommu_hva2iova(struct iommu *iommu, void *vaddr)
   95          ret = __iommu_hva2iova(iommu, vaddr, &iova);
  101  static int vfio_iommu_map(struct iommu *iommu, struct dma_region *region)
  111          if (ioctl(iommu->container_fd, VFIO_IOMMU_MAP_DMA, &args))
  117  static int iommufd_map(struct iommu *iommu, struct dma_region *region)
  127                  .ioas_id = iommu->ioas_id,
  130          if (ioctl(iommu->iommufd, IOMMU_IOAS_MAP, &args))
  136  int __iommu_map(struct iommu *iommu, struct dma_region *region)
  …

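The selftest library above dispatches a mapping request either to the legacy VFIO type1 container or to iommufd. A minimal userspace sketch of the legacy path, mapping and unmapping a buffer for device DMA, assuming container_fd is an already-configured VFIO container:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    /* Map @vaddr..@vaddr+@size at IOVA @iova, readable and writable by the device. */
    static int demo_dma_map(int container_fd, void *vaddr, uint64_t iova,
                            uint64_t size)
    {
            struct vfio_iommu_type1_dma_map map = {
                    .argsz = sizeof(map),
                    .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
                    .vaddr = (uintptr_t)vaddr,
                    .iova = iova,
                    .size = size,
            };

            return ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
    }

    static int demo_dma_unmap(int container_fd, uint64_t iova, uint64_t size)
    {
            struct vfio_iommu_type1_dma_unmap unmap = {
                    .argsz = sizeof(unmap),
                    .iova = iova,
                    .size = size,
            };

            return ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap);
    }
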
/linux/drivers/iommu/amd/

ppr.c
   20  int __init amd_iommu_alloc_ppr_log(struct amd_iommu *iommu)
   22          iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
   24          return iommu->ppr_log ? 0 : -ENOMEM;
   27  void amd_iommu_enable_ppr_log(struct amd_iommu *iommu)
   31          if (iommu->ppr_log == NULL)
   34          iommu_feature_enable(iommu, CONTROL_PPR_EN);
   36          entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
   38          memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
   42          writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
   43          writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
  …

debugfs.c
   28  struct amd_iommu *iommu = m->private;                    /* in iommu_mmio_write() */
   31          iommu->dbg_mmio_offset = -1;
   36          ret = kstrtou32_from_user(ubuf, cnt, 0, &iommu->dbg_mmio_offset);
   40          if (iommu->dbg_mmio_offset > iommu->mmio_phys_end - sizeof(u64)) {
   41                  iommu->dbg_mmio_offset = -1;
   50  struct amd_iommu *iommu = m->private;                    /* in iommu_mmio_show() */
   53          if (iommu->dbg_mmio_offset < 0) {
   58          value = readq(iommu->mmio_base + iommu->dbg_mmio_offset);
   59          seq_printf(m, "Offset:0x%x Value:0x%016llx\n", iommu->dbg_mmio_offset, value);
   69  struct amd_iommu *iommu = m->private;                    /* in iommu_capability_write() */
  …

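The amd debugfs files above follow the standard seq_file read/write pattern: the write handler parses an offset with kstrtou32_from_user() and validates it, the show handler prints the result with seq_printf(). A minimal sketch of such an attribute (demo_* names and the offset bound are illustrative):

    #include <linux/debugfs.h>
    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/seq_file.h>

    /* Hypothetical device state: one offset selected via debugfs. */
    struct demo_dev {
            u32 dbg_offset;
    };

    #define DEMO_MAX_OFFSET 0x1000          /* illustrative bound */

    static int demo_show(struct seq_file *m, void *unused)
    {
            struct demo_dev *dev = m->private;

            seq_printf(m, "Offset:0x%x\n", dev->dbg_offset);
            return 0;
    }

    static ssize_t demo_write(struct file *file, const char __user *ubuf,
                              size_t cnt, loff_t *ppos)
    {
            struct seq_file *m = file->private_data;
            struct demo_dev *dev = m->private;
            int ret;

            ret = kstrtou32_from_user(ubuf, cnt, 0, &dev->dbg_offset);
            if (ret)
                    return ret;

            if (dev->dbg_offset > DEMO_MAX_OFFSET)  /* reject out-of-range input */
                    return -EINVAL;

            return cnt;
    }

    static int demo_open(struct inode *inode, struct file *file)
    {
            return single_open(file, demo_show, inode->i_private);
    }

    static const struct file_operations demo_fops = {
            .owner          = THIS_MODULE,
            .open           = demo_open,
            .read           = seq_read,
            .write          = demo_write,
            .llseek         = seq_lseek,
            .release        = single_release,
    };

    /* Wiring: debugfs_create_file("offset", 0600, parent_dir, dev, &demo_fops); */
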
/linux/tools/testing/selftests/vfio/lib/include/libvfio/

iommu.h
   27  struct iommu {
   35  struct iommu *iommu_init(const char *iommu_mode);
   36  void iommu_cleanup(struct iommu *iommu);
   38  int __iommu_map(struct iommu *iommu, struct dma_region *region);
   40  static inline void iommu_map(struct iommu *iommu, struct dma_region *region)
   42          VFIO_ASSERT_EQ(__iommu_map(iommu, region), 0);
   45  int __iommu_unmap(struct iommu *iommu, struct dma_region *region, u64 *unmapped);
   47  static inline void iommu_unmap(struct iommu *iommu, struct dma_region *region)
   49          VFIO_ASSERT_EQ(__iommu_unmap(iommu, region, NULL), 0);
   52  int __iommu_unmap_all(struct iommu *iommu, u64 *unmapped);
  …

/linux/drivers/vfio/

vfio_iommu_type1.c
  158  vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
  166  static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
  169          struct rb_node *node = iommu->dma_list.rb_node;
  187  static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu,
  192          struct rb_node *node = iommu->dma_list.rb_node;
  215  static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
  217          struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
  233          rb_insert_color(&new->node, &iommu->dma_list);
  236  static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
  238          rb_erase(&old->node, &iommu->dma_list);
  …

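vfio_find_dma() and vfio_link_dma() above use the classic open-coded rbtree idiom: walk down to the insertion point, then rb_link_node() plus rb_insert_color(); lookups descend the tree comparing against each node's range. A minimal sketch for ranges keyed by start address (demo_* types are illustrative):

    #include <linux/rbtree.h>
    #include <linux/types.h>

    struct demo_range {
            struct rb_node node;
            u64 start;
            u64 size;
    };

    static void demo_link(struct rb_root *root, struct demo_range *new)
    {
            struct rb_node **link = &root->rb_node, *parent = NULL;
            struct demo_range *range;

            /* Walk down to the leaf position where @new belongs. */
            while (*link) {
                    parent = *link;
                    range = rb_entry(parent, struct demo_range, node);

                    if (new->start + new->size <= range->start)
                            link = &(*link)->rb_left;
                    else
                            link = &(*link)->rb_right;
            }

            rb_link_node(&new->node, parent, link);
            rb_insert_color(&new->node, root);
    }

    /* Find the range containing @addr, or NULL. */
    static struct demo_range *demo_find(struct rb_root *root, u64 addr)
    {
            struct rb_node *node = root->rb_node;

            while (node) {
                    struct demo_range *range = rb_entry(node, struct demo_range, node);

                    if (addr < range->start)
                            node = node->rb_left;
                    else if (addr >= range->start + range->size)
                            node = node->rb_right;
                    else
                            return range;
            }

            return NULL;
    }
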
/linux/arch/sparc/mm/

iommu.c
   60  struct iommu_struct *iommu;                              /* in sbus_iommu_init() */
   67          iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
   68          if (!iommu) {
   73          iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
   75          if (!iommu->regs) {
   80          control = sbus_readl(&iommu->regs->control);
   85          sbus_writel(control, &iommu->regs->control);
   87          iommu_invalidate(iommu->regs);
   88          iommu->start = IOMMU_START;
   89          iommu->end = 0xffffffff;
  …

/linux/Documentation/ABI/testing/

debugfs-amd-iommu
    1  What: /sys/kernel/debug/iommu/amd/iommu<x>/mmio
    6  MMIO register offset for iommu<x>, and the file outputs the corresponding
    7  MMIO register value of iommu<x>.
   11  $ echo "0x18" > /sys/kernel/debug/iommu/amd/iommu00/mmio
   12  $ cat /sys/kernel/debug/iommu/amd/iommu00/mmio
   18  What: /sys/kernel/debug/iommu/amd/iommu<x>/capability
   23  capability register offset for iommu<x>, and the file outputs the
   24  corresponding capability register value of iommu<x>.
   28  $ echo "0x10" > /sys/kernel/debug/iommu/amd/iommu00/capability
   29  $ cat /sys/kernel/debug/iommu/amd/iommu00/capability
  …

sysfs-class-iommu-intel-iommu
    1  What: /sys/class/iommu/<iommu>/intel-iommu/address
    8  intel-iommu with a DMAR DRHD table entry.
   10  What: /sys/class/iommu/<iommu>/intel-iommu/cap
   18  What: /sys/class/iommu/<iommu>/intel-iommu/ecap
   26  What: /sys/class/iommu/<iommu>/intel-iommu/version

/linux/drivers/gpu/drm/msm/

msm_iommu.c
  138  struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);  /* in msm_iommu_pagetable_map_prr() */
  139          phys_addr_t phys = page_to_phys(iommu->prr_page);
  224  struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);  /* in msm_iommu_pagetable_destroy() */
  232          mutex_lock(&iommu->init_lock);
  233          if (--iommu->pagetables == 0) {
  238                  __free_page(iommu->prr_page);
  239                  iommu->prr_page = NULL;
  242          mutex_unlock(&iommu->init_lock);
  269  struct msm_iommu *iommu = to_msm_iommu(mmu);             /* in msm_iommu_get_geometry() */
  271          return &iommu->domain->geometry;
  …

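msm_iommu_pagetable_destroy() above frees the shared PRR page only when the last pagetable drops away, guarding a plain counter with a mutex rather than a kref. A minimal sketch of that last-user-cleans-up pattern (demo_* names are illustrative; handling of a failed allocation is elided):

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/mutex.h>

    struct demo_parent {
            struct mutex init_lock; /* guards users and page */
            int users;
            struct page *page;      /* lazily allocated shared resource */
    };

    static struct page *demo_get(struct demo_parent *p)
    {
            struct page *page;

            mutex_lock(&p->init_lock);
            if (p->users++ == 0)    /* first user allocates */
                    p->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
            page = p->page;
            mutex_unlock(&p->init_lock);

            return page;
    }

    static void demo_put(struct demo_parent *p)
    {
            mutex_lock(&p->init_lock);
            if (--p->users == 0) {  /* last user frees */
                    __free_page(p->page);
                    p->page = NULL;
            }
            mutex_unlock(&p->init_lock);
    }
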