Lines Matching +full:protection +full:- +full:domain in drivers/iommu/exynos-iommu.c

1 // SPDX-License-Identifier: GPL-2.0-only
12 #include <linux/dma-mapping.h>
25 #include "dma-iommu.h"
26 #include "iommu-pages.h"
41 #define SECT_MASK (~(SECT_SIZE - 1))
42 #define LPAGE_MASK (~(LPAGE_SIZE - 1))
43 #define SPAGE_MASK (~(SPAGE_SIZE - 1))
58 * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces
65 static short PG_ENT_SHIFT = -1;
101 #define section_offs(iova) (iova & (SECT_SIZE - 1))
103 #define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
105 #define spage_offs(iova) (iova & (SPAGE_SIZE - 1))
117 return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1); in lv2ent_offset()
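
The SECT/LPAGE/SPAGE masks and offset helpers listed above split a 32-bit IOVA for the two-level page table: the bits above 1 MiB index the level-1 table, the next eight bits index one of the level-2 entries, and the rest is the in-page offset. The standalone sketch below walks one address through that split; the SECT_ORDER/LPAGE_ORDER/SPAGE_ORDER and NUM_LV2ENTRIES values are assumptions matching the driver's usual constants, not copied from this listing:

#include <stdio.h>

/* Constants assumed to match the driver's usual definitions. */
#define SECT_ORDER      20                          /* 1 MiB sections     */
#define LPAGE_ORDER     16                          /* 64 KiB large pages */
#define SPAGE_ORDER     12                          /* 4 KiB small pages  */
#define SECT_SIZE       (1u << SECT_ORDER)
#define LPAGE_SIZE      (1u << LPAGE_ORDER)
#define SPAGE_SIZE      (1u << SPAGE_ORDER)
#define NUM_LV2ENTRIES  (SECT_SIZE / SPAGE_SIZE)    /* 256 */

int main(void)
{
	unsigned int iova = 0x12345678;

	/* Level-1 index picks the 1 MiB section entry. */
	unsigned int lv1 = iova >> SECT_ORDER;                           /* 0x123   */
	/* Level-2 index picks one of 256 4 KiB entries below it. */
	unsigned int lv2 = (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1); /* 0x45    */
	/* Offsets inside a section, large-page or small-page mapping. */
	unsigned int sect_off  = iova & (SECT_SIZE - 1);                 /* 0x45678 */
	unsigned int lpage_off = iova & (LPAGE_SIZE - 1);                /* 0x5678  */
	unsigned int spage_off = iova & (SPAGE_SIZE - 1);                /* 0x678   */

	printf("lv1=%#x lv2=%#x sect=%#x lpage=%#x spage=%#x\n",
	       lv1, lv2, sect_off, lpage_off, spage_off);
	return 0;
}

These three sizes (1 MiB, 64 KiB, 4 KiB) are also exactly the mapping sizes advertised in pgsize_bitmap further down.
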
155 #define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */
159 /* v1.x - v3.x registers */
206 { REG_AR_FAULT_ADDR, "MULTI-HIT", IOMMU_FAULT_READ },
207 { REG_AW_FAULT_ADDR, "MULTI-HIT", IOMMU_FAULT_WRITE },
209 { REG_AR_FAULT_ADDR, "SECURITY PROTECTION", IOMMU_FAULT_READ },
210 { REG_AR_FAULT_ADDR, "ACCESS PROTECTION", IOMMU_FAULT_READ },
211 { REG_AW_FAULT_ADDR, "SECURITY PROTECTION", IOMMU_FAULT_WRITE },
212 { REG_AW_FAULT_ADDR, "ACCESS PROTECTION", IOMMU_FAULT_WRITE },
219 "MULTI-HIT",
220 "ACCESS PROTECTION",
221 "SECURITY PROTECTION"
227 "ACCESS PROTECTION",
232 * This structure is attached to dev->iommu->priv of the master device
239 struct iommu_domain *domain; /* domain this device is attached */ member
246 * been attached to this domain and page tables of IO address space defined by
247 * it. It is usually referenced by 'domain' pointer.
255 struct iommu_domain domain; /* generic domain data structure */ member
298 struct exynos_iommu_domain *domain; /* domain we belong to */ member
299 struct list_head domain_node; /* node for domain clients list */
311 #define SYSMMU_REG(data, reg) ((data)->sfrbase + (data)->variant->reg)
320 return -ENXIO; in exynos_sysmmu_v1_get_fault_info()
323 fault->addr = readl(data->sfrbase + finfo->addr_reg); in exynos_sysmmu_v1_get_fault_info()
324 fault->name = finfo->name; in exynos_sysmmu_v1_get_fault_info()
325 fault->type = finfo->type; in exynos_sysmmu_v1_get_fault_info()
337 fault->type = IOMMU_FAULT_READ; in exynos_sysmmu_v5_get_fault_info()
340 fault->type = IOMMU_FAULT_WRITE; in exynos_sysmmu_v5_get_fault_info()
342 itype -= 16; in exynos_sysmmu_v5_get_fault_info()
344 return -ENXIO; in exynos_sysmmu_v5_get_fault_info()
347 fault->name = sysmmu_v5_fault_names[itype]; in exynos_sysmmu_v5_get_fault_info()
348 fault->addr = readl(data->sfrbase + addr_reg); in exynos_sysmmu_v5_get_fault_info()
359 fault->addr = readl(SYSMMU_REG(data, fault_va)); in exynos_sysmmu_v7_get_fault_info()
360 fault->name = sysmmu_v7_fault_names[itype % 4]; in exynos_sysmmu_v7_get_fault_info()
361 fault->type = (info & BIT(20)) ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ; in exynos_sysmmu_v7_get_fault_info()
391 /* SysMMU v7: non-VM capable register layout */
425 return container_of(dom, struct exynos_iommu_domain, domain); in to_exynos_domain()
430 writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL); in sysmmu_unblock()
437 writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL); in sysmmu_block()
438 while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1)) in sysmmu_block()
439 --i; in sysmmu_block()
441 if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) { in sysmmu_block()
459 if (MMU_MAJ_VER(data->version) < 5 || num_inv == 1) { in __sysmmu_tlb_invalidate_entry()
467 writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE, in __sysmmu_tlb_invalidate_entry()
477 if (MMU_MAJ_VER(data->version) < 5) in __sysmmu_set_ptbase()
488 BUG_ON(clk_prepare_enable(data->clk_master)); in __sysmmu_enable_clocks()
489 BUG_ON(clk_prepare_enable(data->clk)); in __sysmmu_enable_clocks()
490 BUG_ON(clk_prepare_enable(data->pclk)); in __sysmmu_enable_clocks()
491 BUG_ON(clk_prepare_enable(data->aclk)); in __sysmmu_enable_clocks()
496 clk_disable_unprepare(data->aclk); in __sysmmu_disable_clocks()
497 clk_disable_unprepare(data->pclk); in __sysmmu_disable_clocks()
498 clk_disable_unprepare(data->clk); in __sysmmu_disable_clocks()
499 clk_disable_unprepare(data->clk_master); in __sysmmu_disable_clocks()
504 u32 capa0 = readl(data->sfrbase + REG_V7_CAPA0); in __sysmmu_has_capa1()
511 u32 capa1 = readl(data->sfrbase + REG_V7_CAPA1); in __sysmmu_get_vcr()
513 data->has_vcr = capa1 & CAPA1_VCR_ENABLED; in __sysmmu_get_vcr()
522 ver = readl(data->sfrbase + REG_MMU_VERSION); in __sysmmu_get_version()
526 data->version = MAKE_MMU_VER(1, 0); in __sysmmu_get_version()
528 data->version = MMU_RAW_VER(ver); in __sysmmu_get_version()
530 dev_dbg(data->sysmmu, "hardware version: %d.%d\n", in __sysmmu_get_version()
531 MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version)); in __sysmmu_get_version()
533 if (MMU_MAJ_VER(data->version) < 5) { in __sysmmu_get_version()
534 data->variant = &sysmmu_v1_variant; in __sysmmu_get_version()
535 } else if (MMU_MAJ_VER(data->version) < 7) { in __sysmmu_get_version()
536 data->variant = &sysmmu_v5_variant; in __sysmmu_get_version()
540 if (data->has_vcr) in __sysmmu_get_version()
541 data->variant = &sysmmu_v7_vm_variant; in __sysmmu_get_version()
543 data->variant = &sysmmu_v7_variant; in __sysmmu_get_version()
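
The __sysmmu_get_version() lines above read REG_MMU_VERSION, extract the raw version field with MMU_RAW_VER() (bits [31:21], per the macro listed earlier) and pick the v1, v5 or v7 register layout from the major number, with a fallback to 1.0 for hardware that does not report a usable version. The sketch below decodes a sample register value the same way; the major/minor split inside the raw field (4-bit major above a 7-bit minor, as MAKE_MMU_VER() suggests) is an assumption for illustration:

#include <stdio.h>

/* Assumed field layout: an 11-bit version in REG_MMU_VERSION bits [31:21],
 * holding a 4-bit major number above a 7-bit minor number.
 */
#define MMU_RAW_VER(reg)        (((reg) >> 21) & ((1 << 11) - 1))
#define MAKE_MMU_VER(maj, min)  ((((maj) & 0xF) << 7) | ((min) & 0x7F))
#define MMU_MAJ_VER(val)        ((val) >> 7)
#define MMU_MIN_VER(val)        ((val) & 0x7F)

int main(void)
{
	/* Hypothetical REG_MMU_VERSION readout encoding version 5.1. */
	unsigned int reg = MAKE_MMU_VER(5, 1) << 21;
	unsigned int ver = MMU_RAW_VER(reg);

	printf("hardware version: %u.%u\n", MMU_MAJ_VER(ver), MMU_MIN_VER(ver));

	/* Variant choice mirrors the excerpt: major < 5 uses the v1 layout,
	 * major < 7 the v5 layout, and v7 picks the VM-capable layout only
	 * when CAPA1 reports VCR support.
	 */
	if (MMU_MAJ_VER(ver) < 5)
		printf("register layout: v1\n");
	else if (MMU_MAJ_VER(ver) < 7)
		printf("register layout: v5\n");
	else
		printf("register layout: v7 (VM layout if VCR is present)\n");
	return 0;
}
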
554 dev_err(data->sysmmu, "%s: [%s] %s FAULT occurred at %#x\n", in show_fault_information()
555 dev_name(data->master), in show_fault_information()
556 fault->type == IOMMU_FAULT_READ ? "READ" : "WRITE", in show_fault_information()
557 fault->name, fault->addr); in show_fault_information()
558 dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable); in show_fault_information()
559 ent = section_entry(phys_to_virt(data->pgtable), fault->addr); in show_fault_information()
560 dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent); in show_fault_information()
562 ent = page_entry(ent, fault->addr); in show_fault_information()
563 dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent); in show_fault_information()
572 int ret = -ENOSYS; in exynos_sysmmu_irq()
574 WARN_ON(!data->active); in exynos_sysmmu_irq()
576 spin_lock(&data->lock); in exynos_sysmmu_irq()
577 clk_enable(data->clk_master); in exynos_sysmmu_irq()
580 ret = data->variant->get_fault_info(data, itype, &fault); in exynos_sysmmu_irq()
582 dev_err(data->sysmmu, "Unhandled interrupt bit %u\n", itype); in exynos_sysmmu_irq()
587 if (data->domain) { in exynos_sysmmu_irq()
588 ret = report_iommu_fault(&data->domain->domain, data->master, in exynos_sysmmu_irq()
599 clk_disable(data->clk_master); in exynos_sysmmu_irq()
600 spin_unlock(&data->lock); in exynos_sysmmu_irq()
609 clk_enable(data->clk_master); in __sysmmu_disable()
611 spin_lock_irqsave(&data->lock, flags); in __sysmmu_disable()
612 writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL); in __sysmmu_disable()
613 writel(0, data->sfrbase + REG_MMU_CFG); in __sysmmu_disable()
614 data->active = false; in __sysmmu_disable()
615 spin_unlock_irqrestore(&data->lock, flags); in __sysmmu_disable()
624 if (data->version <= MAKE_MMU_VER(3, 1)) in __sysmmu_init_config()
626 else if (data->version <= MAKE_MMU_VER(3, 2)) in __sysmmu_init_config()
631 cfg |= CFG_EAP; /* enable access protection bits check */ in __sysmmu_init_config()
633 writel(cfg, data->sfrbase + REG_MMU_CFG); in __sysmmu_init_config()
640 if (MMU_MAJ_VER(data->version) < 7 || !data->has_vcr) in __sysmmu_enable_vid()
643 ctrl = readl(data->sfrbase + REG_V7_CTRL_VM); in __sysmmu_enable_vid()
645 writel(ctrl, data->sfrbase + REG_V7_CTRL_VM); in __sysmmu_enable_vid()
654 spin_lock_irqsave(&data->lock, flags); in __sysmmu_enable()
655 writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL); in __sysmmu_enable()
657 __sysmmu_set_ptbase(data, data->pgtable); in __sysmmu_enable()
659 writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL); in __sysmmu_enable()
660 data->active = true; in __sysmmu_enable()
661 spin_unlock_irqrestore(&data->lock, flags); in __sysmmu_enable()
669 clk_disable(data->clk_master); in __sysmmu_enable()
677 spin_lock_irqsave(&data->lock, flags); in sysmmu_tlb_invalidate_flpdcache()
678 if (data->active && data->version >= MAKE_MMU_VER(3, 3)) { in sysmmu_tlb_invalidate_flpdcache()
679 clk_enable(data->clk_master); in sysmmu_tlb_invalidate_flpdcache()
681 if (data->version >= MAKE_MMU_VER(5, 0)) in sysmmu_tlb_invalidate_flpdcache()
687 clk_disable(data->clk_master); in sysmmu_tlb_invalidate_flpdcache()
689 spin_unlock_irqrestore(&data->lock, flags); in sysmmu_tlb_invalidate_flpdcache()
697 spin_lock_irqsave(&data->lock, flags); in sysmmu_tlb_invalidate_entry()
698 if (data->active) { in sysmmu_tlb_invalidate_entry()
701 clk_enable(data->clk_master); in sysmmu_tlb_invalidate_entry()
708 * because it is set-associative TLB in sysmmu_tlb_invalidate_entry()
709 * with 8-way and 64 sets. in sysmmu_tlb_invalidate_entry()
713 if (MMU_MAJ_VER(data->version) == 2) in sysmmu_tlb_invalidate_entry()
720 clk_disable(data->clk_master); in sysmmu_tlb_invalidate_entry()
722 spin_unlock_irqrestore(&data->lock, flags); in sysmmu_tlb_invalidate_entry()
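
The comment excerpted above explains why sysmmu_tlb_invalidate_entry() may issue several per-page invalidations on older hardware: the TLB is 8-way set-associative with 64 sets, so a 64 KiB or 1 MiB mapping may have been cached in many different sets, but never in more sets than exist. The arithmetic below illustrates that reasoning; it is a back-of-the-envelope sketch, not the driver's actual computation:

#include <stdio.h>

#define SPAGE_SIZE   0x1000u   /* 4 KiB small page */
#define NUM_TLB_SETS 64u       /* "8-way and 64 sets" from the comment */

/* Illustrative only: one invalidation per 4 KiB page of the region,
 * capped at the number of sets, since consecutive pages cycle through
 * the sets and further invalidations would only revisit the same sets.
 */
static unsigned int invalidations_needed(unsigned int size)
{
	unsigned int pages = size / SPAGE_SIZE;

	return pages < NUM_TLB_SETS ? pages : NUM_TLB_SETS;
}

int main(void)
{
	printf("4 KiB page  -> %u invalidation(s)\n", invalidations_needed(0x1000));
	printf("64 KiB page -> %u invalidation(s)\n", invalidations_needed(0x10000));
	printf("1 MiB page  -> %u invalidation(s)\n", invalidations_needed(0x100000));
	return 0;
}
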
730 struct device *dev = &pdev->dev; in exynos_sysmmu_probe()
736 return -ENOMEM; in exynos_sysmmu_probe()
739 data->sfrbase = devm_ioremap_resource(dev, res); in exynos_sysmmu_probe()
740 if (IS_ERR(data->sfrbase)) in exynos_sysmmu_probe()
741 return PTR_ERR(data->sfrbase); in exynos_sysmmu_probe()
754 data->clk = devm_clk_get_optional(dev, "sysmmu"); in exynos_sysmmu_probe()
755 if (IS_ERR(data->clk)) in exynos_sysmmu_probe()
756 return PTR_ERR(data->clk); in exynos_sysmmu_probe()
758 data->aclk = devm_clk_get_optional(dev, "aclk"); in exynos_sysmmu_probe()
759 if (IS_ERR(data->aclk)) in exynos_sysmmu_probe()
760 return PTR_ERR(data->aclk); in exynos_sysmmu_probe()
762 data->pclk = devm_clk_get_optional(dev, "pclk"); in exynos_sysmmu_probe()
763 if (IS_ERR(data->pclk)) in exynos_sysmmu_probe()
764 return PTR_ERR(data->pclk); in exynos_sysmmu_probe()
766 if (!data->clk && (!data->aclk || !data->pclk)) { in exynos_sysmmu_probe()
768 return -ENOSYS; in exynos_sysmmu_probe()
771 data->clk_master = devm_clk_get_optional(dev, "master"); in exynos_sysmmu_probe()
772 if (IS_ERR(data->clk_master)) in exynos_sysmmu_probe()
773 return PTR_ERR(data->clk_master); in exynos_sysmmu_probe()
775 data->sysmmu = dev; in exynos_sysmmu_probe()
776 spin_lock_init(&data->lock); in exynos_sysmmu_probe()
780 ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL, in exynos_sysmmu_probe()
781 dev_name(data->sysmmu)); in exynos_sysmmu_probe()
788 if (MMU_MAJ_VER(data->version) < 5) { in exynos_sysmmu_probe()
799 if (MMU_MAJ_VER(data->version) >= 5) { in exynos_sysmmu_probe()
812 dma_dev = &pdev->dev; in exynos_sysmmu_probe()
816 ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev); in exynos_sysmmu_probe()
823 iommu_device_sysfs_remove(&data->iommu); in exynos_sysmmu_probe()
830 struct device *master = data->master; in exynos_sysmmu_suspend()
835 mutex_lock(&owner->rpm_lock); in exynos_sysmmu_suspend()
836 if (data->domain) { in exynos_sysmmu_suspend()
837 dev_dbg(data->sysmmu, "saving state\n"); in exynos_sysmmu_suspend()
840 mutex_unlock(&owner->rpm_lock); in exynos_sysmmu_suspend()
848 struct device *master = data->master; in exynos_sysmmu_resume()
853 mutex_lock(&owner->rpm_lock); in exynos_sysmmu_resume()
854 if (data->domain) { in exynos_sysmmu_resume()
855 dev_dbg(data->sysmmu, "restoring state\n"); in exynos_sysmmu_resume()
858 mutex_unlock(&owner->rpm_lock); in exynos_sysmmu_resume()
870 { .compatible = "samsung,exynos-sysmmu", },
877 .name = "exynos-sysmmu",
895 struct exynos_iommu_domain *domain; in exynos_iommu_domain_alloc_paging() local
902 domain = kzalloc(sizeof(*domain), GFP_KERNEL); in exynos_iommu_domain_alloc_paging()
903 if (!domain) in exynos_iommu_domain_alloc_paging()
906 domain->pgtable = iommu_alloc_pages_sz(GFP_KERNEL, SZ_16K); in exynos_iommu_domain_alloc_paging()
907 if (!domain->pgtable) in exynos_iommu_domain_alloc_paging()
910 domain->lv2entcnt = iommu_alloc_pages_sz(GFP_KERNEL, SZ_8K); in exynos_iommu_domain_alloc_paging()
911 if (!domain->lv2entcnt) in exynos_iommu_domain_alloc_paging()
916 domain->pgtable[i] = ZERO_LV2LINK; in exynos_iommu_domain_alloc_paging()
918 handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE, in exynos_iommu_domain_alloc_paging()
921 BUG_ON(handle != virt_to_phys(domain->pgtable)); in exynos_iommu_domain_alloc_paging()
925 spin_lock_init(&domain->lock); in exynos_iommu_domain_alloc_paging()
926 spin_lock_init(&domain->pgtablelock); in exynos_iommu_domain_alloc_paging()
927 INIT_LIST_HEAD(&domain->clients); in exynos_iommu_domain_alloc_paging()
929 domain->domain.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE; in exynos_iommu_domain_alloc_paging()
931 domain->domain.geometry.aperture_start = 0; in exynos_iommu_domain_alloc_paging()
932 domain->domain.geometry.aperture_end = ~0UL; in exynos_iommu_domain_alloc_paging()
933 domain->domain.geometry.force_aperture = true; in exynos_iommu_domain_alloc_paging()
935 return &domain->domain; in exynos_iommu_domain_alloc_paging()
938 iommu_free_pages(domain->lv2entcnt); in exynos_iommu_domain_alloc_paging()
940 iommu_free_pages(domain->pgtable); in exynos_iommu_domain_alloc_paging()
942 kfree(domain); in exynos_iommu_domain_alloc_paging()
948 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_domain_free() local
953 WARN_ON(!list_empty(&domain->clients)); in exynos_iommu_domain_free()
955 spin_lock_irqsave(&domain->lock, flags); in exynos_iommu_domain_free()
957 list_for_each_entry_safe(data, next, &domain->clients, domain_node) { in exynos_iommu_domain_free()
958 spin_lock(&data->lock); in exynos_iommu_domain_free()
960 data->pgtable = 0; in exynos_iommu_domain_free()
961 data->domain = NULL; in exynos_iommu_domain_free()
962 list_del_init(&data->domain_node); in exynos_iommu_domain_free()
963 spin_unlock(&data->lock); in exynos_iommu_domain_free()
966 spin_unlock_irqrestore(&domain->lock, flags); in exynos_iommu_domain_free()
968 dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE, in exynos_iommu_domain_free()
972 if (lv1ent_page(domain->pgtable + i)) { in exynos_iommu_domain_free()
973 phys_addr_t base = lv2table_base(domain->pgtable + i); in exynos_iommu_domain_free()
981 iommu_free_pages(domain->pgtable); in exynos_iommu_domain_free()
982 iommu_free_pages(domain->lv2entcnt); in exynos_iommu_domain_free()
983 kfree(domain); in exynos_iommu_domain_free()
990 struct exynos_iommu_domain *domain; in exynos_iommu_identity_attach() local
995 if (owner->domain == identity_domain) in exynos_iommu_identity_attach()
998 domain = to_exynos_domain(owner->domain); in exynos_iommu_identity_attach()
999 pagetable = virt_to_phys(domain->pgtable); in exynos_iommu_identity_attach()
1001 mutex_lock(&owner->rpm_lock); in exynos_iommu_identity_attach()
1003 list_for_each_entry(data, &owner->controllers, owner_node) { in exynos_iommu_identity_attach()
1004 pm_runtime_get_noresume(data->sysmmu); in exynos_iommu_identity_attach()
1005 if (pm_runtime_active(data->sysmmu)) in exynos_iommu_identity_attach()
1007 pm_runtime_put(data->sysmmu); in exynos_iommu_identity_attach()
1010 spin_lock_irqsave(&domain->lock, flags); in exynos_iommu_identity_attach()
1011 list_for_each_entry_safe(data, next, &domain->clients, domain_node) { in exynos_iommu_identity_attach()
1012 spin_lock(&data->lock); in exynos_iommu_identity_attach()
1013 data->pgtable = 0; in exynos_iommu_identity_attach()
1014 data->domain = NULL; in exynos_iommu_identity_attach()
1015 list_del_init(&data->domain_node); in exynos_iommu_identity_attach()
1016 spin_unlock(&data->lock); in exynos_iommu_identity_attach()
1018 owner->domain = identity_domain; in exynos_iommu_identity_attach()
1019 spin_unlock_irqrestore(&domain->lock, flags); in exynos_iommu_identity_attach()
1021 mutex_unlock(&owner->rpm_lock); in exynos_iommu_identity_attach()
1040 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_attach_device() local
1043 phys_addr_t pagetable = virt_to_phys(domain->pgtable); in exynos_iommu_attach_device()
1051 mutex_lock(&owner->rpm_lock); in exynos_iommu_attach_device()
1053 spin_lock_irqsave(&domain->lock, flags); in exynos_iommu_attach_device()
1054 list_for_each_entry(data, &owner->controllers, owner_node) { in exynos_iommu_attach_device()
1055 spin_lock(&data->lock); in exynos_iommu_attach_device()
1056 data->pgtable = pagetable; in exynos_iommu_attach_device()
1057 data->domain = domain; in exynos_iommu_attach_device()
1058 list_add_tail(&data->domain_node, &domain->clients); in exynos_iommu_attach_device()
1059 spin_unlock(&data->lock); in exynos_iommu_attach_device()
1061 owner->domain = iommu_domain; in exynos_iommu_attach_device()
1062 spin_unlock_irqrestore(&domain->lock, flags); in exynos_iommu_attach_device()
1064 list_for_each_entry(data, &owner->controllers, owner_node) { in exynos_iommu_attach_device()
1065 pm_runtime_get_noresume(data->sysmmu); in exynos_iommu_attach_device()
1066 if (pm_runtime_active(data->sysmmu)) in exynos_iommu_attach_device()
1068 pm_runtime_put(data->sysmmu); in exynos_iommu_attach_device()
1071 mutex_unlock(&owner->rpm_lock); in exynos_iommu_attach_device()
1079 static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain, in alloc_lv2entry() argument
1084 return ERR_PTR(-EADDRINUSE); in alloc_lv2entry()
1093 BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1)); in alloc_lv2entry()
1095 return ERR_PTR(-ENOMEM); in alloc_lv2entry()
1104 return ERR_PTR(-EADDRINUSE); in alloc_lv2entry()
1108 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table, in alloc_lv2entry()
1127 spin_lock(&domain->lock); in alloc_lv2entry()
1128 list_for_each_entry(data, &domain->clients, domain_node) in alloc_lv2entry()
1130 spin_unlock(&domain->lock); in alloc_lv2entry()
1137 static int lv1set_section(struct exynos_iommu_domain *domain, in lv1set_section() argument
1144 return -EADDRINUSE; in lv1set_section()
1151 return -EADDRINUSE; in lv1set_section()
1160 spin_lock(&domain->lock); in lv1set_section()
1167 list_for_each_entry(data, &domain->clients, domain_node) in lv1set_section()
1170 spin_unlock(&domain->lock); in lv1set_section()
1180 return -EADDRINUSE; in lv2set_page()
1183 *pgcnt -= 1; in lv2set_page()
1194 memset(pent - i, 0, sizeof(*pent) * i); in lv2set_page()
1195 return -EADDRINUSE; in lv2set_page()
1203 *pgcnt -= SPAGES_PER_LPAGE; in lv2set_page()
1210 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
1231 * - Any two consecutive I/O virtual regions must have a hole of size larger
1233 * - Start address of an I/O virtual region must be aligned by 128KiB.
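
The *CAUTION* comment above asks IOVA space managers built on exynos-iommu to keep at least a 128 KiB hole between any two consecutive I/O virtual regions and to start every region on a 128 KiB boundary. A minimal sketch of an allocation policy honoring both rules follows; next_region_start() and its caller are invented names for illustration, not part of the driver:

#include <stdio.h>

#define EXYNOS_IOVA_ALIGN  0x20000u   /* 128 KiB */

/* Hypothetical helper: given where the previous region ends, pick the
 * start of the next one so that (a) a hole of at least 128 KiB separates
 * the two regions and (b) the new start is 128 KiB aligned.
 */
static unsigned long next_region_start(unsigned long prev_end)
{
	unsigned long start = prev_end + EXYNOS_IOVA_ALIGN;   /* leave the hole */

	/* round up to the next 128 KiB boundary */
	return (start + EXYNOS_IOVA_ALIGN - 1) & ~(unsigned long)(EXYNOS_IOVA_ALIGN - 1);
}

int main(void)
{
	/* A region ending at 0x10005000 lets the next one begin at 0x10040000. */
	printf("next start: %#lx\n", next_region_start(0x10005000));
	return 0;
}
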
1239 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_map() local
1243 int ret = -ENOMEM; in exynos_iommu_map()
1245 BUG_ON(domain->pgtable == NULL); in exynos_iommu_map()
1248 spin_lock_irqsave(&domain->pgtablelock, flags); in exynos_iommu_map()
1250 entry = section_entry(domain->pgtable, iova); in exynos_iommu_map()
1253 ret = lv1set_section(domain, entry, iova, paddr, prot, in exynos_iommu_map()
1254 &domain->lv2entcnt[lv1ent_offset(iova)]); in exynos_iommu_map()
1258 pent = alloc_lv2entry(domain, entry, iova, in exynos_iommu_map()
1259 &domain->lv2entcnt[lv1ent_offset(iova)]); in exynos_iommu_map()
1265 &domain->lv2entcnt[lv1ent_offset(iova)]); in exynos_iommu_map()
1274 spin_unlock_irqrestore(&domain->pgtablelock, flags); in exynos_iommu_map()
1279 static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain, in exynos_iommu_tlb_invalidate_entry() argument
1285 spin_lock_irqsave(&domain->lock, flags); in exynos_iommu_tlb_invalidate_entry()
1287 list_for_each_entry(data, &domain->clients, domain_node) in exynos_iommu_tlb_invalidate_entry()
1290 spin_unlock_irqrestore(&domain->lock, flags); in exynos_iommu_tlb_invalidate_entry()
1297 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_unmap() local
1303 BUG_ON(domain->pgtable == NULL); in exynos_iommu_unmap()
1305 spin_lock_irqsave(&domain->pgtablelock, flags); in exynos_iommu_unmap()
1307 ent = section_entry(domain->pgtable, iova); in exynos_iommu_unmap()
1339 domain->lv2entcnt[lv1ent_offset(iova)] += 1; in exynos_iommu_unmap()
1357 domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE; in exynos_iommu_unmap()
1359 spin_unlock_irqrestore(&domain->pgtablelock, flags); in exynos_iommu_unmap()
1361 exynos_iommu_tlb_invalidate_entry(domain, iova, size); in exynos_iommu_unmap()
1365 spin_unlock_irqrestore(&domain->pgtablelock, flags); in exynos_iommu_unmap()
1376 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_iova_to_phys() local
1381 spin_lock_irqsave(&domain->pgtablelock, flags); in exynos_iommu_iova_to_phys()
1383 entry = section_entry(domain->pgtable, iova); in exynos_iommu_iova_to_phys()
1396 spin_unlock_irqrestore(&domain->pgtablelock, flags); in exynos_iommu_iova_to_phys()
1407 return ERR_PTR(-ENODEV); in exynos_iommu_probe_device()
1409 list_for_each_entry(data, &owner->controllers, owner_node) { in exynos_iommu_probe_device()
1415 data->link = device_link_add(dev, data->sysmmu, in exynos_iommu_probe_device()
1421 data = list_first_entry(&owner->controllers, in exynos_iommu_probe_device()
1424 return &data->iommu; in exynos_iommu_probe_device()
1434 list_for_each_entry(data, &owner->controllers, owner_node) in exynos_iommu_release_device()
1435 device_link_del(data->link); in exynos_iommu_release_device()
1441 struct platform_device *sysmmu = of_find_device_by_node(spec->np); in exynos_iommu_of_xlate()
1446 return -ENODEV; in exynos_iommu_of_xlate()
1450 put_device(&sysmmu->dev); in exynos_iommu_of_xlate()
1451 return -ENODEV; in exynos_iommu_of_xlate()
1457 put_device(&sysmmu->dev); in exynos_iommu_of_xlate()
1458 return -ENOMEM; in exynos_iommu_of_xlate()
1461 INIT_LIST_HEAD(&owner->controllers); in exynos_iommu_of_xlate()
1462 mutex_init(&owner->rpm_lock); in exynos_iommu_of_xlate()
1463 owner->domain = &exynos_identity_domain; in exynos_iommu_of_xlate()
1467 list_for_each_entry(entry, &owner->controllers, owner_node) in exynos_iommu_of_xlate()
1471 list_add_tail(&data->owner_node, &owner->controllers); in exynos_iommu_of_xlate()
1472 data->master = dev; in exynos_iommu_of_xlate()
1505 lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", in exynos_iommu_init()
1509 return -ENOMEM; in exynos_iommu_init()
1516 ret = -ENOMEM; in exynos_iommu_init()