Lines Matching +full:iommu +full:- +full:base

1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 // Copyright (C) 2016-2018, Allwinner Technology CO., LTD.
3 // Copyright (C) 2019-2020, Cerno
9 #include <linux/dma-direction.h>
10 #include <linux/dma-mapping.h>
14 #include <linux/iommu.h>
29 #include "iommu-pages.h"
101 struct iommu_device iommu; member
103 /* Lock to modify the IOMMU registers */
107 void __iomem *base; member
125 struct sun50i_iommu *iommu; member
138 static u32 iommu_read(struct sun50i_iommu *iommu, u32 offset) in iommu_read() argument
140 return readl(iommu->base + offset); in iommu_read()
143 static void iommu_write(struct sun50i_iommu *iommu, u32 offset, u32 value) in iommu_write() argument
145 writel(value, iommu->base + offset); in iommu_write()
149 * The Allwinner H6 IOMMU uses a 2-level page table.
152  * 4096 4-byte Directory Table Entries (DTE), each pointing to a Page
155  * Each PT consists of 256 4-byte Page Table Entries (PTE), each
158  * The IOMMU supports a single DT, pointed to by the IOMMU_TTB_REG
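
Given that layout, a 32-bit IOVA splits into a DTE index (bits 31:20, 4096 entries of 1 MiB each), a PTE index (bits 19:12, 256 entries of 4 KiB each) and a page offset (bits 11:0). A minimal sketch of that split; the mask and helper names here are illustrative, not the driver's own (its accessors, e.g. sun50i_iova_get_dte_index(), appear further down in this listing):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Illustrative masks: 4096 DTEs x 1 MiB, 256 PTEs x 4 KiB. */
#define IOVA_DTE_MASK	GENMASK(31, 20)
#define IOVA_PTE_MASK	GENMASK(19, 12)
#define IOVA_PAGE_MASK	GENMASK(11, 0)

static u32 iova_get_dte_index(dma_addr_t iova)
{
	return FIELD_GET(IOVA_DTE_MASK, iova);	/* which DTE in the DT */
}

static u32 iova_get_pte_index(dma_addr_t iova)
{
	return FIELD_GET(IOVA_PTE_MASK, iova);	/* which PTE in the PT */
}

static u32 iova_get_page_offset(dma_addr_t iova)
{
	return FIELD_GET(IOVA_PAGE_MASK, iova);	/* offset in the page */
}
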
185 * +---------------------+-----------+-+
187 * +---------------------+-----------+-+
188 * 31:10 - Page Table address
189 * 9:2 - Reserved
190 * 1:0 - 1 if the entry is valid
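
Read literally, that DTE layout gives helpers along these lines. A sketch that assumes the diagram is authoritative; the names and the "bits 1:0 == 1 means valid" encoding are taken from the picture above, not from the driver:

#include <linux/bits.h>
#include <linux/types.h>

#define DTE_PT_ADDRESS_MASK	GENMASK(31, 10)
#define DTE_PT_ATTRS		GENMASK(1, 0)
#define DTE_PT_VALID		1U

/* Build a DTE from the DMA address of a (1 KiB aligned) page table. */
static u32 mk_dte(dma_addr_t pt_dma)
{
	return ((u32)pt_dma & DTE_PT_ADDRESS_MASK) | DTE_PT_VALID;
}

static phys_addr_t dte_get_pt_address(u32 dte)
{
	return (phys_addr_t)(dte & DTE_PT_ADDRESS_MASK);
}

static bool dte_is_pt_valid(u32 dte)
{
	return (dte & DTE_PT_ATTRS) == DTE_PT_VALID;
}
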
215 * +----------------+-----+-----+-----+---+-----+
217 * +----------------+-----+-----+-----+---+-----+
218 * 31:12 - Page address
219 * 11:8 - Reserved
220 * 7:4 - Authority Control Index
221 * 3:2 - Reserved
222 * 1 - 1 if the entry is valid
223 * 0 - Reserved
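
The same treatment works for a PTE: page address in bits 31:12, Authority Control Index (ACI) in bits 7:4, bit 1 as the valid flag. A sketch under those assumptions:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define PTE_PAGE_ADDRESS_MASK	GENMASK(31, 12)
#define PTE_ACI_MASK		GENMASK(7, 4)
#define PTE_VALID		BIT(1)

/* Build a PTE pointing at @page with Authority Control Index @aci. */
static u32 mk_pte(phys_addr_t page, unsigned int aci)
{
	return ((u32)page & PTE_PAGE_ADDRESS_MASK) |
	       FIELD_PREP(PTE_ACI_MASK, aci) |
	       PTE_VALID;
}

static unsigned int pte_get_aci(u32 pte)
{
	return FIELD_GET(PTE_ACI_MASK, pte);
}

static bool pte_is_valid(u32 pte)
{
	return pte & PTE_VALID;
}
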
225 * The way permissions work is that the IOMMU has 16 "domains" that
229 * IOMMU_DM_AUT_CTRL_REG are only read-only, so it's not really
233  * assigned to, so that we can actually enforce them on a per-page
236 * In order to make it work with the IOMMU framework, we will be using
239  * have each master set up in the same way, since the IOMMU framework
240 * doesn't seem to restrict page access on a per-device basis. And
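
Concretely, the map path then only needs to turn the framework's IOMMU_READ/IOMMU_WRITE prot flags into one of those four domain indices. A sketch of that translation; the enum ordering here is an assumption, though the SUN50I_IOMMU_ACI_NONE/_RD/_WR names do show up in sun50i_iommu_enable() below:

#include <linux/iommu.h>

enum sun50i_iommu_aci {
	SUN50I_IOMMU_ACI_DO_NOT_USE = 0,
	SUN50I_IOMMU_ACI_NONE,
	SUN50I_IOMMU_ACI_RD,
	SUN50I_IOMMU_ACI_WR,
	SUN50I_IOMMU_ACI_RD_WR,
};

/* Pick the permission domain matching the generic IOMMU prot flags. */
static enum sun50i_iommu_aci sun50i_get_aci(int prot)
{
	if ((prot & (IOMMU_READ | IOMMU_WRITE)) == (IOMMU_READ | IOMMU_WRITE))
		return SUN50I_IOMMU_ACI_RD_WR;
	if (prot & IOMMU_READ)
		return SUN50I_IOMMU_ACI_RD;
	if (prot & IOMMU_WRITE)
		return SUN50I_IOMMU_ACI_WR;

	return SUN50I_IOMMU_ACI_NONE;
}
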
294 struct sun50i_iommu *iommu = sun50i_domain->iommu; in sun50i_table_flush() local
298 dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE); in sun50i_table_flush()
301 static void sun50i_iommu_zap_iova(struct sun50i_iommu *iommu, in sun50i_iommu_zap_iova() argument
307 iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_REG, iova); in sun50i_iommu_zap_iova()
308 iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_MASK_REG, GENMASK(31, 12)); in sun50i_iommu_zap_iova()
309 iommu_write(iommu, IOMMU_TLB_IVLD_ENABLE_REG, in sun50i_iommu_zap_iova()
312 ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_IVLD_ENABLE_REG, in sun50i_iommu_zap_iova()
315 dev_warn(iommu->dev, "TLB invalidation timed out!\n"); in sun50i_iommu_zap_iova()
318 static void sun50i_iommu_zap_ptw_cache(struct sun50i_iommu *iommu, in sun50i_iommu_zap_ptw_cache() argument
324 iommu_write(iommu, IOMMU_PC_IVLD_ADDR_REG, iova); in sun50i_iommu_zap_ptw_cache()
325 iommu_write(iommu, IOMMU_PC_IVLD_ENABLE_REG, in sun50i_iommu_zap_ptw_cache()
328 ret = readl_poll_timeout_atomic(iommu->base + IOMMU_PC_IVLD_ENABLE_REG, in sun50i_iommu_zap_ptw_cache()
331 dev_warn(iommu->dev, "PTW cache invalidation timed out!\n"); in sun50i_iommu_zap_ptw_cache()
334 static void sun50i_iommu_zap_range(struct sun50i_iommu *iommu, in sun50i_iommu_zap_range() argument
337 assert_spin_locked(&iommu->iommu_lock); in sun50i_iommu_zap_range()
339 iommu_write(iommu, IOMMU_AUTO_GATING_REG, 0); in sun50i_iommu_zap_range()
341 sun50i_iommu_zap_iova(iommu, iova); in sun50i_iommu_zap_range()
342 sun50i_iommu_zap_iova(iommu, iova + SPAGE_SIZE); in sun50i_iommu_zap_range()
344 sun50i_iommu_zap_iova(iommu, iova + size); in sun50i_iommu_zap_range()
345 sun50i_iommu_zap_iova(iommu, iova + size + SPAGE_SIZE); in sun50i_iommu_zap_range()
347 sun50i_iommu_zap_ptw_cache(iommu, iova); in sun50i_iommu_zap_range()
348 sun50i_iommu_zap_ptw_cache(iommu, iova + SZ_1M); in sun50i_iommu_zap_range()
350 sun50i_iommu_zap_ptw_cache(iommu, iova + size); in sun50i_iommu_zap_range()
351 sun50i_iommu_zap_ptw_cache(iommu, iova + size + SZ_1M); in sun50i_iommu_zap_range()
354 iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE); in sun50i_iommu_zap_range()
357 static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu) in sun50i_iommu_flush_all_tlb() argument
362 assert_spin_locked(&iommu->iommu_lock); in sun50i_iommu_flush_all_tlb()
364 iommu_write(iommu, in sun50i_iommu_flush_all_tlb()
375 ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_FLUSH_REG, in sun50i_iommu_flush_all_tlb()
379 dev_warn(iommu->dev, "TLB Flush timed out!\n"); in sun50i_iommu_flush_all_tlb()
387 struct sun50i_iommu *iommu = sun50i_domain->iommu; in sun50i_iommu_flush_iotlb_all() local
392 * .probe_device, and since we link our (single) domain to our iommu in in sun50i_iommu_flush_iotlb_all()
398 if (!iommu) in sun50i_iommu_flush_iotlb_all()
401 spin_lock_irqsave(&iommu->iommu_lock, flags); in sun50i_iommu_flush_iotlb_all()
402 sun50i_iommu_flush_all_tlb(iommu); in sun50i_iommu_flush_iotlb_all()
403 spin_unlock_irqrestore(&iommu->iommu_lock, flags); in sun50i_iommu_flush_iotlb_all()
410 struct sun50i_iommu *iommu = sun50i_domain->iommu; in sun50i_iommu_iotlb_sync_map() local
413 spin_lock_irqsave(&iommu->iommu_lock, flags); in sun50i_iommu_iotlb_sync_map()
414 sun50i_iommu_zap_range(iommu, iova, size); in sun50i_iommu_iotlb_sync_map()
415 spin_unlock_irqrestore(&iommu->iommu_lock, flags); in sun50i_iommu_iotlb_sync_map()
426 static int sun50i_iommu_enable(struct sun50i_iommu *iommu) in sun50i_iommu_enable() argument
432 if (!iommu->domain) in sun50i_iommu_enable()
435 sun50i_domain = to_sun50i_domain(iommu->domain); in sun50i_iommu_enable()
437 ret = reset_control_deassert(iommu->reset); in sun50i_iommu_enable()
441 ret = clk_prepare_enable(iommu->clk); in sun50i_iommu_enable()
445 spin_lock_irqsave(&iommu->iommu_lock, flags); in sun50i_iommu_enable()
447 iommu_write(iommu, IOMMU_TTB_REG, sun50i_domain->dt_dma); in sun50i_iommu_enable()
448 iommu_write(iommu, IOMMU_TLB_PREFETCH_REG, in sun50i_iommu_enable()
455 iommu_write(iommu, IOMMU_BYPASS_REG, 0); in sun50i_iommu_enable()
456 iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK); in sun50i_iommu_enable()
457 iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE), in sun50i_iommu_enable()
471 iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_RD), in sun50i_iommu_enable()
479 iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_WR), in sun50i_iommu_enable()
487 ret = sun50i_iommu_flush_all_tlb(iommu); in sun50i_iommu_enable()
489 spin_unlock_irqrestore(&iommu->iommu_lock, flags); in sun50i_iommu_enable()
493 iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE); in sun50i_iommu_enable()
494 iommu_write(iommu, IOMMU_ENABLE_REG, IOMMU_ENABLE_ENABLE); in sun50i_iommu_enable()
496 spin_unlock_irqrestore(&iommu->iommu_lock, flags); in sun50i_iommu_enable()
501 clk_disable_unprepare(iommu->clk); in sun50i_iommu_enable()
504 reset_control_assert(iommu->reset); in sun50i_iommu_enable()
509 static void sun50i_iommu_disable(struct sun50i_iommu *iommu) in sun50i_iommu_disable() argument
513 spin_lock_irqsave(&iommu->iommu_lock, flags); in sun50i_iommu_disable()
515 iommu_write(iommu, IOMMU_ENABLE_REG, 0); in sun50i_iommu_disable()
516 iommu_write(iommu, IOMMU_TTB_REG, 0); in sun50i_iommu_disable()
518 spin_unlock_irqrestore(&iommu->iommu_lock, flags); in sun50i_iommu_disable()
520 clk_disable_unprepare(iommu->clk); in sun50i_iommu_disable()
521 reset_control_assert(iommu->reset); in sun50i_iommu_disable()
524 static void *sun50i_iommu_alloc_page_table(struct sun50i_iommu *iommu, in sun50i_iommu_alloc_page_table() argument
530 page_table = kmem_cache_zalloc(iommu->pt_pool, gfp); in sun50i_iommu_alloc_page_table()
532 return ERR_PTR(-ENOMEM); in sun50i_iommu_alloc_page_table()
534 pt_dma = dma_map_single(iommu->dev, page_table, PT_SIZE, DMA_TO_DEVICE); in sun50i_iommu_alloc_page_table()
535 if (dma_mapping_error(iommu->dev, pt_dma)) { in sun50i_iommu_alloc_page_table()
536 dev_err(iommu->dev, "Couldn't map L2 Page Table\n"); in sun50i_iommu_alloc_page_table()
537 kmem_cache_free(iommu->pt_pool, page_table); in sun50i_iommu_alloc_page_table()
538 return ERR_PTR(-ENOMEM); in sun50i_iommu_alloc_page_table()
547 static void sun50i_iommu_free_page_table(struct sun50i_iommu *iommu, in sun50i_iommu_free_page_table() argument
552 dma_unmap_single(iommu->dev, pt_phys, PT_SIZE, DMA_TO_DEVICE); in sun50i_iommu_free_page_table()
553 kmem_cache_free(iommu->pt_pool, page_table); in sun50i_iommu_free_page_table()
559 struct sun50i_iommu *iommu = sun50i_domain->iommu; in sun50i_dte_get_page_table() local
565 dte_addr = &sun50i_domain->dt[sun50i_iova_get_dte_index(iova)]; in sun50i_dte_get_page_table()
572 page_table = sun50i_iommu_alloc_page_table(iommu, gfp); in sun50i_dte_get_page_table()
586 sun50i_iommu_free_page_table(iommu, drop_pt); in sun50i_dte_get_page_table()
600 struct sun50i_iommu *iommu = sun50i_domain->iommu; in sun50i_iommu_map() local
605 /* the IOMMU can only handle 32-bit addresses, both input and output */ in sun50i_iommu_map()
607 ret = -EINVAL; in sun50i_iommu_map()
608 dev_warn_once(iommu->dev, in sun50i_iommu_map()
623 dev_err(iommu->dev, in sun50i_iommu_map()
626 ret = -EBUSY; in sun50i_iommu_map()
646 dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)]; in sun50i_iommu_unmap()
670 dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)]; in sun50i_iommu_iova_to_phys()
693 sun50i_domain->dt = in sun50i_iommu_domain_alloc_paging()
695 if (!sun50i_domain->dt) in sun50i_iommu_domain_alloc_paging()
698 refcount_set(&sun50i_domain->refcnt, 1); in sun50i_iommu_domain_alloc_paging()
700 sun50i_domain->domain.pgsize_bitmap = SZ_4K; in sun50i_iommu_domain_alloc_paging()
702 sun50i_domain->domain.geometry.aperture_start = 0; in sun50i_iommu_domain_alloc_paging()
703 sun50i_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32); in sun50i_iommu_domain_alloc_paging()
704 sun50i_domain->domain.geometry.force_aperture = true; in sun50i_iommu_domain_alloc_paging()
706 return &sun50i_domain->domain; in sun50i_iommu_domain_alloc_paging()
718 iommu_free_pages(sun50i_domain->dt); in sun50i_iommu_domain_free()
719 sun50i_domain->dt = NULL; in sun50i_iommu_domain_free()
724 static int sun50i_iommu_attach_domain(struct sun50i_iommu *iommu, in sun50i_iommu_attach_domain() argument
727 iommu->domain = &sun50i_domain->domain; in sun50i_iommu_attach_domain()
728 sun50i_domain->iommu = iommu; in sun50i_iommu_attach_domain()
730 sun50i_domain->dt_dma = dma_map_single(iommu->dev, sun50i_domain->dt, in sun50i_iommu_attach_domain()
732 if (dma_mapping_error(iommu->dev, sun50i_domain->dt_dma)) { in sun50i_iommu_attach_domain()
733 dev_err(iommu->dev, "Couldn't map L1 Page Table\n"); in sun50i_iommu_attach_domain()
734 return -ENOMEM; in sun50i_iommu_attach_domain()
737 return sun50i_iommu_enable(iommu); in sun50i_iommu_attach_domain()
740 static void sun50i_iommu_detach_domain(struct sun50i_iommu *iommu, in sun50i_iommu_detach_domain() argument
751 dte_addr = &sun50i_domain->dt[i]; in sun50i_iommu_detach_domain()
761 sun50i_iommu_free_page_table(iommu, page_table); in sun50i_iommu_detach_domain()
765 sun50i_iommu_disable(iommu); in sun50i_iommu_detach_domain()
767 dma_unmap_single(iommu->dev, virt_to_phys(sun50i_domain->dt), in sun50i_iommu_detach_domain()
770 iommu->domain = NULL; in sun50i_iommu_detach_domain()
776 struct sun50i_iommu *iommu = dev_iommu_priv_get(dev); in sun50i_iommu_identity_attach() local
779 dev_dbg(dev, "Detaching from IOMMU domain\n"); in sun50i_iommu_identity_attach()
781 if (iommu->domain == identity_domain) in sun50i_iommu_identity_attach()
784 sun50i_domain = to_sun50i_domain(iommu->domain); in sun50i_iommu_identity_attach()
785 if (refcount_dec_and_test(&sun50i_domain->refcnt)) in sun50i_iommu_identity_attach()
786 sun50i_iommu_detach_domain(iommu, sun50i_domain); in sun50i_iommu_identity_attach()
803 struct sun50i_iommu *iommu; in sun50i_iommu_attach_device() local
805 iommu = sun50i_iommu_from_dev(dev); in sun50i_iommu_attach_device()
806 if (!iommu) in sun50i_iommu_attach_device()
807 return -ENODEV; in sun50i_iommu_attach_device()
809 dev_dbg(dev, "Attaching to IOMMU domain\n"); in sun50i_iommu_attach_device()
811 refcount_inc(&sun50i_domain->refcnt); in sun50i_iommu_attach_device()
813 if (iommu->domain == domain) in sun50i_iommu_attach_device()
818 sun50i_iommu_attach_domain(iommu, sun50i_domain); in sun50i_iommu_attach_device()
825 struct sun50i_iommu *iommu; in sun50i_iommu_probe_device() local
827 iommu = sun50i_iommu_from_dev(dev); in sun50i_iommu_probe_device()
828 if (!iommu) in sun50i_iommu_probe_device()
829 return ERR_PTR(-ENODEV); in sun50i_iommu_probe_device()
831 return &iommu->iommu; in sun50i_iommu_probe_device()
837 struct platform_device *iommu_pdev = of_find_device_by_node(args->np); in sun50i_iommu_of_xlate()
838 unsigned id = args->args[0]; in sun50i_iommu_of_xlate()
863 static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu, in sun50i_iommu_report_fault() argument
867 dev_err(iommu->dev, "Page fault for %pad (master %d, dir %s)\n", in sun50i_iommu_report_fault()
870 if (iommu->domain) in sun50i_iommu_report_fault()
871 report_iommu_fault(iommu->domain, iommu->dev, iova, prot); in sun50i_iommu_report_fault()
873 dev_err(iommu->dev, "Page fault while iommu not attached to any domain?\n"); in sun50i_iommu_report_fault()
875 sun50i_iommu_zap_range(iommu, iova, SPAGE_SIZE); in sun50i_iommu_report_fault()
878 static phys_addr_t sun50i_iommu_handle_pt_irq(struct sun50i_iommu *iommu, in sun50i_iommu_handle_pt_irq() argument
886 assert_spin_locked(&iommu->iommu_lock); in sun50i_iommu_handle_pt_irq()
888 iova = iommu_read(iommu, addr_reg); in sun50i_iommu_handle_pt_irq()
889 blame = iommu_read(iommu, blame_reg); in sun50i_iommu_handle_pt_irq()
897 sun50i_iommu_report_fault(iommu, master, iova, IOMMU_FAULT_READ); in sun50i_iommu_handle_pt_irq()
902 static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu) in sun50i_iommu_handle_perm_irq() argument
910 assert_spin_locked(&iommu->iommu_lock); in sun50i_iommu_handle_perm_irq()
912 blame = iommu_read(iommu, IOMMU_INT_STA_REG); in sun50i_iommu_handle_perm_irq()
914 iova = iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG(master)); in sun50i_iommu_handle_perm_irq()
915 aci = sun50i_get_pte_aci(iommu_read(iommu, in sun50i_iommu_handle_perm_irq()
920 * If we are in the read-only domain, then it means we in sun50i_iommu_handle_perm_irq()
928 * If we are in the write-only domain, then it means in sun50i_iommu_handle_perm_irq()
952 sun50i_iommu_report_fault(iommu, master, iova, dir); in sun50i_iommu_handle_perm_irq()
960 struct sun50i_iommu *iommu = dev_id; in sun50i_iommu_irq() local
962 spin_lock(&iommu->iommu_lock); in sun50i_iommu_irq()
964 status = iommu_read(iommu, IOMMU_INT_STA_REG); in sun50i_iommu_irq()
966 spin_unlock(&iommu->iommu_lock); in sun50i_iommu_irq()
970 l1_status = iommu_read(iommu, IOMMU_L1PG_INT_REG); in sun50i_iommu_irq()
971 l2_status = iommu_read(iommu, IOMMU_L2PG_INT_REG); in sun50i_iommu_irq()
974 sun50i_iommu_handle_pt_irq(iommu, in sun50i_iommu_irq()
978 sun50i_iommu_handle_pt_irq(iommu, in sun50i_iommu_irq()
982 sun50i_iommu_handle_perm_irq(iommu); in sun50i_iommu_irq()
984 iommu_write(iommu, IOMMU_INT_CLR_REG, status); in sun50i_iommu_irq()
987 iommu_write(iommu, IOMMU_RESET_REG, ~resets); in sun50i_iommu_irq()
988 iommu_write(iommu, IOMMU_RESET_REG, IOMMU_RESET_RELEASE_ALL); in sun50i_iommu_irq()
990 spin_unlock(&iommu->iommu_lock); in sun50i_iommu_irq()
997 struct sun50i_iommu *iommu; in sun50i_iommu_probe() local
1000 iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL); in sun50i_iommu_probe()
1001 if (!iommu) in sun50i_iommu_probe()
1002 return -ENOMEM; in sun50i_iommu_probe()
1003 spin_lock_init(&iommu->iommu_lock); in sun50i_iommu_probe()
1004 iommu->domain = &sun50i_iommu_identity_domain; in sun50i_iommu_probe()
1005 platform_set_drvdata(pdev, iommu); in sun50i_iommu_probe()
1006 iommu->dev = &pdev->dev; in sun50i_iommu_probe()
1008 iommu->pt_pool = kmem_cache_create(dev_name(&pdev->dev), in sun50i_iommu_probe()
1012 if (!iommu->pt_pool) in sun50i_iommu_probe()
1013 return -ENOMEM; in sun50i_iommu_probe()
1015 iommu->base = devm_platform_ioremap_resource(pdev, 0); in sun50i_iommu_probe()
1016 if (IS_ERR(iommu->base)) { in sun50i_iommu_probe()
1017 ret = PTR_ERR(iommu->base); in sun50i_iommu_probe()
1027 iommu->clk = devm_clk_get(&pdev->dev, NULL); in sun50i_iommu_probe()
1028 if (IS_ERR(iommu->clk)) { in sun50i_iommu_probe()
1029 dev_err(&pdev->dev, "Couldn't get our clock.\n"); in sun50i_iommu_probe()
1030 ret = PTR_ERR(iommu->clk); in sun50i_iommu_probe()
1034 iommu->reset = devm_reset_control_get(&pdev->dev, NULL); in sun50i_iommu_probe()
1035 if (IS_ERR(iommu->reset)) { in sun50i_iommu_probe()
1036 dev_err(&pdev->dev, "Couldn't get our reset line.\n"); in sun50i_iommu_probe()
1037 ret = PTR_ERR(iommu->reset); in sun50i_iommu_probe()
1041 ret = iommu_device_sysfs_add(&iommu->iommu, &pdev->dev, in sun50i_iommu_probe()
1042 NULL, dev_name(&pdev->dev)); in sun50i_iommu_probe()
1046 ret = iommu_device_register(&iommu->iommu, &sun50i_iommu_ops, &pdev->dev); in sun50i_iommu_probe()
1050 ret = devm_request_irq(&pdev->dev, irq, sun50i_iommu_irq, 0, in sun50i_iommu_probe()
1051 dev_name(&pdev->dev), iommu); in sun50i_iommu_probe()
1058 iommu_device_unregister(&iommu->iommu); in sun50i_iommu_probe()
1061 iommu_device_sysfs_remove(&iommu->iommu); in sun50i_iommu_probe()
1064 kmem_cache_destroy(iommu->pt_pool); in sun50i_iommu_probe()
1070 { .compatible = "allwinner,sun50i-h6-iommu", },
1071 { .compatible = "allwinner,sun50i-h616-iommu", },
1078 .name = "sun50i-iommu",
1085 MODULE_DESCRIPTION("Allwinner H6 IOMMU driver");