Lines Matching +full:page +full:- +full:based
1 // SPDX-License-Identifier: GPL-2.0-only
7 #include <linux/adreno-smmu-priv.h>
8 #include <linux/io-pgtable.h>
20 struct page *prr_page;
33 unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
37 /** @root_page_table: Stores the root page table pointer. */
45 /* based on iommu_pgsize() in iommu.c: */
55 /* Page sizes supported by the hardware and small enough for @size */ in calc_pgsize()
56 pgsizes = pagetable->pgsize_bitmap & GENMASK(__fls(size), 0); in calc_pgsize()
58 /* Constrain the page sizes further based on the maximum alignment */ in calc_pgsize()
62 /* Make sure we have at least one suitable page size */ in calc_pgsize()
65 /* Pick the biggest page size remaining */ in calc_pgsize()
71 /* Find the next biggest supported page size, if it exists */ in calc_pgsize()
72 pgsizes = pagetable->pgsize_bitmap & ~GENMASK(pgsize_idx, 0); in calc_pgsize()
80 * There's no point trying a bigger page size unless the virtual in calc_pgsize()
81 * and physical addresses are similarly offset within the larger page. in calc_pgsize()
83 if ((iova ^ paddr) & (pgsize_next - 1)) in calc_pgsize()
86 /* Calculate the offset to the next page size alignment boundary */ in calc_pgsize()
87 offset = pgsize_next - (addr_merge & (pgsize_next - 1)); in calc_pgsize()
90 * If size is big enough to accommodate the larger page, reduce in calc_pgsize()
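The calc_pgsize() lines above follow the same strategy as iommu_pgsize() in drivers/iommu/iommu.c, which the comment at the top of the function already credits. As a hedged sketch (not the driver's exact code), the first half of that strategy, picking the largest page size that fits both the remaining size and the address alignment, looks roughly as follows; the parameters stand in for the function's real arguments, and the follow-up step of merging contiguous entries into a count is omitted:

	static size_t pick_pgsize(unsigned long pgsize_bitmap, u64 iova,
				  phys_addr_t paddr, size_t size)
	{
		u64 addr_merge = paddr | iova;
		unsigned long pgsizes;

		/* Page sizes supported by the hardware and no larger than size */
		pgsizes = pgsize_bitmap & GENMASK(__fls(size), 0);

		/* Constrain further by the common alignment of iova and paddr */
		if (addr_merge)
			pgsizes &= GENMASK(__ffs(addr_merge), 0);

		/* At least one suitable page size must remain (4K in practice) */
		BUG_ON(!pgsizes);

		/* Pick the biggest page size remaining */
		return BIT(__fls(pgsizes));
	}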
105 struct io_pgtable_ops *ops = pagetable->pgtbl_ops; in msm_iommu_pagetable_unmap()
114 unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL); in msm_iommu_pagetable_unmap()
116 ret = -EINVAL; in msm_iommu_pagetable_unmap()
126 size -= unmapped; in msm_iommu_pagetable_unmap()
129 iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain); in msm_iommu_pagetable_unmap()
137 struct io_pgtable_ops *ops = pagetable->pgtbl_ops; in msm_iommu_pagetable_map_prr()
138 struct msm_iommu *iommu = to_msm_iommu(pagetable->parent); in msm_iommu_pagetable_map_prr()
139 phys_addr_t phys = page_to_phys(iommu->prr_page); in msm_iommu_pagetable_map_prr()
147 ret = ops->map_pages(ops, addr, phys, size, 1, prot, GFP_KERNEL, &mapped); in msm_iommu_pagetable_map_prr()
153 len -= mapped; in msm_iommu_pagetable_map_prr()
156 msm_iommu_pagetable_unmap(mmu, iova, addr - iova); in msm_iommu_pagetable_map_prr()
157 return -EINVAL; in msm_iommu_pagetable_map_prr()
169 struct io_pgtable_ops *ops = pagetable->pgtbl_ops; in msm_iommu_pagetable_map()
178 size_t size = sg->length; in msm_iommu_pagetable_map()
185 off -= size; in msm_iommu_pagetable_map()
190 size -= off; in msm_iommu_pagetable_map()
200 ret = ops->map_pages(ops, addr, phys, pgsize, count, in msm_iommu_pagetable_map()
208 size -= mapped; in msm_iommu_pagetable_map()
209 len -= mapped; in msm_iommu_pagetable_map()
212 msm_iommu_pagetable_unmap(mmu, iova, addr - iova); in msm_iommu_pagetable_map()
213 return -EINVAL; in msm_iommu_pagetable_map()
224 struct msm_iommu *iommu = to_msm_iommu(pagetable->parent); in msm_iommu_pagetable_destroy()
226 dev_get_drvdata(pagetable->parent->dev); in msm_iommu_pagetable_destroy()
230 * disable TTBR0 in the arm-smmu driver in msm_iommu_pagetable_destroy()
232 mutex_lock(&iommu->init_lock); in msm_iommu_pagetable_destroy()
233 if (--iommu->pagetables == 0) { in msm_iommu_pagetable_destroy()
234 adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL); in msm_iommu_pagetable_destroy()
236 if (adreno_smmu->set_prr_bit) { in msm_iommu_pagetable_destroy()
237 adreno_smmu->set_prr_bit(adreno_smmu->cookie, false); in msm_iommu_pagetable_destroy()
238 __free_page(iommu->prr_page); in msm_iommu_pagetable_destroy()
239 iommu->prr_page = NULL; in msm_iommu_pagetable_destroy()
242 mutex_unlock(&iommu->init_lock); in msm_iommu_pagetable_destroy()
244 free_io_pgtable_ops(pagetable->pgtbl_ops); in msm_iommu_pagetable_destroy()
253 if (mmu->type != MSM_MMU_IOMMU_PAGETABLE) in msm_iommu_pagetable_params()
254 return -EINVAL; in msm_iommu_pagetable_params()
259 *ttbr = pagetable->ttbr; in msm_iommu_pagetable_params()
262 *asid = pagetable->asid; in msm_iommu_pagetable_params()
271 return &iommu->domain->geometry; in msm_iommu_get_geometry()
280 if (mmu->type != MSM_MMU_IOMMU_PAGETABLE) in msm_iommu_pagetable_walk()
281 return -EINVAL; in msm_iommu_pagetable_walk()
285 if (!pagetable->pgtbl_ops->pgtable_walk) in msm_iommu_pagetable_walk()
286 return -EINVAL; in msm_iommu_pagetable_walk()
288 pagetable->pgtbl_ops->pgtable_walk(pagetable->pgtbl_ops, iova, &wd); in msm_iommu_pagetable_walk()
303 * L1, L2 and L3 page tables. in msm_iommu_pagetable_prealloc_count()
306 * 2M contiguous blocks, but it's simpler to over-provision and return in msm_iommu_pagetable_prealloc_count()
309 * The first level descriptor (v8 / v7-lpae page table format) encodes in msm_iommu_pagetable_prealloc_count()
313 …stem-Level-Architecture/Virtual-Memory-System-Architecture--VMSA-/Long-descriptor-translation-tabl… in msm_iommu_pagetable_prealloc_count()
315 pt_count = ((ALIGN(iova + len, 1ull << 39) - ALIGN_DOWN(iova, 1ull << 39)) >> 39) + in msm_iommu_pagetable_prealloc_count()
316 ((ALIGN(iova + len, 1ull << 30) - ALIGN_DOWN(iova, 1ull << 30)) >> 30) + in msm_iommu_pagetable_prealloc_count()
317 ((ALIGN(iova + len, 1ull << 21) - ALIGN_DOWN(iova, 1ull << 21)) >> 21); in msm_iommu_pagetable_prealloc_count()
319 p->count += pt_count; in msm_iommu_pagetable_prealloc_count()
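To make the pre-allocation arithmetic concrete, a worked example with hypothetical values (not taken from the driver): mapping len = 4 MiB at iova = 16 MiB gives

	2^39 spans: (ALIGN(20M, 512G) - ALIGN_DOWN(16M, 512G)) >> 39 = 1   /* one L1 table  */
	2^30 spans: (ALIGN(20M, 1G)   - ALIGN_DOWN(16M, 1G))   >> 30 = 1   /* one L2 table  */
	2^21 spans: (ALIGN(20M, 2M)   - ALIGN_DOWN(16M, 2M))   >> 21 = 2   /* two L3 tables */

so pt_count = 1 + 1 + 2 = 4 tables are reserved on top of the root table, deliberately over-provisioned as the comment above notes.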
326 return to_msm_iommu(pagetable->parent)->pt_cache; in get_pt_cache()
335 p->pages = kvmalloc_array(p->count, sizeof(p->pages), GFP_KERNEL); in msm_iommu_pagetable_prealloc_allocate()
336 if (!p->pages) in msm_iommu_pagetable_prealloc_allocate()
337 return -ENOMEM; in msm_iommu_pagetable_prealloc_allocate()
339 ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, p->count, p->pages); in msm_iommu_pagetable_prealloc_allocate()
340 if (ret != p->count) { in msm_iommu_pagetable_prealloc_allocate()
341 kvfree(p->pages); in msm_iommu_pagetable_prealloc_allocate()
342 p->pages = NULL; in msm_iommu_pagetable_prealloc_allocate()
343 p->count = ret; in msm_iommu_pagetable_prealloc_allocate()
344 return -ENOMEM; in msm_iommu_pagetable_prealloc_allocate()
354 uint32_t remaining_pt_count = p->count - p->ptr; in msm_iommu_pagetable_prealloc_cleanup()
356 if (!p->pages) in msm_iommu_pagetable_prealloc_cleanup()
359 if (p->count > 0) in msm_iommu_pagetable_prealloc_cleanup()
360 trace_msm_mmu_prealloc_cleanup(p->count, remaining_pt_count); in msm_iommu_pagetable_prealloc_cleanup()
362 kmem_cache_free_bulk(pt_cache, remaining_pt_count, &p->pages[p->ptr]); in msm_iommu_pagetable_prealloc_cleanup()
363 kvfree(p->pages); in msm_iommu_pagetable_prealloc_cleanup()
367 * alloc_pt() - Custom page table allocator
368 * @cookie: Cookie passed at page table allocation time.
369 * @size: Size of the page table. This size should be fixed,
370 * and determined at creation time based on the granule size.
373 * We want a custom allocator so we can use a cache for page table
374 * allocations and amortize the cost of the over-reservation that's
377 * Return: non-NULL on success, NULL if the allocation failed for any
384 struct msm_mmu_prealloc *p = pagetable->base.prealloc; in msm_iommu_pagetable_alloc_pt()
385 void *page; in msm_iommu_pagetable_alloc_pt() local
388 /* Allocation of the root page table happens during init. */ in msm_iommu_pagetable_alloc_pt()
388 if (unlikely(!pagetable->root_page_table)) { in msm_iommu_pagetable_alloc_pt()
389 struct page *p; in msm_iommu_pagetable_alloc_pt()
391 p = alloc_pages_node(dev_to_node(pagetable->iommu_dev), in msm_iommu_pagetable_alloc_pt()
393 page = p ? page_address(p) : NULL; in msm_iommu_pagetable_alloc_pt()
394 pagetable->root_page_table = page; in msm_iommu_pagetable_alloc_pt()
395 return page; in msm_iommu_pagetable_alloc_pt()
398 if (WARN_ON(!p) || WARN_ON(p->ptr >= p->count)) in msm_iommu_pagetable_alloc_pt()
401 page = p->pages[p->ptr++]; in msm_iommu_pagetable_alloc_pt()
402 memset(page, 0, size); in msm_iommu_pagetable_alloc_pt()
405 * Page table entries don't use virtual addresses, which trips out in msm_iommu_pagetable_alloc_pt()
410 * Let's just ignore memory passed to the page-table driver for now. in msm_iommu_pagetable_alloc_pt()
412 kmemleak_ignore(page); in msm_iommu_pagetable_alloc_pt()
414 return page; in msm_iommu_pagetable_alloc_pt()
419 * free_pt() - Custom page table free function
420 * @cookie: Cookie passed at page table allocation time.
421 * @data: Page table to free.
422 * @size: Size of the page table. This size should be fixed,
423 * and determined at creation time based on the granule size.
430 if (unlikely(pagetable->root_page_table == data)) { in msm_iommu_pagetable_free_pt()
432 pagetable->root_page_table = NULL; in msm_iommu_pagetable_free_pt()
436 kmem_cache_free(get_pt_cache(&pagetable->base), data); in msm_iommu_pagetable_free_pt()
453 if (!pm_runtime_get_if_in_use(pagetable->iommu_dev)) in msm_iommu_tlb_flush_all()
456 adreno_smmu = dev_get_drvdata(pagetable->parent->dev); in msm_iommu_tlb_flush_all()
458 pagetable->tlb->tlb_flush_all((void *)adreno_smmu->cookie); in msm_iommu_tlb_flush_all()
460 pm_runtime_put_autosuspend(pagetable->iommu_dev); in msm_iommu_tlb_flush_all()
469 if (!pm_runtime_get_if_in_use(pagetable->iommu_dev)) in msm_iommu_tlb_flush_walk()
472 adreno_smmu = dev_get_drvdata(pagetable->parent->dev); in msm_iommu_tlb_flush_walk()
474 pagetable->tlb->tlb_flush_walk(iova, size, granule, (void *)adreno_smmu->cookie); in msm_iommu_tlb_flush_walk()
476 pm_runtime_put_autosuspend(pagetable->iommu_dev); in msm_iommu_tlb_flush_walk()
497 pg_shift = __ffs(cfg->pgsize_bitmap); in get_tblsz()
499 bits_per_level = pg_shift - ilog2(sizeof(u64)); in get_tblsz()
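With the usual 4 KiB granule this works out to one page per table (a hedged illustration; the helper's return statement is not among the matched lines, but is presumably sizeof(u64) << bits_per_level):

	pg_shift       = __ffs(SZ_4K | SZ_2M | SZ_1G)  = 12
	bits_per_level = 12 - ilog2(sizeof(u64))       = 12 - 3 = 9
	table size     = sizeof(u64) << bits_per_level = 8 << 9 = 4096 bytes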
506 struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev); in msm_iommu_pagetable_create()
514 if (adreno_smmu->cookie) in msm_iommu_pagetable_create()
515 ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie); in msm_iommu_pagetable_create()
519 * qcom_smmu_impl_of_match[] in arm-smmu-qcom.c in msm_iommu_pagetable_create()
521 if (WARN_ONCE(!ttbr1_cfg, "No per-process page tables")) in msm_iommu_pagetable_create()
522 return ERR_PTR(-ENODEV); in msm_iommu_pagetable_create()
526 return ERR_PTR(-ENOMEM); in msm_iommu_pagetable_create()
528 msm_mmu_init(&pagetable->base, parent->dev, &pagetable_funcs, in msm_iommu_pagetable_create()
542 * With userspace managed VM (aka VM_BIND), we need to pre- in msm_iommu_pagetable_create()
544 * handing them to io-pgtable via custom alloc/free ops as in msm_iommu_pagetable_create()
551 * Restrict to single page granules. Otherwise we may run in msm_iommu_pagetable_create()
558 * Note that prior to commit 33729a5fc0ca ("iommu/io-pgtable-arm: in msm_iommu_pagetable_create()
560 * io-pgtable-arm. But this apparently does not work in msm_iommu_pagetable_create()
567 pagetable->iommu_dev = ttbr1_cfg->iommu_dev; in msm_iommu_pagetable_create()
568 pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, in msm_iommu_pagetable_create()
571 if (!pagetable->pgtbl_ops) { in msm_iommu_pagetable_create()
573 return ERR_PTR(-ENOMEM); in msm_iommu_pagetable_create()
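The VM_BIND comments a few lines above refer to the custom alloc/free hooks that io-pgtable provides for exactly this purpose. A hedged sketch of how the two helpers defined earlier would be wired into the TTBR0 config before alloc_io_pgtable_ops() is called; the assignments are illustrative rather than a quote of the elided lines:

	/* Illustrative wiring of the custom page-table allocators */
	ttbr0_cfg.alloc = msm_iommu_pagetable_alloc_pt;   /* io_pgtable_cfg::alloc */
	ttbr0_cfg.free  = msm_iommu_pagetable_free_pt;    /* io_pgtable_cfg::free  */

	pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
						    &ttbr0_cfg, pagetable);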
578 * the arm-smmu driver as a trigger to set up TTBR0 in msm_iommu_pagetable_create()
580 mutex_lock(&iommu->init_lock); in msm_iommu_pagetable_create()
581 if (iommu->pagetables++ == 0) { in msm_iommu_pagetable_create()
582 ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg); in msm_iommu_pagetable_create()
584 iommu->pagetables--; in msm_iommu_pagetable_create()
585 mutex_unlock(&iommu->init_lock); in msm_iommu_pagetable_create()
586 free_io_pgtable_ops(pagetable->pgtbl_ops); in msm_iommu_pagetable_create()
591 BUG_ON(iommu->prr_page); in msm_iommu_pagetable_create()
592 if (adreno_smmu->set_prr_bit) { in msm_iommu_pagetable_create()
594 * We need a zero'd page for two reasons: in msm_iommu_pagetable_create()
602 * is in the PRR page. in msm_iommu_pagetable_create()
604 iommu->prr_page = alloc_page(GFP_KERNEL | __GFP_ZERO); in msm_iommu_pagetable_create()
605 adreno_smmu->set_prr_addr(adreno_smmu->cookie, in msm_iommu_pagetable_create()
606 page_to_phys(iommu->prr_page)); in msm_iommu_pagetable_create()
607 adreno_smmu->set_prr_bit(adreno_smmu->cookie, true); in msm_iommu_pagetable_create()
610 mutex_unlock(&iommu->init_lock); in msm_iommu_pagetable_create()
613 pagetable->parent = parent; in msm_iommu_pagetable_create()
614 pagetable->tlb = ttbr1_cfg->tlb; in msm_iommu_pagetable_create()
615 pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap; in msm_iommu_pagetable_create()
616 pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr; in msm_iommu_pagetable_create()
619 * TODO we would like each set of page tables to have a unique ASID in msm_iommu_pagetable_create()
624 pagetable->asid = 0; in msm_iommu_pagetable_create()
626 return &pagetable->base; in msm_iommu_pagetable_create()
633 struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(iommu->base.dev); in msm_gpu_fault_handler()
636 if (adreno_smmu->get_fault_info) { in msm_gpu_fault_handler()
637 adreno_smmu->get_fault_info(adreno_smmu->cookie, &info); in msm_gpu_fault_handler()
641 if (iommu->base.handler) in msm_gpu_fault_handler()
642 return iommu->base.handler(iommu->base.arg, iova, flags, ptr); in msm_gpu_fault_handler()
654 if (iommu->base.handler) in msm_disp_fault_handler()
655 return iommu->base.handler(iommu->base.arg, iova, flags, NULL); in msm_disp_fault_handler()
657 return -ENOSYS; in msm_disp_fault_handler()
662 struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(mmu->dev); in msm_iommu_set_stall()
664 if (adreno_smmu->set_stall) in msm_iommu_set_stall()
665 adreno_smmu->set_stall(adreno_smmu->cookie, enable); in msm_iommu_set_stall()
672 iommu_detach_device(iommu->domain, mmu->dev); in msm_iommu_detach()
684 /* The arm-smmu driver expects the addresses to be sign extended */ in msm_iommu_map()
688 ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot); in msm_iommu_map()
691 return (ret == len) ? 0 : -EINVAL; in msm_iommu_map()
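The sign-extension comment above refers to the SMMU's split VA space: when the top translated VA bit is set, the remaining upper bits must also be ones before the address is handed to the IOMMU API. A hedged sketch of that fix-up, assuming a 49-bit split (illustrative, not a quote of the elided lines):

	/* Propagate bit 48 into bits 63..49 so the address lands in the upper half */
	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);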
701 iommu_unmap(iommu->domain, iova, len); in msm_iommu_unmap()
709 iommu_domain_free(iommu->domain); in msm_iommu_destroy()
710 kmem_cache_destroy(iommu->pt_cache); in msm_iommu_destroy()
729 return ERR_PTR(-ENODEV); in msm_iommu_new()
740 return ERR_PTR(-ENOMEM); in msm_iommu_new()
743 iommu->domain = domain; in msm_iommu_new()
744 msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU); in msm_iommu_new()
746 mutex_init(&iommu->init_lock); in msm_iommu_new()
748 ret = iommu_attach_device(iommu->domain, dev); in msm_iommu_new()
755 return &iommu->base; in msm_iommu_new()
768 iommu_set_fault_handler(iommu->domain, msm_disp_fault_handler, iommu); in msm_iommu_disp_new()
784 if (adreno_smmu->cookie) { in msm_iommu_gpu_new()
786 adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie); in msm_iommu_gpu_new()
789 iommu->pt_cache = in msm_iommu_gpu_new()
790 kmem_cache_create("msm-mmu-pt", tblsz, tblsz, 0, NULL); in msm_iommu_gpu_new()
792 iommu_set_fault_handler(iommu->domain, msm_gpu_fault_handler, iommu); in msm_iommu_gpu_new()
795 if (adreno_smmu->set_stall) in msm_iommu_gpu_new()
796 adreno_smmu->set_stall(adreno_smmu->cookie, true); in msm_iommu_gpu_new()