Lines matching full-text search terms: disable, mmu, reset

1 // SPDX-License-Identifier: GPL-2.0
9 #include <linux/dma-mapping.h>
13 #include <linux/io-pgtable.h>
26 #define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
27 #define mmu_read(dev, reg) readl(dev->iomem + reg)
39 * have to be both write-back, if one of them is write-through
40 * or non-cacheable, we just choose non-cacheable. Device
41 * memory is also translated to non-cacheable.
52 * to Mali's internal-shareable mode. As per the Mali
53 * Spec, inner and outer-shareable modes aren't allowed
57 * inner-shareable.
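
The rule in the comment above is simple on its own: a GPU memory attribute is only kept cacheable when both the inner and the outer CPU policy are write-back; write-through, non-cacheable and device memory all collapse to non-cacheable on the GPU side. The sketch below restates that rule against the standard ARM MAIR nibble layout (outer attribute in bits [7:4], inner in bits [3:0]); it is an illustration with made-up test values, not the driver's actual bit test.

#include <stdio.h>
#include <stdint.h>

/* Treat a MAIR nibble as write-back normal memory when bit 2 is set
 * together with a non-zero read/write-allocate field in bits [1:0].
 * This matches the MAIR values Linux normally programs and is good
 * enough for the illustration. */
static int nibble_is_writeback(uint8_t attr)
{
	return (attr & 0x4) && (attr & 0x3);
}

int main(void)
{
	uint8_t mair_attrs[] = { 0xff, 0xbb, 0x44, 0x04 };
	/* 0xff: inner+outer write-back -> stays cacheable
	 * 0xbb: write-through          -> non-cacheable
	 * 0x44: non-cacheable          -> non-cacheable
	 * 0x04: device memory          -> non-cacheable */
	for (unsigned int i = 0; i < sizeof(mair_attrs); i++) {
		uint8_t a = mair_attrs[i], outer = a >> 4, inner = a & 0xf;
		int cacheable = nibble_is_writeback(outer) &&
				nibble_is_writeback(inner);

		printf("MAIR attr 0x%02x -> %s\n", a,
		       cacheable ? "write-back" : "non-cacheable");
	}
	return 0;
}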
76 /* Wait for the MMU status to indicate there is no active command, in
78 ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
82 /* The GPU hung, let's trigger a reset */
84 dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");
94 /* write AS_COMMAND when MMU is ready to accept another command */
120 region_width = max(fls64(region_start ^ (region_end - 1)),
121 const_ilog2(AS_LOCK_REGION_MIN_SIZE)) - 1;
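
The two lines above size the TLB lock region: the hardware locks a naturally aligned power-of-two block whose size is encoded as log2 minus one, so XOR-ing the start address with the last byte of the range exposes the highest differing bit and therefore the smallest aligned block that covers the whole range, clamped to the hardware minimum. A standalone worked example, assuming a 32 KiB AS_LOCK_REGION_MIN_SIZE and made-up addresses:

#include <stdio.h>
#include <stdint.h>

/* Highest set bit, 1-based, standing in for the kernel's fls64(). */
static int fls64_demo(uint64_t x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

int main(void)
{
	/* Hypothetical request: lock 16 KiB starting at IOVA 0x201000. */
	uint64_t region_start = 0x201000, size = 0x4000;
	uint64_t region_end = region_start + size;
	int min_width = 15;	/* assumes AS_LOCK_REGION_MIN_SIZE == 32 KiB */
	int width = fls64_demo(region_start ^ (region_end - 1));

	if (width < min_width)
		width = min_width;

	/* The low bits of LOCKADDR carry log2(size) - 1; address bits below
	 * that alignment are ignored by the hardware. */
	printf("lock region: %llu bytes, encoded width %d, base %#llx\n",
	       (unsigned long long)1 << width, width - 1,
	       (unsigned long long)(region_start & ~((1ULL << width) - 1)));
	return 0;
}

Locking a region larger than requested is harmless here; it only widens the range whose TLB entries get invalidated.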
147 /* Run the MMU operation */
155 struct panfrost_mmu *mmu,
160 spin_lock(&pfdev->as_lock);
161 ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
162 spin_unlock(&pfdev->as_lock);
166 static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
168 int as_nr = mmu->as;
169 u64 transtab = mmu->cfg.transtab;
170 u64 memattr = mmu->cfg.memattr;
171 u64 transcfg = mmu->cfg.transcfg;
206 static int mmu_cfg_init_mali_lpae(struct panfrost_mmu *mmu)
208 struct io_pgtable_cfg *pgtbl_cfg = &mmu->pgtbl_cfg;
210 /* TODO: The following fields are duplicated between the MMU and Page
213 mmu->cfg.transtab = pgtbl_cfg->arm_mali_lpae_cfg.transtab;
214 mmu->cfg.memattr = pgtbl_cfg->arm_mali_lpae_cfg.memattr;
215 mmu->cfg.transcfg = AS_TRANSCFG_ADRMODE_LEGACY;
220 static int mmu_cfg_init_aarch64_4k(struct panfrost_mmu *mmu)
222 struct io_pgtable_cfg *pgtbl_cfg = &mmu->pgtbl_cfg;
223 struct panfrost_device *pfdev = mmu->pfdev;
225 if (drm_WARN_ON(pfdev->ddev, pgtbl_cfg->arm_lpae_s1_cfg.ttbr &
227 return -EINVAL;
229 mmu->cfg.transtab = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
231 mmu->cfg.memattr = mair_to_memattr(pgtbl_cfg->arm_lpae_s1_cfg.mair,
232 pgtbl_cfg->coherent_walk);
234 mmu->cfg.transcfg = AS_TRANSCFG_PTW_MEMATTR_WB |
237 AS_TRANSCFG_INA_BITS(55 - pgtbl_cfg->ias);
238 if (pgtbl_cfg->coherent_walk)
239 mmu->cfg.transcfg |= AS_TRANSCFG_PTW_SH_OS;
244 static int panfrost_mmu_cfg_init(struct panfrost_mmu *mmu,
247 struct panfrost_device *pfdev = mmu->pfdev;
251 return mmu_cfg_init_aarch64_4k(mmu);
253 return mmu_cfg_init_mali_lpae(mmu);
256 drm_WARN(pfdev->ddev, 1, "Invalid pgtable format");
257 return -EINVAL;
261 u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
265 spin_lock(&pfdev->as_lock);
267 as = mmu->as;
269 int en = atomic_inc_return(&mmu->as_count);
278 list_move(&mmu->list, &pfdev->as_lru_list);
280 if (pfdev->as_faulty_mask & mask) {
281 /* Unhandled pagefault on this AS, the MMU was
282 * disabled. We need to re-enable the MMU after
286 mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
287 pfdev->as_faulty_mask &= ~mask;
288 panfrost_mmu_enable(pfdev, mmu);
295 as = ffz(pfdev->as_alloc_mask);
296 if (!(BIT(as) & pfdev->features.as_present)) {
299 list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
300 if (!atomic_read(&lru_mmu->as_count))
303 WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);
305 list_del_init(&lru_mmu->list);
306 as = lru_mmu->as;
309 lru_mmu->as = -1;
313 mmu->as = as;
314 set_bit(as, &pfdev->as_alloc_mask);
315 atomic_set(&mmu->as_count, 1);
316 list_add(&mmu->list, &pfdev->as_lru_list);
318 dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask);
320 panfrost_mmu_enable(pfdev, mmu);
323 spin_unlock(&pfdev->as_lock);
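
The allocation path above boils down to a simple policy: take the first free address space in as_alloc_mask, and if the hardware has none left, walk the LRU list from its cold end and steal the first context whose as_count has dropped to zero. A toy user-space sketch of that policy (hypothetical types and sizes, no locking or list maintenance), just to make the flow explicit:

#include <stdio.h>
#include <stdint.h>

#define NUM_AS 8	/* hypothetical number of hardware address spaces */

struct demo_ctx {
	int as;		/* assigned AS, or -1 */
	int as_count;	/* jobs currently using this AS */
};

/* Prefer a free slot, otherwise steal the least-recently-used context
 * that no running job references. lru[] is ordered most-recent first,
 * like the head of as_lru_list. */
static int assign_as(uint32_t *alloc_mask, struct demo_ctx **lru, int n,
		     struct demo_ctx *ctx)
{
	for (int as = 0; as < NUM_AS; as++) {
		if (!(*alloc_mask & (1u << as))) {
			*alloc_mask |= 1u << as;
			ctx->as = as;
			return as;
		}
	}
	/* No free AS: search from the least recently used end. */
	for (int i = n - 1; i >= 0; i--) {
		if (lru[i]->as_count == 0) {
			ctx->as = lru[i]->as;
			lru[i]->as = -1;
			return ctx->as;
		}
	}
	return -1;	/* everything busy; the real driver keeps the AS shared */
}

int main(void)
{
	uint32_t alloc_mask = 0xff;	/* all slots taken */
	struct demo_ctx busy = { .as = 3, .as_count = 2 };
	struct demo_ctx idle = { .as = 5, .as_count = 0 };
	struct demo_ctx *lru[] = { &busy, &idle };
	struct demo_ctx newctx = { .as = -1 };

	printf("stole AS%d\n", assign_as(&alloc_mask, lru, 2, &newctx));
	return 0;
}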
327 void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
329 atomic_dec(&mmu->as_count);
330 WARN_ON(atomic_read(&mmu->as_count) < 0);
335 struct panfrost_mmu *mmu, *mmu_tmp;
337 clear_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended);
339 spin_lock(&pfdev->as_lock);
341 pfdev->as_alloc_mask = 0;
342 pfdev->as_faulty_mask = 0;
344 list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
345 mmu->as = -1;
346 atomic_set(&mmu->as_count, 0);
347 list_del_init(&mmu->list);
350 spin_unlock(&pfdev->as_lock);
359 * io-pgtable only operates on multiple pages within a single table
362 * boundary of block size B is logically B - A % B, but in unsigned
364 * B - A % B == (B - A) % B == (n * B - A) % B, and choose n = 0 :)
366 size_t blk_offset = -addr % SZ_2M;
372 blk_offset = -addr % SZ_1G ?: SZ_1G;
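
The identity in the comment relies on unsigned wraparound: with a power-of-two block size B, -addr % B is exactly the distance from addr up to the next B-aligned boundary, and 0 when addr is already aligned, which is why the 1 GiB case above needs the "?: SZ_1G" fallback. A quick check with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t blk_2m = 2ULL << 20, blk_1g = 1ULL << 30;
	uint64_t addr = 0x201000;	/* hypothetical, not 2M-aligned */

	/* Unsigned wraparound: (0 - addr) % B == distance to next B boundary. */
	printf("to next 2M boundary: %#llx\n",
	       (unsigned long long)(-addr % blk_2m));	/* prints 0x1ff000 */

	/* An already aligned address yields 0, hence the "?: SZ_1G" fallback
	 * in the code above. */
	addr = 3 * blk_1g;
	printf("to next 1G boundary: %#llx\n",
	       (unsigned long long)(-addr % blk_1g));	/* prints 0 */
	return 0;
}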
378 struct panfrost_mmu *mmu,
381 if (mmu->as < 0)
384 pm_runtime_get_noresume(pfdev->dev);
387 if (pm_runtime_active(pfdev->dev))
388 mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);
390 pm_runtime_put_autosuspend(pfdev->dev);
393 static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
398 struct io_pgtable_ops *ops = mmu->pgtbl_ops;
405 dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);
411 ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
417 len -= mapped;
421 panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);
428 struct panfrost_gem_object *bo = mapping->obj;
429 struct drm_gem_shmem_object *shmem = &bo->base;
430 struct drm_gem_object *obj = &shmem->base;
431 struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
435 if (WARN_ON(mapping->active))
438 if (bo->noexec)
445 mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
447 mapping->active = true;
454 struct panfrost_gem_object *bo = mapping->obj;
455 struct drm_gem_object *obj = &bo->base.base;
456 struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
457 struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
458 u64 iova = mapping->mmnode.start << PAGE_SHIFT;
459 size_t len = mapping->mmnode.size << PAGE_SHIFT;
462 if (WARN_ON(!mapping->active))
465 dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
466 mapping->mmu->as, iova, len);
470 size_t pgsize = get_pgsize(iova, len - unmapped_len, &pgcount);
472 if (bo->is_heap)
474 if (!bo->is_heap || ops->iova_to_phys(ops, iova)) {
475 unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL);
482 panfrost_mmu_flush_range(pfdev, mapping->mmu,
483 mapping->mmnode.start << PAGE_SHIFT, len);
484 mapping->active = false;
492 //struct panfrost_mmu *mmu = cookie;
513 struct panfrost_mmu *mmu;
515 spin_lock(&pfdev->as_lock);
516 list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
517 if (as == mmu->as)
524 spin_lock(&mmu->mm_lock);
526 drm_mm_for_each_node(node, &mmu->mm) {
527 if (offset >= node->start &&
528 offset < (node->start + node->size)) {
531 kref_get(&mapping->refcount);
536 spin_unlock(&mmu->mm_lock);
538 spin_unlock(&pfdev->as_lock);
558 return -ENOENT;
560 bo = bomapping->obj;
561 if (!bo->is_heap) {
562 dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
563 bomapping->mmnode.start << PAGE_SHIFT);
564 ret = -EINVAL;
567 WARN_ON(bomapping->mmu->as != as);
570 addr &= ~((u64)SZ_2M - 1);
572 page_offset -= bomapping->mmnode.start;
574 obj = &bo->base.base;
576 dma_resv_lock(obj->resv, NULL);
578 if (!bo->base.pages) {
579 bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
581 if (!bo->sgts) {
582 ret = -ENOMEM;
586 pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
589 kvfree(bo->sgts);
590 bo->sgts = NULL;
591 ret = -ENOMEM;
594 bo->base.pages = pages;
595 refcount_set(&bo->base.pages_use_count, 1);
597 pages = bo->base.pages;
604 mapping = bo->base.base.filp->f_mapping;
623 sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
629 ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
633 mmu_map_sg(pfdev, bomapping->mmu, addr,
636 bomapping->active = true;
637 bo->heap_rss_size += SZ_2M;
639 dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
642 dma_resv_unlock(obj->resv);
651 dma_resv_unlock(obj->resv);
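
Reading the fragments above, the growable-heap fault path rounds the faulting GPU VA down to a 2 MiB granule, populates and maps that whole chunk at once, and keeps one scatter-gather table per chunk. A small sketch of the index arithmetic with hypothetical values (4 KiB pages assumed); the page_offset derivation, whose intermediate line is not shown above, is reconstructed from context:

#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SHIFT	12		/* assumes 4 KiB CPU pages */
#define DEMO_SZ_2M	(2ULL << 20)

int main(void)
{
	uint64_t fault_addr = 0x08612345;	/* hypothetical faulting GPU VA */
	uint64_t node_start = 0x08000000 >> DEMO_PAGE_SHIFT;	/* BO start, in pages */

	/* Round down to the 2 MiB chunk that will be populated. */
	uint64_t chunk_addr = fault_addr & ~(DEMO_SZ_2M - 1);

	/* Page offset inside the BO, then the index of the per-chunk sgt. */
	uint64_t page_offset = (chunk_addr >> DEMO_PAGE_SHIFT) - node_start;
	uint64_t sgt_idx = page_offset / (DEMO_SZ_2M >> DEMO_PAGE_SHIFT);

	printf("chunk base %#llx, page offset %llu, sgt index %llu\n",
	       (unsigned long long)chunk_addr,
	       (unsigned long long)page_offset,
	       (unsigned long long)sgt_idx);
	return 0;
}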
659 struct panfrost_mmu *mmu = container_of(kref, struct panfrost_mmu,
661 struct panfrost_device *pfdev = mmu->pfdev;
663 spin_lock(&pfdev->as_lock);
664 if (mmu->as >= 0) {
665 pm_runtime_get_noresume(pfdev->dev);
666 if (pm_runtime_active(pfdev->dev))
667 panfrost_mmu_disable(pfdev, mmu->as);
668 pm_runtime_put_autosuspend(pfdev->dev);
670 clear_bit(mmu->as, &pfdev->as_alloc_mask);
671 clear_bit(mmu->as, &pfdev->as_in_use_mask);
672 list_del(&mmu->list);
674 spin_unlock(&pfdev->as_lock);
676 free_io_pgtable_ops(mmu->pgtbl_ops);
677 drm_mm_takedown(&mmu->mm);
678 kfree(mmu);
681 void panfrost_mmu_ctx_put(struct panfrost_mmu *mmu)
683 kref_put(&mmu->refcount, panfrost_mmu_release_ctx);
686 struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu)
688 kref_get(&mmu->refcount);
690 return mmu;
694 #define PFN_4G_MASK (PFN_4G - 1)
709 (*end)--;
712 if (next_seg - *start <= PFN_16M)
715 *end = min(*end, ALIGN(*start, PFN_4G) - 1);
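
The color-adjust fragments above keep executable mappings away from 4 GiB boundaries: working in page-frame units, a free hole must not start or end exactly on a 4 GiB boundary, and when the next 4 GiB boundary is 16 MiB away or closer, the hole is clamped to end just below it. A rough restatement with hypothetical values (4 KiB pages, no drm_mm types); the real callback also passes non-executable allocations through untouched:

#include <stdio.h>
#include <stdint.h>

#define DEMO_PFN_4G	(1ULL << (32 - 12))	/* 4 GiB in 4 KiB pages */
#define DEMO_PFN_16M	(1ULL << (24 - 12))	/* 16 MiB in 4 KiB pages */
#define DEMO_ALIGN(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/* Nudge start/end off exact 4 GiB boundaries, then clamp the hole below
 * the next boundary when it is 16 MiB or closer. Illustrative only. */
static void adjust_exec_hole(uint64_t *start, uint64_t *end)
{
	if ((*start & (DEMO_PFN_4G - 1)) == 0)
		(*start)++;
	if ((*end & (DEMO_PFN_4G - 1)) == 0)
		(*end)--;

	uint64_t next_seg = DEMO_ALIGN(*start, DEMO_PFN_4G);

	if (next_seg - *start <= DEMO_PFN_16M)
		*end = (*end < next_seg - 1) ? *end : next_seg - 1;
}

int main(void)
{
	/* Hypothetical hole straddling the 4 GiB mark (in PFNs). */
	uint64_t start = DEMO_PFN_4G - 0x100, end = DEMO_PFN_4G + 0x100;

	adjust_exec_hole(&start, &end);
	/* Prints [0xfff00, 0xfffff]: the hole no longer crosses the boundary. */
	printf("adjusted hole: [%#llx, %#llx]\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}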
721 u32 va_bits = GPU_MMU_FEATURES_VA_BITS(pfdev->features.mmu_features);
722 u32 pa_bits = GPU_MMU_FEATURES_PA_BITS(pfdev->features.mmu_features);
723 struct panfrost_mmu *mmu;
727 if (pfdev->comp->gpu_quirks & BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE)) {
729 dev_err_once(pfdev->dev,
731 return ERR_PTR(-EINVAL);
738 mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
739 if (!mmu)
740 return ERR_PTR(-ENOMEM);
742 mmu->pfdev = pfdev;
743 spin_lock_init(&mmu->mm_lock);
745 /* 4G enough for now. can be 48-bit */
746 drm_mm_init(&mmu->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
747 mmu->mm.color_adjust = panfrost_drm_mm_color_adjust;
749 INIT_LIST_HEAD(&mmu->list);
750 mmu->as = -1;
752 mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
756 .coherent_walk = pfdev->coherent,
758 .iommu_dev = pfdev->dev,
761 mmu->pgtbl_ops = alloc_io_pgtable_ops(fmt, &mmu->pgtbl_cfg, mmu);
762 if (!mmu->pgtbl_ops) {
763 ret = -EINVAL;
767 ret = panfrost_mmu_cfg_init(mmu, fmt);
771 kref_init(&mmu->refcount);
773 return mmu;
776 free_io_pgtable_ops(mmu->pgtbl_ops);
779 kfree(mmu);
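
For the drm_mm_init() call above, the per-context GPU VA space is managed in page-frame units and currently spans [32 MiB, 4 GiB), as the "4G enough for now" comment notes. A trivial check of the range arithmetic, assuming 4 KiB pages:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* drm_mm_init(mm, start, size) takes a start and a size, both
	 * already shifted to page-frame units in the call above. */
	unsigned int page_shift = 12;
	uint64_t sz_32m = 32ULL << 20, sz_4g = 4ULL << 30;
	uint64_t start = sz_32m >> page_shift;
	uint64_t size = (sz_4g - sz_32m) >> page_shift;

	/* Prints [0x2000, 0x100000), i.e. GPU VA range [32M, 4G). */
	printf("managed PFNs: [%#llx, %#llx)\n",
	       (unsigned long long)start,
	       (unsigned long long)(start + size));
	return 0;
}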
808 if (test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended))
825 u32 as = ffs(status | (status >> 16)) - 1;
845 ret = -1;
851 dev_err(pfdev->dev,
867 spin_lock(&pfdev->as_lock);
868 /* Ignore MMU interrupts on this AS until it's been
869 * re-enabled.
871 pfdev->as_faulty_mask |= mask;
873 /* Disable the MMU to kill jobs on this AS. */
875 spin_unlock(&pfdev->as_lock);
880 /* If we received new MMU interrupts, process them before returning. */
882 status = mmu_read(pfdev, MMU_INT_RAWSTAT) & ~pfdev->as_faulty_mask;
886 if (!test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended)) {
887 spin_lock(&pfdev->as_lock);
888 mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
889 spin_unlock(&pfdev->as_lock);
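
The handler folds the status register with "status >> 16" before taking ffs(), which suggests the layout: one page-fault bit per address space in the low half and the matching bus-error bit 16 positions higher, with both bits of an AS masked and handled together. A small decode sketch with a made-up status word, under that assumption:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical raw status: page fault on AS2 plus bus error on AS5. */
	uint32_t status = (1u << 2) | (1u << (5 + 16));

	while (status) {
		int as = __builtin_ffs(status | (status >> 16)) - 1;
		uint32_t mask = (1u << as) | (1u << (as + 16));

		printf("AS%d:%s%s\n", as,
		       (status & (1u << as)) ? " fault" : "",
		       (status & (1u << (as + 16))) ? " bus error" : "");
		status &= ~mask;	/* both bits handled together */
	}
	return 0;
}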
899 pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
900 if (pfdev->mmu_irq < 0)
901 return pfdev->mmu_irq;
903 err = devm_request_threaded_irq(pfdev->dev, pfdev->mmu_irq,
906 IRQF_SHARED, KBUILD_MODNAME "-mmu",
910 dev_err(pfdev->dev, "failed to request mmu irq");
924 set_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended);
927 synchronize_irq(pfdev->mmu_irq);