Lines matching "mmu" in drivers/gpu/drm/panfrost/panfrost_mmu.c (each matched line is prefixed with its line number in the source file)
76 /* Wait for the MMU status to indicate there is no active command, in case one is pending. */
94 /* write AS_COMMAND when MMU is ready to accept another command */
147 /* Run the MMU operation */
155 struct panfrost_mmu *mmu,
161 ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
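For context, the comments at lines 76, 94 and 147 belong to the driver's small address-space (AS) command helpers: wait_ready() polls AS_STATUS until the ACTIVE bit clears, and write_cmd() only writes AS_COMMAND once that poll succeeds. A condensed sketch of the pair, based on the driver (mmu_write() and the AS_* register macros are the driver's own; the poll interval/timeout values are illustrative):

	static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
	{
		u32 val;

		/* Wait for the MMU status to indicate there is no active
		 * command, in case one is pending.
		 */
		return readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
							 val, !(val & AS_STATUS_AS_ACTIVE),
							 10, 100000);
	}

	static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
	{
		/* write AS_COMMAND when MMU is ready to accept another command */
		int status = wait_ready(pfdev, as_nr);

		if (!status)
			mmu_write(pfdev, AS_COMMAND(as_nr), cmd);

		return status;
	}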
166 static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
168 int as_nr = mmu->as;
169 u64 transtab = mmu->cfg.transtab;
170 u64 memattr = mmu->cfg.memattr;
171 u64 transcfg = mmu->cfg.transcfg;
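The three values loaded at lines 169-171 are then programmed into the per-AS registers and latched with an UPDATE command. A sketch of the remainder of panfrost_mmu_enable() (the _LO/_HI register split follows the driver's register layout; the initial full flush of the AS is elided here):

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));

	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));

	mmu_write(pfdev, AS_TRANSCFG_LO(as_nr), lower_32_bits(transcfg));
	mmu_write(pfdev, AS_TRANSCFG_HI(as_nr), upper_32_bits(transcfg));

	/* Latch the new translation setup into the address space */
	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);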
206 static int mmu_cfg_init_mali_lpae(struct panfrost_mmu *mmu)
208 struct io_pgtable_cfg *pgtbl_cfg = &mmu->pgtbl_cfg;
210 /* TODO: The following fields are duplicated between the MMU and Page Table config structs. Ideally, they should be kept in one place. */
213 mmu->cfg.transtab = pgtbl_cfg->arm_mali_lpae_cfg.transtab;
214 mmu->cfg.memattr = pgtbl_cfg->arm_mali_lpae_cfg.memattr;
215 mmu->cfg.transcfg = AS_TRANSCFG_ADRMODE_LEGACY;
220 static int mmu_cfg_init_aarch64_4k(struct panfrost_mmu *mmu)
222 struct io_pgtable_cfg *pgtbl_cfg = &mmu->pgtbl_cfg;
223 struct panfrost_device *pfdev = mmu->pfdev;
229 mmu->cfg.transtab = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
231 mmu->cfg.memattr = mair_to_memattr(pgtbl_cfg->arm_lpae_s1_cfg.mair,
234 mmu->cfg.transcfg = AS_TRANSCFG_PTW_MEMATTR_WB |
239 mmu->cfg.transcfg |= AS_TRANSCFG_PTW_SH_OS;
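The assignment truncated at line 234 builds the rest of AS_TRANSCFG for the AArch64 4K page-table mode; a sketch under the assumption that the input-address size is derived from the page-table config's ias field (the AS_TRANSCFG_* names follow the driver's register header, but the exact INA_BITS computation here is an assumption):

	mmu->cfg.transcfg = AS_TRANSCFG_PTW_MEMATTR_WB |
			    AS_TRANSCFG_PTW_RA |
			    AS_TRANSCFG_ADRMODE_AARCH64_4K |
			    AS_TRANSCFG_INA_BITS(55 - pgtbl_cfg->ias); /* assumption */
	if (pfdev->coherent)
		mmu->cfg.transcfg |= AS_TRANSCFG_PTW_SH_OS;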
244 static int panfrost_mmu_cfg_init(struct panfrost_mmu *mmu,
247 struct panfrost_device *pfdev = mmu->pfdev;
251 return mmu_cfg_init_aarch64_4k(mmu);
253 return mmu_cfg_init_mali_lpae(mmu);
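panfrost_mmu_cfg_init() is just a dispatcher over the io-pgtable format; a minimal sketch, assuming only the two formats above are valid:

	static int panfrost_mmu_cfg_init(struct panfrost_mmu *mmu,
					 enum io_pgtable_fmt fmt)
	{
		struct panfrost_device *pfdev = mmu->pfdev;

		switch (fmt) {
		case ARM_64_LPAE_S1:
			return mmu_cfg_init_aarch64_4k(mmu);
		case ARM_MALI_LPAE:
			return mmu_cfg_init_mali_lpae(mmu);
		default:
			/* No other page-table format is valid for this GPU */
			dev_WARN(pfdev->dev, "invalid pgtable format %d", fmt);
			return -EINVAL;
		}
	}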
261 u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
267 as = mmu->as;
269 int en = atomic_inc_return(&mmu->as_count);
278 list_move(&mmu->list, &pfdev->as_lru_list);
281 /* Unhandled pagefault on this AS, the MMU was
282 * disabled. We need to re-enable the MMU after clearing+unmasking the AS interrupts. */
288 panfrost_mmu_enable(pfdev, mmu);
313 mmu->as = as;
315 atomic_set(&mmu->as_count, 1);
316 list_add(&mmu->list, &pfdev->as_lru_list);
318 dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask);
320 panfrost_mmu_enable(pfdev, mmu);
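Lines 261-320 are the heart of address-space management: a context that already owns an AS just gains a reference and moves to the front of the LRU (re-enabling the MMU first if an unhandled pagefault had disabled it, per lines 281-288); otherwise a free AS is claimed, or the least-recently-used idle one is stolen. A condensed sketch with the fault-recovery branch and sanity WARN_ONs abbreviated (field and lock names as in the matched lines):

	u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
	{
		int as;

		spin_lock(&pfdev->as_lock);

		as = mmu->as;
		if (as >= 0) {
			/* Already assigned: take a reference, refresh LRU position */
			atomic_inc(&mmu->as_count);
			list_move(&mmu->list, &pfdev->as_lru_list);
			goto out;
		}

		/* Check for a free AS */
		as = ffz(pfdev->as_alloc_mask);
		if (!(BIT(as) & pfdev->features.as_present)) {
			struct panfrost_mmu *lru_mmu;

			/* None free: steal the least-recently-used idle AS */
			list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
				if (!atomic_read(&lru_mmu->as_count))
					break;
			}
			list_del_init(&lru_mmu->list);
			as = lru_mmu->as;
			lru_mmu->as = -1;
		}

		/* Assign the free or reclaimed AS to this context */
		mmu->as = as;
		set_bit(as, &pfdev->as_alloc_mask);
		atomic_set(&mmu->as_count, 1);
		list_add(&mmu->list, &pfdev->as_lru_list);

		panfrost_mmu_enable(pfdev, mmu);

	out:
		spin_unlock(&pfdev->as_lock);
		return as;
	}

The LRU-steal design lets many GPU contexts share the handful of hardware address spaces Mali exposes, at the cost of re-enabling the MMU whenever a reclaimed AS changes owner.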
327 void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
329 atomic_dec(&mmu->as_count);
330 WARN_ON(atomic_read(&mmu->as_count) < 0);
335 struct panfrost_mmu *mmu, *mmu_tmp;
344 list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
345 mmu->as = -1;
346 atomic_set(&mmu->as_count, 0);
347 list_del_init(&mmu->list);
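On GPU reset every context is detached from its AS and the interrupt state is re-armed. A sketch of panfrost_mmu_reset() built around the matched lines (MMU_INT_CLEAR/MMU_INT_MASK follow the driver's register names; the faulty-mask clearing assumes the fault-masking scheme referenced at line 868):

	void panfrost_mmu_reset(struct panfrost_device *pfdev)
	{
		struct panfrost_mmu *mmu, *mmu_tmp;

		spin_lock(&pfdev->as_lock);

		pfdev->as_alloc_mask = 0;
		pfdev->as_faulty_mask = 0;

		list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
			mmu->as = -1;
			atomic_set(&mmu->as_count, 0);
			list_del_init(&mmu->list);
		}

		spin_unlock(&pfdev->as_lock);

		/* Clear and unmask all MMU interrupts */
		mmu_write(pfdev, MMU_INT_CLEAR, ~0);
		mmu_write(pfdev, MMU_INT_MASK, ~0);
	}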
378 struct panfrost_mmu *mmu,
381 if (mmu->as < 0)
388 mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);
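The early return behind line 381 reflects that a context without an AS is invisible to the GPU, so there is nothing to flush; when an AS is held, the driver issues AS_COMMAND_FLUSH_PT only while the device is powered. A sketch with runtime-PM guards (the exact put variant is an assumption; the kernel offers several):

	static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
					     struct panfrost_mmu *mmu,
					     u64 iova, u64 size)
	{
		if (mmu->as < 0)
			return;

		pm_runtime_get_noresume(pfdev->dev);

		/* Flush the PTs only if we're already awake */
		if (pm_runtime_active(pfdev->dev))
			mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);

		pm_runtime_put_autosuspend(pfdev->dev);
	}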
393 static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
398 struct io_pgtable_ops *ops = mmu->pgtbl_ops;
405 dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);
421 panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);
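mmu_map_sg() walks the DMA-mapped scatterlist and hands each contiguous chunk to the io-pgtable ops, flushing the whole range once at the end (line 421). A condensed sketch (get_pgsize() is the driver's helper for picking 4K vs 2M pages; map_pages() is the current io-pgtable interface, older kernels used map()):

	static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
			      u64 iova, int prot, struct sg_table *sgt)
	{
		unsigned int count;
		struct scatterlist *sgl;
		struct io_pgtable_ops *ops = mmu->pgtbl_ops;
		u64 start_iova = iova;

		for_each_sgtable_dma_sg(sgt, sgl, count) {
			unsigned long paddr = sg_dma_address(sgl);
			size_t len = sg_dma_len(sgl);

			dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx",
				mmu->as, iova, paddr, len);

			while (len) {
				size_t pgcount, mapped = 0;
				size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);

				ops->map_pages(ops, iova, paddr, pgsize, pgcount,
					       prot, GFP_KERNEL, &mapped);
				/* Don't get stuck if pages weren't mapped */
				mapped = max(mapped, pgsize);
				iova += mapped;
				paddr += mapped;
				len -= mapped;
			}
		}

		panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);

		return 0;
	}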
445 mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
457 struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
466 mapping->mmu->as, iova, len);
482 panfrost_mmu_flush_range(pfdev, mapping->mmu,
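panfrost_mmu_unmap() (lines 457-482) does the reverse: it walks the mapping's VA range, tears down page-table entries, and flushes once at the end. A condensed sketch of the loop between the matched lines, where bo is mapping->obj (the iova_to_phys() check skips holes in sparsely-populated heap BOs; unmap_pages() is the current io-pgtable interface):

	u64 iova = mapping->mmnode.start << PAGE_SHIFT;
	size_t len = mapping->mmnode.size << PAGE_SHIFT;
	size_t unmapped_len = 0;

	while (unmapped_len < len) {
		size_t pgcount;
		size_t pgsize = get_pgsize(iova, len - unmapped_len, &pgcount);

		/* Heap BOs may have holes; only unmap populated pages */
		if (!bo->is_heap || ops->iova_to_phys(ops, iova))
			ops->unmap_pages(ops, iova, pgsize, pgcount, NULL);

		iova += pgsize * pgcount;
		unmapped_len += pgsize * pgcount;
	}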
492 //struct panfrost_mmu *mmu = cookie;
513 struct panfrost_mmu *mmu;
516 list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
517 if (as == mmu->as)
524 spin_lock(&mmu->mm_lock);
526 drm_mm_for_each_node(node, &mmu->mm) {
536 spin_unlock(&mmu->mm_lock);
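Lines 513-536 are from the fault path's reverse lookup: given a faulting AS and address, find the context bound to that AS, then the GEM mapping whose drm_mm node covers the address. A condensed sketch (the container_of() step stands in for the driver's node-to-mapping helper):

	static struct panfrost_gem_mapping *
	addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
	{
		struct panfrost_gem_mapping *mapping = NULL;
		struct drm_mm_node *node;
		u64 offset = addr >> PAGE_SHIFT;
		struct panfrost_mmu *mmu;

		spin_lock(&pfdev->as_lock);
		list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
			if (as == mmu->as)
				goto found_mmu;
		}
		goto out;

	found_mmu:
		spin_lock(&mmu->mm_lock);
		drm_mm_for_each_node(node, &mmu->mm) {
			if (offset >= node->start &&
			    offset < (node->start + node->size)) {
				mapping = container_of(node, struct panfrost_gem_mapping,
						       mmnode);
				kref_get(&mapping->refcount);
				break;
			}
		}
		spin_unlock(&mmu->mm_lock);
	out:
		spin_unlock(&pfdev->as_lock);
		return mapping;
	}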
567 WARN_ON(bomapping->mmu->as != as);
633 mmu_map_sg(pfdev, bomapping->mmu, addr,
659 struct panfrost_mmu *mmu = container_of(kref, struct panfrost_mmu,
661 struct panfrost_device *pfdev = mmu->pfdev;
664 if (mmu->as >= 0) {
667 panfrost_mmu_disable(pfdev, mmu->as);
670 clear_bit(mmu->as, &pfdev->as_alloc_mask);
671 clear_bit(mmu->as, &pfdev->as_in_use_mask);
672 list_del(&mmu->list);
676 free_io_pgtable_ops(mmu->pgtbl_ops);
677 drm_mm_takedown(&mmu->mm);
678 kfree(mmu);
681 void panfrost_mmu_ctx_put(struct panfrost_mmu *mmu)
683 kref_put(&mmu->refcount, panfrost_mmu_release_ctx);
686 struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu)
688 kref_get(&mmu->refcount);
690 return mmu;
723 struct panfrost_mmu *mmu;
738 mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
739 if (!mmu)
742 mmu->pfdev = pfdev;
743 spin_lock_init(&mmu->mm_lock);
746 drm_mm_init(&mmu->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
747 mmu->mm.color_adjust = panfrost_drm_mm_color_adjust;
749 INIT_LIST_HEAD(&mmu->list);
750 mmu->as = -1;
752 mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
761 mmu->pgtbl_ops = alloc_io_pgtable_ops(fmt, &mmu->pgtbl_cfg, mmu);
762 if (!mmu->pgtbl_ops) {
767 ret = panfrost_mmu_cfg_init(mmu, fmt);
771 kref_init(&mmu->refcount);
773 return mmu;
776 free_io_pgtable_ops(mmu->pgtbl_ops);
779 kfree(mmu);
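In panfrost_mmu_ctx_create(), the initializer truncated at line 752 fills the io_pgtable_cfg from the probed MMU features before alloc_io_pgtable_ops() is called at line 761. A sketch (the FIELD_GET masks for the input/output address sizes and the mmu_tlb_ops callback table follow the driver, and may differ between kernel versions):

	mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= SZ_4K | SZ_2M,
		.ias		= FIELD_GET(0xff, pfdev->features.mmu_features),
		.oas		= FIELD_GET(0xff00, pfdev->features.mmu_features),
		.coherent_walk	= pfdev->coherent,
		.tlb		= &mmu_tlb_ops,
		.iommu_dev	= pfdev->dev,
	};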
868 /* Ignore MMU interrupts on this AS until it's been re-enabled. */
873 /* Disable the MMU to kill jobs on this AS. */
880 /* If we received new MMU interrupts, process them before returning. */
899 pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
906 IRQF_SHARED, KBUILD_MODNAME "-mmu",
910 dev_err(pfdev->dev, "failed to request mmu irq");
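Finally, lines 899-910 come from panfrost_mmu_init(), which wires the MMU interrupt to a hard handler plus a threaded handler for the slow fault-handling work. A sketch of the registration those lines belong to (the handler names are the driver's):

	err = devm_request_threaded_irq(pfdev->dev, pfdev->mmu_irq,
					panfrost_mmu_irq_handler,
					panfrost_mmu_irq_handler_thread,
					IRQF_SHARED, KBUILD_MODNAME "-mmu",
					pfdev);
	if (err) {
		dev_err(pfdev->dev, "failed to request mmu irq");
		return err;
	}

	return 0;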