Lines matching refs: pfdev
71 static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
78 ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
83 panfrost_device_schedule_reset(pfdev);
84 dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");
90 static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
95 status = wait_ready(pfdev, as_nr);
97 mmu_write(pfdev, AS_COMMAND(as_nr), cmd);
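
The wait_ready()/write_cmd() pair above is the usual poll-then-kick sequence for the per-address-space command interface. Below is a minimal sketch of that sequence, not the driver's exact bodies: AS_STATUS(), AS_COMMAND(), AS_STATUS_AS_ACTIVE and mmu_write() are assumed to come from the driver's register header, and the 10 us step / 100 ms cap are illustrative values.

#include <linux/iopoll.h>

/* Illustrative sketch: wait for the AS to go idle, then post a command. */
static int example_as_command(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
{
	u32 val;
	int ret;

	/* Busy-poll AS_STATUS until the ACTIVE bit clears. */
	ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
						val, !(val & AS_STATUS_AS_ACTIVE),
						10, 100000);
	if (ret) {
		/* The AS never went idle; the listing above schedules a GPU reset here. */
		dev_err(pfdev->dev, "AS%u ACTIVE bit stuck\n", as_nr);
		return ret;
	}

	/* The command register must only be written while the AS is idle. */
	mmu_write(pfdev, AS_COMMAND(as_nr), cmd);
	return 0;
}
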
102 static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
132 mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
133 mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
134 write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
138 static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
145 lock_region(pfdev, as_nr, iova, size);
148 write_cmd(pfdev, as_nr, op);
151 return wait_ready(pfdev, as_nr);
154 static int mmu_hw_do_operation(struct panfrost_device *pfdev,
160 spin_lock(&pfdev->as_lock);
161 ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
162 spin_unlock(&pfdev->as_lock);
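
mmu_hw_do_operation() and its _locked variant split the work between locking policy and register sequencing. A hedged sketch of that split, reusing the driver's helpers shown above; the early return for an unassigned AS and the AS_COMMAND_UNLOCK special case are assumptions about the surrounding context rather than quoted code.

/* Sketch: issue one MMU command against a locked VA region.
 * Caller must hold pfdev->as_lock. */
static int example_hw_op_locked(struct panfrost_device *pfdev, int as_nr,
				u64 iova, u64 size, u32 op)
{
	if (as_nr < 0)
		return 0;	/* No hardware address space assigned yet. */

	if (op != AS_COMMAND_UNLOCK)
		lock_region(pfdev, as_nr, iova, size);

	/* Run the command and wait for the AS to go idle again. */
	write_cmd(pfdev, as_nr, op);
	return wait_ready(pfdev, as_nr);
}

/* Sketch: the unlocked wrapper only brackets the call with as_lock. */
static int example_hw_op(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
			 u64 iova, u64 size, u32 op)
{
	int ret;

	spin_lock(&pfdev->as_lock);
	ret = example_hw_op_locked(pfdev, mmu->as, iova, size, op);
	spin_unlock(&pfdev->as_lock);

	return ret;
}
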
166 static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
173 mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
175 mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
176 mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));
181 mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
182 mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
184 mmu_write(pfdev, AS_TRANSCFG_LO(as_nr), lower_32_bits(transcfg));
185 mmu_write(pfdev, AS_TRANSCFG_HI(as_nr), upper_32_bits(transcfg));
187 write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
190 static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
192 mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
194 mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
195 mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);
197 mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
198 mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);
200 mmu_write(pfdev, AS_TRANSCFG_LO(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
201 mmu_write(pfdev, AS_TRANSCFG_HI(as_nr), 0);
203 write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
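
panfrost_mmu_enable() and panfrost_mmu_disable() program the same register block: flush, write the 64-bit setup registers as LO/HI halves, then latch everything with AS_COMMAND_UPDATE (the disable path writes zeroes and AS_TRANSCFG_ADRMODE_UNMAPPED instead). A sketch of the enable side; how transtab, memattr and transcfg are derived from the page-table configuration is not shown here.

/* Sketch: point an AS at a page table and latch the setup with UPDATE. */
static void example_mmu_enable(struct panfrost_device *pfdev, int as_nr,
			       u64 transtab, u64 memattr, u64 transcfg)
{
	/* Drop anything the previous owner of this AS left in the caches. */
	example_hw_op_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);

	/* 64-bit setup registers are written as 32-bit LO/HI halves. */
	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));
	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
	mmu_write(pfdev, AS_TRANSCFG_LO(as_nr), lower_32_bits(transcfg));
	mmu_write(pfdev, AS_TRANSCFG_HI(as_nr), upper_32_bits(transcfg));

	/* None of the writes above take effect until UPDATE is issued. */
	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
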
223 struct panfrost_device *pfdev = mmu->pfdev;
225 if (drm_WARN_ON(pfdev->ddev, pgtbl_cfg->arm_lpae_s1_cfg.ttbr &
247 struct panfrost_device *pfdev = mmu->pfdev;
256 drm_WARN(pfdev->ddev, 1, "Invalid pgtable format");
261 u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
265 spin_lock(&pfdev->as_lock);
278 list_move(&mmu->list, &pfdev->as_lru_list);
280 if (pfdev->as_faulty_mask & mask) {
285 mmu_write(pfdev, MMU_INT_CLEAR, mask);
286 mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
287 pfdev->as_faulty_mask &= ~mask;
288 panfrost_mmu_enable(pfdev, mmu);
295 as = ffz(pfdev->as_alloc_mask);
296 if (!(BIT(as) & pfdev->features.as_present)) {
299 list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
303 WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);
314 set_bit(as, &pfdev->as_alloc_mask);
316 list_add(&mmu->list, &pfdev->as_lru_list);
318 dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask);
320 panfrost_mmu_enable(pfdev, mmu);
323 spin_unlock(&pfdev->as_lock);
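
The panfrost_mmu_as_get() lines sketch the address-space assignment policy: reuse an existing assignment, otherwise claim a free AS, otherwise evict the least recently used one. A condensed sketch of that policy follows; the eviction test against as_in_use_mask and the omitted faulty-AS recovery branch (MMU_INT_CLEAR / as_faulty_mask above) are assumptions, not a verbatim copy of the driver.

/* Condensed sketch of the AS assignment policy, caller-visible behaviour only. */
static u32 example_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as;

	spin_lock(&pfdev->as_lock);

	as = mmu->as;
	if (as >= 0) {
		/* Already assigned: just refresh its position in the LRU list. */
		list_move(&mmu->list, &pfdev->as_lru_list);
		goto out;
	}

	/* Try the lowest unallocated AS number first. */
	as = ffz(pfdev->as_alloc_mask);
	if (!(BIT(as) & pfdev->features.as_present)) {
		struct panfrost_mmu *lru_mmu;

		/* Everything is taken: walk the LRU list from the cold end and
		 * steal the first AS that is not currently in use (assumed test). */
		list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
			if (!test_bit(lru_mmu->as, &pfdev->as_in_use_mask))
				break;
		}
		WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);

		as = lru_mmu->as;
		panfrost_mmu_disable(pfdev, as);
		lru_mmu->as = -1;
	}

	/* Claim the AS, make this context most-recently-used and enable it. */
	mmu->as = as;
	set_bit(as, &pfdev->as_alloc_mask);
	list_add(&mmu->list, &pfdev->as_lru_list);
	panfrost_mmu_enable(pfdev, mmu);

out:
	spin_unlock(&pfdev->as_lock);
	return as;
}
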
327 void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
333 void panfrost_mmu_reset(struct panfrost_device *pfdev)
337 clear_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended);
339 spin_lock(&pfdev->as_lock);
341 pfdev->as_alloc_mask = 0;
342 pfdev->as_faulty_mask = 0;
344 list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
350 spin_unlock(&pfdev->as_lock);
352 mmu_write(pfdev, MMU_INT_CLEAR, ~0);
353 mmu_write(pfdev, MMU_INT_MASK, ~0);
377 static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
384 pm_runtime_get_noresume(pfdev->dev);
387 if (pm_runtime_active(pfdev->dev))
388 mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);
390 pm_runtime_put_autosuspend(pfdev->dev);
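
panfrost_mmu_flush_range() shows the runtime-PM guard used around MMU register access: take a reference without resuming, only touch the hardware if the GPU is already powered (a powered-off GPU has nothing cached), then drop the reference towards autosuspend. A sketch of that guard:

#include <linux/pm_runtime.h>

/* Sketch: flush a VA range only if the GPU is currently powered. */
static void example_flush_range(struct panfrost_device *pfdev,
				struct panfrost_mmu *mmu,
				u64 iova, u64 size)
{
	if (mmu->as < 0)
		return;

	/* Hold a PM reference, but do not wake the GPU just for a flush. */
	pm_runtime_get_noresume(pfdev->dev);

	/* If the GPU is off its TLBs are gone anyway, so skip the hardware op. */
	if (pm_runtime_active(pfdev->dev))
		mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);

	pm_runtime_put_autosuspend(pfdev->dev);
}
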
393 static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
405 dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);
421 panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);
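
mmu_map_sg() walks a DMA-mapped scatterlist, pushes each contiguous chunk into the io-pgtable ops and flushes the written range at the end. A simplified sketch, assuming mmu->pgtbl_ops and a single 4 KiB page size (the driver also uses larger blocks where alignment allows); the error handling is illustrative only.

#include <linux/scatterlist.h>
#include <linux/io-pgtable.h>

/* Simplified sketch: map a DMA-mapped sg_table at iova, 4 KiB pages only. */
static int example_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
			  u64 iova, int prot, struct sg_table *sgt)
{
	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
	u64 start_iova = iova;
	struct scatterlist *sgl;
	unsigned int count;

	for_each_sgtable_dma_sg(sgt, sgl, count) {
		dma_addr_t paddr = sg_dma_address(sgl);
		size_t len = sg_dma_len(sgl);

		dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%pad, len=%zx",
			mmu->as, iova, &paddr, len);

		while (len) {
			size_t mapped = 0;

			if (ops->map_pages(ops, iova, paddr, SZ_4K, len / SZ_4K,
					   prot, GFP_KERNEL, &mapped) || !mapped)
				return -EINVAL;	/* sketch: real code would unwind */

			iova += mapped;
			paddr += mapped;
			len -= mapped;
		}
	}

	/* Make the new entries visible to the GPU before anyone uses them. */
	panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);
	return 0;
}
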
431 struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
445 mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
456 struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
465 dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
482 panfrost_mmu_flush_range(pfdev, mapping->mmu,
508 addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
515 spin_lock(&pfdev->as_lock);
516 list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
538 spin_unlock(&pfdev->as_lock);
544 static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
556 bomapping = addr_to_mapping(pfdev, as, addr);
562 dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
629 ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
633 mmu_map_sg(pfdev, bomapping->mmu, addr,
639 dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
661 struct panfrost_device *pfdev = mmu->pfdev;
663 spin_lock(&pfdev->as_lock);
665 pm_runtime_get_noresume(pfdev->dev);
666 if (pm_runtime_active(pfdev->dev))
667 panfrost_mmu_disable(pfdev, mmu->as);
668 pm_runtime_put_autosuspend(pfdev->dev);
670 clear_bit(mmu->as, &pfdev->as_alloc_mask);
671 clear_bit(mmu->as, &pfdev->as_in_use_mask);
674 spin_unlock(&pfdev->as_lock);
719 struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
721 u32 va_bits = GPU_MMU_FEATURES_VA_BITS(pfdev->features.mmu_features);
722 u32 pa_bits = GPU_MMU_FEATURES_PA_BITS(pfdev->features.mmu_features);
727 if (pfdev->comp->gpu_quirks & BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE)) {
728 if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU)) {
729 dev_err_once(pfdev->dev,
742 mmu->pfdev = pfdev;
756 .coherent_walk = pfdev->coherent,
758 .iommu_dev = pfdev->dev,
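
panfrost_mmu_ctx_create() sizes the io-pgtable configuration from the VA/PA widths the GPU reports and honours the AArch64 page-table quirk checked above. A hedged sketch of the io-pgtable setup: the page-size bitmap, the mmu_tlb_ops name (the driver's flush callbacks) and the mmu->pgtbl_cfg storage location are assumptions, and the quirk path would pick ARM_64_LPAE_S1 instead of ARM_MALI_LPAE.

#include <linux/io-pgtable.h>

/* Sketch: build the io-pgtable config from the GPU's reported address widths. */
static struct io_pgtable_ops *
example_pgtable_alloc(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	u32 va_bits = GPU_MMU_FEATURES_VA_BITS(pfdev->features.mmu_features);
	u32 pa_bits = GPU_MMU_FEATURES_PA_BITS(pfdev->features.mmu_features);

	mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= SZ_4K | SZ_2M,	/* assumed sizes */
		.ias		= va_bits,
		.oas		= pa_bits,
		.coherent_walk	= pfdev->coherent,
		.tlb		= &mmu_tlb_ops,		/* driver flush callbacks */
		.iommu_dev	= pfdev->dev,
	};

	/* Classic Mali format; the AArch64 quirk would use ARM_64_LPAE_S1. */
	return alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg, mmu);
}
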
783 static const char *access_type_name(struct panfrost_device *pfdev,
788 if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU))
806 struct panfrost_device *pfdev = data;
808 if (test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended))
811 if (!mmu_read(pfdev, MMU_INT_STAT))
814 mmu_write(pfdev, MMU_INT_MASK, 0);
820 struct panfrost_device *pfdev = data;
821 u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
833 fault_status = mmu_read(pfdev, AS_FAULTSTATUS(as));
834 addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(as));
835 addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(as)) << 32;
842 mmu_write(pfdev, MMU_INT_CLEAR, mask);
847 ret = panfrost_mmu_map_fault_addr(pfdev, as, addr);
851 dev_err(pfdev->dev,
864 access_type, access_type_name(pfdev, fault_status),
867 spin_lock(&pfdev->as_lock);
871 pfdev->as_faulty_mask |= mask;
874 panfrost_mmu_disable(pfdev, as);
875 spin_unlock(&pfdev->as_lock);
882 status = mmu_read(pfdev, MMU_INT_RAWSTAT) & ~pfdev->as_faulty_mask;
886 if (!test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended)) {
887 spin_lock(&pfdev->as_lock);
888 mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
889 spin_unlock(&pfdev->as_lock);
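
The interrupt path above is a classic hard/threaded split: the hard handler checks MMU_INT_STAT, masks the block and wakes the thread; the thread reads MMU_INT_RAWSTAT, decodes AS_FAULTSTATUS/AS_FAULTADDRESS per address space, tries to grow heap objects via panfrost_mmu_map_fault_addr(), disables and marks faulty any AS it cannot satisfy, then restores the mask minus as_faulty_mask. A reduced sketch of that shape, with the per-AS decoding elided:

#include <linux/interrupt.h>

/* Hard handler sketch: acknowledge, mask, defer the real work to the thread. */
static irqreturn_t example_mmu_irq(int irq, void *data)
{
	struct panfrost_device *pfdev = data;

	if (!mmu_read(pfdev, MMU_INT_STAT))
		return IRQ_NONE;

	/* Keep the line quiet until the thread has drained the fault(s). */
	mmu_write(pfdev, MMU_INT_MASK, 0);
	return IRQ_WAKE_THREAD;
}

/* Threaded handler sketch: the slow fault handling happens here, in
 * sleepable context, then the interrupt sources are unmasked again. */
static irqreturn_t example_mmu_irq_thread(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);

	if (!status)
		return IRQ_NONE;

	/* ... per-AS decoding of AS_FAULTSTATUS / AS_FAULTADDRESS_LO/HI,
	 * heap growth or AS shutdown, MMU_INT_CLEAR of the handled bits ... */

	/* Re-enable everything except address spaces already marked faulty. */
	mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
	return IRQ_HANDLED;
}
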
895 int panfrost_mmu_init(struct panfrost_device *pfdev)
899 pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
900 if (pfdev->mmu_irq < 0)
901 return pfdev->mmu_irq;
903 err = devm_request_threaded_irq(pfdev->dev, pfdev->mmu_irq,
907 pfdev);
910 dev_err(pfdev->dev, "failed to request mmu irq");
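
panfrost_mmu_init() wires the pair up with the threaded-IRQ API: look the interrupt up by name, then register the hard and threaded handlers in one call; the devm_ variant ties the registration lifetime to the device. A sketch using the example handlers above; the IRQF_SHARED flag and the irq name string are assumptions.

#include <linux/interrupt.h>
#include <linux/platform_device.h>

/* Sketch: request the "mmu" interrupt with a hard + threaded handler pair. */
static int example_mmu_irq_init(struct panfrost_device *pfdev)
{
	int irq, err;

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
	if (irq < 0)
		return irq;

	err = devm_request_threaded_irq(pfdev->dev, irq,
					example_mmu_irq, example_mmu_irq_thread,
					IRQF_SHARED, "panfrost-mmu", pfdev);
	if (err)
		dev_err(pfdev->dev, "failed to request mmu irq\n");

	return err;
}
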
917 void panfrost_mmu_fini(struct panfrost_device *pfdev)
919 mmu_write(pfdev, MMU_INT_MASK, 0);
922 void panfrost_mmu_suspend_irq(struct panfrost_device *pfdev)
924 set_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended);
926 mmu_write(pfdev, MMU_INT_MASK, 0);
927 synchronize_irq(pfdev->mmu_irq);
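
panfrost_mmu_suspend_irq() quiesces the interrupt in a deliberate order: flag the MMU as suspended so a racing handler bails out early (see the is_suspended test in the hard handler above), mask the hardware sources, then wait with synchronize_irq() for any handler already in flight. A sketch of that ordering:

/* Sketch: quiesce the MMU interrupt before suspending the block. */
static void example_mmu_quiesce(struct panfrost_device *pfdev)
{
	/* 1. Tell handlers to bail out from now on. */
	set_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended);

	/* 2. Stop the hardware from raising new interrupts. */
	mmu_write(pfdev, MMU_INT_MASK, 0);

	/* 3. Wait for a handler that may already be running to finish. */
	synchronize_irq(pfdev->mmu_irq);
}
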