Lines Matching defs:vdev
47 static void *ivpu_pgtable_alloc_page(struct ivpu_device *vdev, dma_addr_t *dma)
59 dma_addr = dma_map_page(vdev->drm.dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
60 if (dma_mapping_error(vdev->drm.dev, dma_addr))
72 dma_unmap_page(vdev->drm.dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
79 static void ivpu_pgtable_free_page(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
86 dma_unmap_page(vdev->drm.dev, dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK, PAGE_SIZE,
93 static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
117 ivpu_pgtable_free_page(vdev, pte_dma_ptr, pte_dma);
121 ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
126 ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
129 ivpu_pgtable_free_page(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
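
The alloc/free helpers above follow the usual allocate-a-page-then-DMA-map pattern, unwinding in reverse order when dma_mapping_error() reports a failure (lines 59-72), and stripping flag bits from the stored DMA address before unmapping (line 86). Below is a minimal, generic sketch of that pattern; the names are hypothetical and the flag handling is deliberately omitted.

    /* Sketch only: the generic "allocate a page and DMA-map it" pattern,
     * with the same unwind order as the helpers above.  Names here are
     * hypothetical, not the driver's. */
    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    static void *example_alloc_dma_page(struct device *dev, dma_addr_t *dma_out)
    {
            struct page *page;
            dma_addr_t dma;

            page = alloc_page(GFP_KERNEL | __GFP_ZERO);
            if (!page)
                    return NULL;

            dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
            if (dma_mapping_error(dev, dma)) {
                    __free_page(page);              /* undo in reverse order */
                    return NULL;
            }

            *dma_out = dma;
            return page_address(page);              /* CPU view of the table page */
    }

    static void example_free_dma_page(struct device *dev, void *cpu, dma_addr_t dma)
    {
            dma_unmap_page(dev, dma, PAGE_SIZE, DMA_BIDIRECTIONAL);
            __free_page(virt_to_page(cpu));
    }

Both the CPU pointer and the DMA address are kept (line 79 takes both), presumably because the CPU writes the table entries while the device walks the table through the DMA address.
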
135 ivpu_mmu_ensure_pgd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
143 pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma);
154 ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx)
162 pud_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pud_dma);
166 drm_WARN_ON(&vdev->drm, pgtable->pmd_ptrs[pgd_idx]);
171 drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx]);
185 ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
190 ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx,
199 pmd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pmd_dma);
203 drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx][pud_idx]);
214 ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
219 ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
228 pte_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pte_dma);
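
The ensure_pgd/ensure_pud/ensure_pmd/ensure_pte chain lazily allocates one table page per level before a PTE can be written. As a point of reference, the standalone sketch below shows how a virtual address is conventionally split into four 9-bit indices for a 4-level table with a 4 KiB granule (512 x 64-bit entries per table page); the shift values are an assumption for illustration, not the driver's actual index macros.

    /* Standalone sketch: splitting an address into PGD/PUD/PMD/PTE indices
     * for a 4-level, 4 KiB-granule page table.  Shift values are assumed. */
    #include <stdint.h>
    #include <stdio.h>

    #define PTE_SHIFT 12u                    /* 4 KiB pages        */
    #define PMD_SHIFT (PTE_SHIFT + 9u)       /* 512 PTEs per table */
    #define PUD_SHIFT (PMD_SHIFT + 9u)
    #define PGD_SHIFT (PUD_SHIFT + 9u)
    #define IDX_MASK  0x1ffu                 /* 9 bits per level   */

    int main(void)
    {
            uint64_t vpu_addr = 0x80004567000ULL;   /* arbitrary example */

            printf("pgd=%u pud=%u pmd=%u pte=%u\n",
                   (unsigned)((vpu_addr >> PGD_SHIFT) & IDX_MASK),
                   (unsigned)((vpu_addr >> PUD_SHIFT) & IDX_MASK),
                   (unsigned)((vpu_addr >> PMD_SHIFT) & IDX_MASK),
                   (unsigned)((vpu_addr >> PTE_SHIFT) & IDX_MASK));
            return 0;
    }

These per-level indices are what the ensure_* helpers above use to decide whether the next-level table page already exists or must be allocated.
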
239 ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
248 drm_WARN_ON(&vdev->drm, ctx->id == IVPU_RESERVED_CONTEXT_MMU_SSID);
251 if (!ivpu_mmu_ensure_pgd(vdev, &ctx->pgtable))
255 if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
259 if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx))
263 pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx);
274 ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
279 drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size));
280 drm_WARN_ON(&vdev->drm, !IS_ALIGNED(dma_addr, size));
285 int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
310 ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
319 ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot);
322 ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
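
The mapping loop chooses between a 64 KiB contiguous mapping and a plain 4 KiB page per iteration (lines 319-322), and map_cont_64k insists that both addresses are aligned to the mapping size (lines 279-280). The standalone sketch below demonstrates only that step-size decision; the actual helpers carry protection flags and other details not shown in this listing.

    /* Standalone sketch of the step-size decision: take a 64 KiB step only
     * when both addresses are 64 KiB aligned and at least 64 KiB remains,
     * otherwise fall back to a single 4 KiB page. */
    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    #define SZ_4K   0x1000u
    #define SZ_64K  0x10000u
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    int main(void)
    {
            uint64_t vpu_addr = 0x10000000ULL;       /* example, 64 KiB aligned */
            uint64_t dma_addr = 0x7f000000ULL;       /* example, 64 KiB aligned */
            size_t size = 3 * SZ_64K + 2 * SZ_4K;

            while (size) {
                    size_t step = (IS_ALIGNED(vpu_addr | dma_addr, SZ_64K) &&
                                   size >= SZ_64K) ? SZ_64K : SZ_4K;

                    printf("map vpu 0x%llx <- dma 0x%llx, %zu bytes\n",
                           (unsigned long long)vpu_addr,
                           (unsigned long long)dma_addr, step);
                    vpu_addr += step;
                    dma_addr += step;
                    size -= step;
            }
            return 0;
    }
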
337 static void ivpu_mmu_context_set_page_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
348 static void ivpu_mmu_context_split_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
359 static void ivpu_mmu_context_split_64k_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
366 ivpu_dbg(vdev, MMU_MAP, "Split 64K page ctx: %u vpu_addr: 0x%llx\n", ctx->id, vpu_addr);
369 ivpu_mmu_context_split_page(vdev, ctx, start + offset);
375 ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
385 if (drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr | size, IVPU_MMU_PAGE_SIZE)))
390 ivpu_dbg(vdev, MMU_MAP, "Set read-only pages ctx: %u vpu_addr: 0x%llx size: %lu\n",
396 ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr);
400 ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr + size);
405 ivpu_mmu_context_set_page_ro(vdev, ctx, vpu_addr);
415 ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
417 ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
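
Setting pages read-only (lines 375-405) first splits any 64 KiB contiguous mapping at the start and end of the range (lines 396-400), then walks the range page by page and finishes with a TLB invalidation (line 415). The small standalone sketch below only illustrates that at most two boundary chunks need splitting; the alignment checks are assumptions about what the split calls are guarding, and the addresses are example values.

    /* Standalone sketch: only the 64 KiB chunks straddling the start and
     * the end of a read-only range would need splitting. */
    #include <stdint.h>
    #include <stdio.h>

    #define SZ_64K 0x10000ULL

    int main(void)
    {
            uint64_t vpu_addr = 0x1000a000ULL;       /* 4 KiB aligned, not 64 KiB */
            uint64_t size     = 0x24000ULL;
            uint64_t end      = vpu_addr + size;

            if (vpu_addr & (SZ_64K - 1))
                    printf("split 64K page at 0x%llx (range start)\n",
                           (unsigned long long)(vpu_addr & ~(SZ_64K - 1)));
            if (end & (SZ_64K - 1))
                    printf("split 64K page at 0x%llx (range end)\n",
                           (unsigned long long)(end & ~(SZ_64K - 1)));
            return 0;
    }
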
432 ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
441 if (drm_WARN_ON(&vdev->drm, !ctx))
460 ivpu_dbg(vdev, MMU_MAP, "Map ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
463 ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
465 ivpu_err(vdev, "Failed to map context pages\n");
472 ret = ivpu_mmu_cd_set(vdev, ctx->id, &ctx->pgtable);
474 ivpu_err(vdev, "Failed to set context descriptor for context %u: %d\n",
484 ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
486 ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
500 ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
507 if (drm_WARN_ON(&vdev->drm, !ctx))
516 ivpu_dbg(vdev, MMU_MAP, "Unmap ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
528 ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
530 ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
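
map_sgt/unmap_sgt walk the DMA-mapped segments of an sg_table, mapping each segment at an advancing vpu_addr and invalidating the TLB at the end (lines 484 and 528). A sketch of that walk using the standard scatterlist iterator might look like the following; the per-segment mapping step is elided and the function is illustrative, not the driver's.

    /* Sketch: walking the DMA-mapped segments of an sg_table, the shape of
     * the map_sgt/unmap_sgt loops above.  Illustrative only. */
    #include <linux/printk.h>
    #include <linux/scatterlist.h>
    #include <linux/types.h>

    static void example_walk_sgt(struct sg_table *sgt, u64 vpu_addr)
    {
            struct scatterlist *sg;
            unsigned int i;

            for_each_sgtable_dma_sg(sgt, sg, i) {
                    dma_addr_t dma_addr = sg_dma_address(sg);
                    size_t size = sg_dma_len(sg);

                    pr_debug("seg %u: vpu 0x%llx <- dma %pad, %zu bytes\n",
                             i, vpu_addr, &dma_addr, size);
                    /* map [vpu_addr, vpu_addr + size) to dma_addr here */
                    vpu_addr += size;
            }
    }
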
564 void ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
571 start = vdev->hw->ranges.global.start;
572 end = vdev->hw->ranges.shave.end;
574 start = min_t(u64, vdev->hw->ranges.user.start, vdev->hw->ranges.shave.start);
575 end = max_t(u64, vdev->hw->ranges.user.end, vdev->hw->ranges.dma.end);
582 void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
585 ivpu_mmu_cd_clear(vdev, ctx->id);
590 ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);
594 void ivpu_mmu_global_context_init(struct ivpu_device *vdev)
596 ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
599 void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
601 ivpu_mmu_context_fini(vdev, &vdev->gctx);
604 int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev)
608 ivpu_mmu_context_init(vdev, &vdev->rctx, IVPU_RESERVED_CONTEXT_MMU_SSID);
610 mutex_lock(&vdev->rctx.lock);
612 if (!ivpu_mmu_ensure_pgd(vdev, &vdev->rctx.pgtable)) {
613 ivpu_err(vdev, "Failed to allocate root page table for reserved context\n");
618 ret = ivpu_mmu_cd_set(vdev, vdev->rctx.id, &vdev->rctx.pgtable);
620 ivpu_err(vdev, "Failed to set context descriptor for reserved context\n");
624 mutex_unlock(&vdev->rctx.lock);
628 mutex_unlock(&vdev->rctx.lock);
629 ivpu_mmu_context_fini(vdev, &vdev->rctx);
633 void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)
635 ivpu_mmu_cd_clear(vdev, vdev->rctx.id);
636 ivpu_mmu_context_fini(vdev, &vdev->rctx);
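
The reserved-context init (lines 604-630) takes the context mutex, pre-allocates the root table, programs the context descriptor, and unwinds through unlock-and-fini on failure. The generic sketch below shows that lock/allocate/publish/unwind shape using only standard kernel idioms; the struct and function names are hypothetical, not the driver's.

    /* Sketch of the lock -> allocate -> publish -> unwind shape used by the
     * reserved-context init above.  Names are hypothetical. */
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/mutex.h>

    struct example_ctx {
            struct mutex lock;
            struct page *pgd_page;          /* stand-in for the root table */
    };

    static int example_ctx_init(struct example_ctx *c)
    {
            int ret = 0;

            mutex_lock(&c->lock);

            c->pgd_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
            if (!c->pgd_page) {
                    ret = -ENOMEM;
                    goto err_unlock;
            }

            /* program the context descriptor here; on failure, free the
             * page and fall through to the unlock */

            mutex_unlock(&c->lock);
            return 0;

    err_unlock:
            mutex_unlock(&c->lock);
            return ret;
    }
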