Lines matching refs:vpu_addr
240 u64 vpu_addr, dma_addr_t dma_addr, u64 prot) in ivpu_mmu_context_map_page() argument
243 int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr); in ivpu_mmu_context_map_page()
244 int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr); in ivpu_mmu_context_map_page()
245 int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr); in ivpu_mmu_context_map_page()
246 int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr); in ivpu_mmu_context_map_page()
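
The four FIELD_GET() calls above slice one 64-bit VPU virtual address into per-level page-table indices. A minimal standalone sketch of that decomposition, assuming 4 KiB pages and 9 index bits per level; the shift values below are my assumption, not copied from the driver headers:

/* Sketch, not driver code: split a VPU virtual address into pgd/pud/pmd/pte
 * indices the way the IVPU_MMU_*_INDEX_MASK extraction above implies,
 * assuming a 4-level table with 512 entries (9 bits) per level. */
#include <stdint.h>
#include <stdio.h>

#define PTE_SHIFT 12          /* assumed: bits 20:12 */
#define PMD_SHIFT 21          /* assumed: bits 29:21 */
#define PUD_SHIFT 30          /* assumed: bits 38:30 */
#define PGD_SHIFT 39          /* assumed: bits 47:39 */
#define IDX_MASK  0x1ffULL    /* 9 bits per level */

int main(void)
{
    uint64_t vpu_addr = 0x12345678000ULL;   /* arbitrary test address */

    printf("pgd=%llu pud=%llu pmd=%llu pte=%llu\n",
           (unsigned long long)((vpu_addr >> PGD_SHIFT) & IDX_MASK),
           (unsigned long long)((vpu_addr >> PUD_SHIFT) & IDX_MASK),
           (unsigned long long)((vpu_addr >> PMD_SHIFT) & IDX_MASK),
           (unsigned long long)((vpu_addr >> PTE_SHIFT) & IDX_MASK));
    return 0;
}

With that layout each level indexes a 512-entry table, which matches the four-level walk implied by the pgd/pud/pmd/pte naming in the driver.
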
274 ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr, in ivpu_mmu_context_map_cont_64k() argument
279 drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size)); in ivpu_mmu_context_map_cont_64k()
285 int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot); in ivpu_mmu_context_map_cont_64k()
291 vpu_addr += IVPU_MMU_PAGE_SIZE; in ivpu_mmu_context_map_cont_64k()
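
ivpu_mmu_context_map_cont_64k() above warns unless vpu_addr is aligned to the block size and then advances in IVPU_MMU_PAGE_SIZE steps. A sketch of that loop, assuming 4 KiB pages and a 16-page (64 KiB) contiguous block; map_page() is a hypothetical stand-in for ivpu_mmu_context_map_page():

/* Sketch, not driver code: the 64 KiB "contiguous" path maps sixteen
 * back-to-back 4 KiB pages after asserting 64 KiB alignment on both sides. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE       4096ULL
#define CONT_PAGES_SIZE (16 * PAGE_SIZE)    /* 64 KiB */

static int map_page(uint64_t vpu_addr, uint64_t dma_addr)
{
    printf("map 4K: vpu 0x%llx -> dma 0x%llx\n",
           (unsigned long long)vpu_addr, (unsigned long long)dma_addr);
    return 0;
}

static int map_cont_64k(uint64_t vpu_addr, uint64_t dma_addr)
{
    uint64_t size = CONT_PAGES_SIZE;

    /* both the VPU and DMA addresses must be 64 KiB aligned */
    assert((vpu_addr % CONT_PAGES_SIZE) == 0);
    assert((dma_addr % CONT_PAGES_SIZE) == 0);

    while (size) {
        int ret = map_page(vpu_addr, dma_addr);
        if (ret)
            return ret;

        size -= PAGE_SIZE;
        vpu_addr += PAGE_SIZE;
        dma_addr += PAGE_SIZE;
    }
    return 0;
}

int main(void)
{
    return map_cont_64k(0x10000, 0x40000);
}
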
298 static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr) in ivpu_mmu_context_unmap_page() argument
300 int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr); in ivpu_mmu_context_unmap_page()
301 int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr); in ivpu_mmu_context_unmap_page()
302 int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr); in ivpu_mmu_context_unmap_page()
303 int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr); in ivpu_mmu_context_unmap_page()
311 u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot) in ivpu_mmu_context_map_pages() argument
318 IS_ALIGNED(vpu_addr | dma_addr, IVPU_MMU_CONT_PAGES_SIZE)) { in ivpu_mmu_context_map_pages()
319 ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot); in ivpu_mmu_context_map_pages()
322 ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot); in ivpu_mmu_context_map_pages()
329 vpu_addr += map_size; in ivpu_mmu_context_map_pages()
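
ivpu_mmu_context_map_pages() appears to pick between the two paths above: the 64 KiB contiguous mapping when alignment and remaining size allow (line 318), otherwise a single 4 KiB page, then it advances vpu_addr by whichever map_size was used (line 329). A sketch of that selection loop, with hypothetical map_page()/map_cont_64k() stubs standing in for the driver functions:

/* Sketch, not driver code: choose 64 KiB vs 4 KiB mappings per iteration. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE       4096ULL
#define CONT_PAGES_SIZE (16 * PAGE_SIZE)    /* 64 KiB */

static int map_page(uint64_t vpu_addr, uint64_t dma_addr)
{
    printf("4K  map: vpu 0x%llx dma 0x%llx\n",
           (unsigned long long)vpu_addr, (unsigned long long)dma_addr);
    return 0;
}

static int map_cont_64k(uint64_t vpu_addr, uint64_t dma_addr)
{
    printf("64K map: vpu 0x%llx dma 0x%llx\n",
           (unsigned long long)vpu_addr, (unsigned long long)dma_addr);
    return 0;
}

static int map_pages(uint64_t vpu_addr, uint64_t dma_addr, size_t size)
{
    while (size) {
        uint64_t map_size;
        int ret;

        /* take the 64 KiB path only when enough remains and both
         * the VPU and DMA addresses are 64 KiB aligned */
        if (size >= CONT_PAGES_SIZE &&
            ((vpu_addr | dma_addr) % CONT_PAGES_SIZE) == 0) {
            ret = map_cont_64k(vpu_addr, dma_addr);
            map_size = CONT_PAGES_SIZE;
        } else {
            ret = map_page(vpu_addr, dma_addr);
            map_size = PAGE_SIZE;
        }
        if (ret)
            return ret;

        vpu_addr += map_size;
        dma_addr += map_size;
        size -= map_size;
    }
    return 0;
}

int main(void)
{
    return map_pages(0x10000, 0x40000, CONT_PAGES_SIZE + PAGE_SIZE);
}

Preferring the contiguous path presumably lets the MMU cache a whole 64 KiB block as a single translation, cutting TLB pressure for large, well-aligned buffers.
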
338 u64 vpu_addr) in ivpu_mmu_context_set_page_ro() argument
340 int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr); in ivpu_mmu_context_set_page_ro()
341 int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr); in ivpu_mmu_context_set_page_ro()
342 int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr); in ivpu_mmu_context_set_page_ro()
343 int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr); in ivpu_mmu_context_set_page_ro()
349 u64 vpu_addr) in ivpu_mmu_context_split_page() argument
351 int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr); in ivpu_mmu_context_split_page()
352 int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr); in ivpu_mmu_context_split_page()
353 int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr); in ivpu_mmu_context_split_page()
354 int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr); in ivpu_mmu_context_split_page()
360 u64 vpu_addr) in ivpu_mmu_context_split_64k_page() argument
362 u64 start = ALIGN_DOWN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE); in ivpu_mmu_context_split_64k_page()
363 u64 end = ALIGN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE); in ivpu_mmu_context_split_64k_page()
366 ivpu_dbg(vdev, MMU_MAP, "Split 64K page ctx: %u vpu_addr: 0x%llx\n", ctx->id, vpu_addr); in ivpu_mmu_context_split_64k_page()
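
Splitting walks the entire 64 KiB-aligned window that contains vpu_addr (the ALIGN_DOWN/ALIGN pair on lines 362-363) and rewrites each 4 KiB entry individually. A standalone sketch of that walk; split_page() is a hypothetical stand-in for ivpu_mmu_context_split_page(), and "dropping the contiguous hint" is my reading of what the per-page rewrite achieves:

/* Sketch, not driver code: visit every 4 KiB page of the 64 KiB block
 * containing vpu_addr so each page can afterwards be changed on its own. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE        4096ULL
#define CONT_PAGES_SIZE  (16 * PAGE_SIZE)   /* 64 KiB */

#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))

static void split_page(uint64_t vpu_addr)
{
    printf("rewrite PTE without cont hint: 0x%llx\n",
           (unsigned long long)vpu_addr);
}

static void split_64k_page(uint64_t vpu_addr)
{
    uint64_t start = ALIGN_DOWN(vpu_addr, CONT_PAGES_SIZE);
    uint64_t end = ALIGN_UP(vpu_addr, CONT_PAGES_SIZE);
    uint64_t offset;

    for (offset = 0; offset < end - start; offset += PAGE_SIZE)
        split_page(start + offset);
}

int main(void)
{
    split_64k_page(0x13000);    /* lands inside the 64 KiB block at 0x10000 */
    return 0;
}
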
375 ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr, in ivpu_mmu_context_set_pages_ro() argument
378 u64 end = vpu_addr + size; in ivpu_mmu_context_set_pages_ro()
385 if (drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr | size, IVPU_MMU_PAGE_SIZE))) in ivpu_mmu_context_set_pages_ro()
391 ctx->id, vpu_addr, size); in ivpu_mmu_context_set_pages_ro()
395 if (!IS_ALIGNED(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE)) in ivpu_mmu_context_set_pages_ro()
396 ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr); in ivpu_mmu_context_set_pages_ro()
399 if (!IS_ALIGNED(vpu_addr + size, IVPU_MMU_CONT_PAGES_SIZE)) in ivpu_mmu_context_set_pages_ro()
400 ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr + size); in ivpu_mmu_context_set_pages_ro()
404 if (vpu_addr < end) in ivpu_mmu_context_set_pages_ro()
405 ivpu_mmu_context_set_page_ro(vdev, ctx, vpu_addr); in ivpu_mmu_context_set_pages_ro()
407 vpu_addr += IVPU_MMU_PAGE_SIZE; in ivpu_mmu_context_set_pages_ro()
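
Taken together, ivpu_mmu_context_set_pages_ro() checks that vpu_addr and size are page aligned (line 385), splits any 64 KiB block that is only partially covered at either end of the range (lines 395-400), and then sets the read-only attribute one 4 KiB page at a time up to end (lines 404-407). A sketch of that sequence, with hypothetical stand-ins for the driver helpers:

/* Sketch, not driver code: mark an arbitrary page-aligned range read-only. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE       4096ULL
#define CONT_PAGES_SIZE (16 * PAGE_SIZE)    /* 64 KiB */

static void split_64k_page(uint64_t vpu_addr)
{
    printf("split 64K block around 0x%llx\n", (unsigned long long)vpu_addr);
}

static void set_page_ro(uint64_t vpu_addr)
{
    printf("set RO: 0x%llx\n", (unsigned long long)vpu_addr);
}

static int set_pages_ro(uint64_t vpu_addr, size_t size)
{
    uint64_t end = vpu_addr + size;

    /* only whole 4 KiB pages can be protected */
    if ((vpu_addr | size) % PAGE_SIZE)
        return -1;

    /* split any 64 KiB block that the range only partially covers */
    if (vpu_addr % CONT_PAGES_SIZE)
        split_64k_page(vpu_addr);
    if ((vpu_addr + size) % CONT_PAGES_SIZE)
        split_64k_page(vpu_addr + size);

    while (vpu_addr < end) {
        set_page_ro(vpu_addr);
        vpu_addr += PAGE_SIZE;
    }
    return 0;
}

int main(void)
{
    return set_pages_ro(0x13000, 0x9000);
}
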
422 static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size) in ivpu_mmu_context_unmap_pages() argument
425 ivpu_mmu_context_unmap_page(ctx, vpu_addr); in ivpu_mmu_context_unmap_pages()
426 vpu_addr += IVPU_MMU_PAGE_SIZE; in ivpu_mmu_context_unmap_pages()
433 u64 vpu_addr, struct sg_table *sgt, bool llc_coherent) in ivpu_mmu_context_map_sgt() argument
435 size_t start_vpu_addr = vpu_addr; in ivpu_mmu_context_map_sgt()
444 if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE)) in ivpu_mmu_context_map_sgt()
447 if (vpu_addr & ~IVPU_MMU_VPU_ADDRESS_MASK) in ivpu_mmu_context_map_sgt()
461 ctx->id, dma_addr, vpu_addr, size); in ivpu_mmu_context_map_sgt()
463 ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot); in ivpu_mmu_context_map_sgt()
468 vpu_addr += size; in ivpu_mmu_context_map_sgt()
494 ivpu_mmu_context_unmap_pages(ctx, start_vpu_addr, vpu_addr - start_vpu_addr); in ivpu_mmu_context_map_sgt()
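
ivpu_mmu_context_map_sgt() records start_vpu_addr (line 435), rejects unaligned or out-of-range addresses (lines 444-447), maps one range per DMA segment, and on failure unmaps everything from start_vpu_addr up to the point reached (line 494). A sketch of that walk and rollback; a plain array of chunks stands in for struct sg_table, the address mask is an assumption, and the helpers are hypothetical stand-ins:

/* Sketch, not driver code: map a list of DMA chunks at increasing VPU
 * addresses, rolling back already-mapped ranges if any chunk fails. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE     4096ULL
#define VPU_ADDR_MASK 0x0000ffffffffffffULL   /* assumed addressable range */

struct dma_chunk { uint64_t dma_addr; size_t size; };

static int map_pages(uint64_t vpu_addr, uint64_t dma_addr, size_t size)
{
    printf("map vpu 0x%llx dma 0x%llx size 0x%zx\n",
           (unsigned long long)vpu_addr, (unsigned long long)dma_addr, size);
    return 0;
}

static void unmap_pages(uint64_t vpu_addr, size_t size)
{
    printf("unmap vpu 0x%llx size 0x%zx\n", (unsigned long long)vpu_addr, size);
}

static int map_sgt(uint64_t vpu_addr, const struct dma_chunk *chunks, size_t n)
{
    uint64_t start_vpu_addr = vpu_addr;
    size_t i;
    int ret;

    if (vpu_addr % PAGE_SIZE)
        return -1;                  /* must start on a page boundary */
    if (vpu_addr & ~VPU_ADDR_MASK)
        return -1;                  /* outside the VPU address space */

    for (i = 0; i < n; i++) {
        ret = map_pages(vpu_addr, chunks[i].dma_addr, chunks[i].size);
        if (ret)
            goto err_unmap;
        vpu_addr += chunks[i].size;
    }
    return 0;

err_unmap:
    /* roll back everything mapped before the failure */
    unmap_pages(start_vpu_addr, vpu_addr - start_vpu_addr);
    return ret;
}

int main(void)
{
    struct dma_chunk chunks[] = { { 0x40000, 0x2000 }, { 0x80000, 0x1000 } };

    return map_sgt(0x10000, chunks, 2);
}
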
501 u64 vpu_addr, struct sg_table *sgt) in ivpu_mmu_context_unmap_sgt() argument
517 ctx->id, dma_addr, vpu_addr, size); in ivpu_mmu_context_unmap_sgt()
519 ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size); in ivpu_mmu_context_unmap_sgt()
520 vpu_addr += size; in ivpu_mmu_context_unmap_sgt()
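
The unmap path mirrors the map path: one ivpu_mmu_context_unmap_pages() call per DMA segment, advancing vpu_addr by each segment's size. A short sketch using the same hypothetical chunk array in place of struct sg_table:

/* Sketch, not driver code: release one page range per DMA chunk. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct dma_chunk { uint64_t dma_addr; size_t size; };

static void unmap_pages(uint64_t vpu_addr, size_t size)
{
    printf("unmap vpu 0x%llx size 0x%zx\n", (unsigned long long)vpu_addr, size);
}

static void unmap_sgt(uint64_t vpu_addr, const struct dma_chunk *chunks, size_t n)
{
    size_t i;

    for (i = 0; i < n; i++) {
        unmap_pages(vpu_addr, chunks[i].size);
        vpu_addr += chunks[i].size;
    }
}

int main(void)
{
    struct dma_chunk chunks[] = { { 0x40000, 0x2000 }, { 0x80000, 0x1000 } };

    unmap_sgt(0x10000, chunks, 2);
    return 0;
}
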