Lines Matching defs:vpu_addr
240 u64 vpu_addr, dma_addr_t dma_addr, u64 prot)
243 int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
244 int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
245 int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
246 int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
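
The four FIELD_GET() lines above recur in every map/unmap/split helper in this listing: they slice a VPU virtual address into one index per page-table level. A minimal standalone sketch of that decomposition, assuming 4 KB pages and a 9-bit index per level (PTE bits 12..20, PMD 21..29, PUD 30..38, PGD 39..47); the shift and mask values are illustrative assumptions, not quotes from the driver headers:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed layout: 4 KB pages, 9-bit index per level (illustration only). */
    #define IDX_BITS 9
    #define IDX_MASK ((1u << IDX_BITS) - 1)

    static unsigned int pt_index(uint64_t vpu_addr, unsigned int shift)
    {
        /* Rough equivalent of FIELD_GET(GENMASK(shift + 8, shift), vpu_addr). */
        return (vpu_addr >> shift) & IDX_MASK;
    }

    int main(void)
    {
        uint64_t vpu_addr = 0x123456789000ULL;

        printf("pgd=%u pud=%u pmd=%u pte=%u\n",
               pt_index(vpu_addr, 39), pt_index(vpu_addr, 30),
               pt_index(vpu_addr, 21), pt_index(vpu_addr, 12));
        return 0;
    }
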
274 ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
279 drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size));
285 int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
291 vpu_addr += IVPU_MMU_PAGE_SIZE;
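
The map_cont_64k fragment above warns if the address is not aligned to the block size and then fills the block as a run of 4 KB page entries, stepping the VPU and DMA addresses together. A hedged standalone sketch of that loop; map_page() is a stub standing in for ivpu_mmu_context_map_page(), and the 4 KB / 64 KB constants are assumptions matching the IVPU_MMU_PAGE_SIZE / IVPU_MMU_CONT_PAGES_SIZE names in the excerpt:

    #include <assert.h>
    #include <stdint.h>
    #include <stddef.h>

    #define PAGE_4K  0x1000ULL   /* assumed IVPU_MMU_PAGE_SIZE */
    #define CONT_64K 0x10000ULL  /* assumed IVPU_MMU_CONT_PAGES_SIZE */

    /* Stub for ivpu_mmu_context_map_page(): pretend to write one 4 KB PTE. */
    static int map_page(uint64_t vpu_addr, uint64_t dma_addr)
    {
        (void)vpu_addr;
        (void)dma_addr;
        return 0;
    }

    /* One 64 KB contiguous block becomes sixteen consecutive 4 KB entries. */
    static int map_cont_64k(uint64_t vpu_addr, uint64_t dma_addr)
    {
        size_t size = CONT_64K;

        assert(vpu_addr % CONT_64K == 0);  /* mirrors the alignment WARN above */

        while (size) {
            int ret = map_page(vpu_addr, dma_addr);

            if (ret)
                return ret;
            size -= PAGE_4K;
            vpu_addr += PAGE_4K;
            dma_addr += PAGE_4K;
        }
        return 0;
    }

    int main(void)
    {
        return map_cont_64k(0x10000, 0x200000);
    }
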
298 static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
300 int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
301 int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
302 int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
303 int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
311 u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot)
318 IS_ALIGNED(vpu_addr | dma_addr, IVPU_MMU_CONT_PAGES_SIZE)) {
319 ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot);
322 ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
329 vpu_addr += map_size;
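
In the map_pages fragment above, each loop iteration picks a granularity: the 64 KB contiguous path when the VPU and DMA addresses are both 64 KB-aligned (and, in the full function, enough size remains), otherwise a single 4 KB page; both addresses then advance by the chosen map_size. A small sketch of just that choice, with illustrative constants:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_4K  0x1000ULL   /* assumed IVPU_MMU_PAGE_SIZE */
    #define CONT_64K 0x10000ULL  /* assumed IVPU_MMU_CONT_PAGES_SIZE */

    /* Granularity choice per iteration: 64 KB when the remaining size and
     * both addresses allow it, else a single 4 KB page. */
    static size_t pick_map_size(uint64_t vpu_addr, uint64_t dma_addr, size_t size)
    {
        if (size >= CONT_64K && (vpu_addr | dma_addr) % CONT_64K == 0)
            return CONT_64K;
        return PAGE_4K;
    }

    int main(void)
    {
        /* 128 KB at a 64 KB-aligned address pair: first step can use 64 KB. */
        printf("%zu\n", pick_map_size(0x10000, 0x40000, 0x20000));
        /* Unaligned VPU address: falls back to one 4 KB page. */
        printf("%zu\n", pick_map_size(0x11000, 0x40000, 0x20000));
        return 0;
    }
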
338 u64 vpu_addr)
340 int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
341 int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
342 int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
343 int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
349 u64 vpu_addr)
351 int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
352 int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
353 int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
354 int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
360 u64 vpu_addr)
362 u64 start = ALIGN_DOWN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE);
363 u64 end = ALIGN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE);
366 ivpu_dbg(vdev, MMU_MAP, "Split 64K page ctx: %u vpu_addr: 0x%llx\n", ctx->id, vpu_addr);
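
In the split helper above, start and end bracket the 64 KB block containing vpu_addr: ALIGN_DOWN rounds down to the block base, ALIGN rounds up to the next boundary, so an already aligned address yields an empty range and nothing is split. A standalone sketch of that arithmetic with userspace stand-ins for the kernel's ALIGN()/ALIGN_DOWN() macros:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_4K  0x1000ULL
    #define CONT_64K 0x10000ULL

    /* Userspace stand-ins for the kernel's ALIGN_DOWN()/ALIGN() macros. */
    #define ALIGN_DOWN_U64(x, a) ((x) & ~((uint64_t)(a) - 1))
    #define ALIGN_UP_U64(x, a)   (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

    int main(void)
    {
        uint64_t vpu_addr = 0x11000;  /* inside, not at the start of, a 64 KB block */
        uint64_t start = ALIGN_DOWN_U64(vpu_addr, CONT_64K);
        uint64_t end = ALIGN_UP_U64(vpu_addr, CONT_64K);
        uint64_t offset;
        unsigned int entries = 0;

        /* The split rewrites every 4 KB entry of the containing block. */
        for (offset = 0; start + offset < end; offset += PAGE_4K)
            entries++;

        /* Prints start=0x10000 end=0x20000 entries=16 for this address. */
        printf("start=0x%llx end=0x%llx entries=%u\n",
               (unsigned long long)start, (unsigned long long)end, entries);
        return 0;
    }
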
375 ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
378 u64 end = vpu_addr + size;
385 if (drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr | size, IVPU_MMU_PAGE_SIZE)))
390 ivpu_dbg(vdev, MMU_MAP, "Set read-only pages ctx: %u vpu_addr: 0x%llx size: %lu\n",
391 ctx->id, vpu_addr, size);
395 if (!IS_ALIGNED(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE))
396 ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr);
399 if (!IS_ALIGNED(vpu_addr + size, IVPU_MMU_CONT_PAGES_SIZE))
400 ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr + size);
404 if (vpu_addr < end)
405 ivpu_mmu_context_set_page_ro(vdev, ctx, vpu_addr);
407 vpu_addr += IVPU_MMU_PAGE_SIZE;
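
The set_pages_ro fragment above first splits any 64 KB contiguous mapping cut by the start or end of the requested range, then walks the range in 4 KB steps marking each page read-only. A flattened, hedged sketch of that flow (the shown fragment suggests an inner loop that this sketch collapses); split() and set_ro() are stubs standing in for the split_64k_page and set_page_ro helpers, and the constants are assumed:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_4K  0x1000ULL
    #define CONT_64K 0x10000ULL

    /* Stubs for ivpu_mmu_context_split_64k_page() / ..._set_page_ro(). */
    static void split(uint64_t vpu_addr)  { printf("split block of 0x%llx\n", (unsigned long long)vpu_addr); }
    static void set_ro(uint64_t vpu_addr) { printf("ro 0x%llx\n", (unsigned long long)vpu_addr); }

    /* Mark [vpu_addr, vpu_addr + size) read-only; 4 KB-aligned input assumed. */
    static void set_pages_ro(uint64_t vpu_addr, size_t size)
    {
        uint64_t end = vpu_addr + size;

        if (!size)
            return;

        /* Break up 64 KB blocks straddling either end of the range. */
        if (vpu_addr % CONT_64K)
            split(vpu_addr);
        if ((vpu_addr + size) % CONT_64K)
            split(vpu_addr + size);

        while (vpu_addr < end) {
            set_ro(vpu_addr);
            vpu_addr += PAGE_4K;
        }
    }

    int main(void)
    {
        set_pages_ro(0x11000, 0x3000);
        return 0;
    }
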
422 static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
425 ivpu_mmu_context_unmap_page(ctx, vpu_addr);
426 vpu_addr += IVPU_MMU_PAGE_SIZE;
433 u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
435 size_t start_vpu_addr = vpu_addr;
444 if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
447 if (vpu_addr & ~IVPU_MMU_VPU_ADDRESS_MASK)
460 ivpu_dbg(vdev, MMU_MAP, "Map ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
461 ctx->id, dma_addr, vpu_addr, size);
463 ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
468 vpu_addr += size;
494 ivpu_mmu_context_unmap_pages(ctx, start_vpu_addr, vpu_addr - start_vpu_addr);
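
The map_sgt fragment above walks a scatter-gather table, maps each DMA segment at the next VPU address, and on failure unmaps everything placed so far (start_vpu_addr up to the current vpu_addr). A standalone sketch of that walk-and-unwind pattern over a plain array of segments; the real driver iterates a struct sg_table, so the segment type and helper names here are illustrative:

    #include <stdint.h>
    #include <stddef.h>

    struct seg {             /* illustrative stand-in for one sg_table entry */
        uint64_t dma_addr;
        size_t size;
    };

    /* Stubs standing in for ivpu_mmu_context_map_pages()/..._unmap_pages(). */
    static int map_pages(uint64_t vpu_addr, uint64_t dma_addr, size_t size)
    {
        (void)vpu_addr; (void)dma_addr; (void)size;
        return 0;
    }

    static void unmap_pages(uint64_t vpu_addr, size_t size)
    {
        (void)vpu_addr; (void)size;
    }

    static int map_segments(uint64_t vpu_addr, const struct seg *segs, size_t n)
    {
        uint64_t start_vpu_addr = vpu_addr;
        size_t i;

        for (i = 0; i < n; i++) {
            int ret = map_pages(vpu_addr, segs[i].dma_addr, segs[i].size);

            if (ret) {
                /* Unwind every page mapped before the failure. */
                unmap_pages(start_vpu_addr, vpu_addr - start_vpu_addr);
                return ret;
            }
            vpu_addr += segs[i].size;
        }
        return 0;
    }

    int main(void)
    {
        const struct seg segs[] = { { 0x100000, 0x4000 }, { 0x300000, 0x1000 } };

        return map_segments(0x10000, segs, 2);
    }
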
501 u64 vpu_addr, struct sg_table *sgt)
516 ivpu_dbg(vdev, MMU_MAP, "Unmap ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
517 ctx->id, dma_addr, vpu_addr, size);
519 ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
520 vpu_addr += size;