// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/highmem.h>
#include <linux/set_memory.h>

#include <drm/drm_cache.h>

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"

#define IVPU_MMU_VPU_ADDRESS_MASK GENMASK(47, 12)
#define IVPU_MMU_PGD_INDEX_MASK GENMASK(47, 39)
#define IVPU_MMU_PUD_INDEX_MASK GENMASK(38, 30)
#define IVPU_MMU_PMD_INDEX_MASK GENMASK(29, 21)
#define IVPU_MMU_PTE_INDEX_MASK GENMASK(20, 12)
#define IVPU_MMU_ENTRY_FLAGS_MASK (BIT(52) | GENMASK(11, 0))
#define IVPU_MMU_ENTRY_FLAG_CONT BIT(52)
#define IVPU_MMU_ENTRY_FLAG_NG BIT(11)
#define IVPU_MMU_ENTRY_FLAG_AF BIT(10)
#define IVPU_MMU_ENTRY_FLAG_USER BIT(6)
#define IVPU_MMU_ENTRY_FLAG_LLC_COHERENT BIT(2)
#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE BIT(1)
#define IVPU_MMU_ENTRY_FLAG_VALID BIT(0)

#define IVPU_MMU_PAGE_SIZE SZ_4K
#define IVPU_MMU_CONT_PAGES_SIZE (IVPU_MMU_PAGE_SIZE * 16)
#define IVPU_MMU_PTE_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE)
#define IVPU_MMU_PMD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE)
#define IVPU_MMU_PUD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PMD_MAP_SIZE)
#define IVPU_MMU_PGD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PUD_MAP_SIZE)
#define IVPU_MMU_PGTABLE_SIZE (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64))

#define IVPU_MMU_DUMMY_ADDRESS 0xdeadb000
#define IVPU_MMU_ENTRY_VALID (IVPU_MMU_ENTRY_FLAG_TYPE_PAGE | IVPU_MMU_ENTRY_FLAG_VALID)
#define IVPU_MMU_ENTRY_INVALID (IVPU_MMU_DUMMY_ADDRESS & ~IVPU_MMU_ENTRY_FLAGS_MASK)
#define IVPU_MMU_ENTRY_MAPPED (IVPU_MMU_ENTRY_FLAG_AF | IVPU_MMU_ENTRY_FLAG_USER | \
                               IVPU_MMU_ENTRY_FLAG_NG | IVPU_MMU_ENTRY_VALID)

static void *ivpu_pgtable_alloc_page(struct ivpu_device *vdev, dma_addr_t *dma)
{
        dma_addr_t dma_addr;
        struct page *page;
        void *cpu;

        page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
        if (!page)
                return NULL;

        set_pages_array_wc(&page, 1);

        dma_addr = dma_map_page(vdev->drm.dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(vdev->drm.dev, dma_addr))
                goto err_free_page;

        cpu = vmap(&page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
        if (!cpu)
                goto err_dma_unmap_page;

        *dma = dma_addr;
        return cpu;

err_dma_unmap_page:
        dma_unmap_page(vdev->drm.dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

err_free_page:
        put_page(page);
        return NULL;
}

static void ivpu_pgtable_free_page(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
{
        struct page *page;

        if (cpu_addr) {
                page = vmalloc_to_page(cpu_addr);
                vunmap(cpu_addr);
                dma_unmap_page(vdev->drm.dev, dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK, PAGE_SIZE,
                               DMA_BIDIRECTIONAL);
                set_pages_array_wb(&page, 1);
                put_page(page);
        }
}

static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
        dma_addr_t pgd_dma;

        pgtable->pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma);
        if (!pgtable->pgd_dma_ptr)
                return -ENOMEM;

        pgtable->pgd_dma = pgd_dma;

        return 0;
}

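/*
 * Tear down a whole 4-level page table: walk the CPU-side shadow arrays
 * (pud_ptrs/pmd_ptrs/pte_ptrs), free every PTE, PMD and PUD page that was
 * allocated, free the shadow arrays themselves, and finally free the PGD
 * page. Entries read back from the DMA-visible tables still carry flag
 * bits, which ivpu_pgtable_free_page() masks off with
 * ~IVPU_MMU_ENTRY_FLAGS_MASK before unmapping.
 */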
static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
        int pgd_idx, pud_idx, pmd_idx;
        dma_addr_t pud_dma, pmd_dma, pte_dma;
        u64 *pud_dma_ptr, *pmd_dma_ptr, *pte_dma_ptr;

        for (pgd_idx = 0; pgd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_idx) {
                pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
                pud_dma = pgtable->pgd_dma_ptr[pgd_idx];

                if (!pud_dma_ptr)
                        continue;

                for (pud_idx = 0; pud_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pud_idx) {
                        pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
                        pmd_dma = pgtable->pud_ptrs[pgd_idx][pud_idx];

                        if (!pmd_dma_ptr)
                                continue;

                        for (pmd_idx = 0; pmd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_idx) {
                                pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
                                pte_dma = pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx];

                                ivpu_pgtable_free_page(vdev, pte_dma_ptr, pte_dma);
                        }

                        kfree(pgtable->pte_ptrs[pgd_idx][pud_idx]);
                        ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
                }

                kfree(pgtable->pmd_ptrs[pgd_idx]);
                kfree(pgtable->pte_ptrs[pgd_idx]);
                ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
        }

        ivpu_pgtable_free_page(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
}

static u64*
ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx)
{
        u64 *pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
        dma_addr_t pud_dma;

        if (pud_dma_ptr)
                return pud_dma_ptr;

        pud_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pud_dma);
        if (!pud_dma_ptr)
                return NULL;

        drm_WARN_ON(&vdev->drm, pgtable->pmd_ptrs[pgd_idx]);
        pgtable->pmd_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pmd_ptrs[pgd_idx])
                goto err_free_pud_dma_ptr;

        drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx]);
        pgtable->pte_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pte_ptrs[pgd_idx])
                goto err_free_pmd_ptrs;

        pgtable->pud_ptrs[pgd_idx] = pud_dma_ptr;
        pgtable->pgd_dma_ptr[pgd_idx] = pud_dma | IVPU_MMU_ENTRY_VALID;

        return pud_dma_ptr;

err_free_pmd_ptrs:
        kfree(pgtable->pmd_ptrs[pgd_idx]);

err_free_pud_dma_ptr:
        ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
        return NULL;
}

static u64*
ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx,
                    int pud_idx)
{
        u64 *pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
        dma_addr_t pmd_dma;

        if (pmd_dma_ptr)
                return pmd_dma_ptr;

        pmd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pmd_dma);
        if (!pmd_dma_ptr)
                return NULL;

        drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx][pud_idx]);
        pgtable->pte_ptrs[pgd_idx][pud_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pte_ptrs[pgd_idx][pud_idx])
                goto err_free_pmd_dma_ptr;

        pgtable->pmd_ptrs[pgd_idx][pud_idx] = pmd_dma_ptr;
        pgtable->pud_ptrs[pgd_idx][pud_idx] = pmd_dma | IVPU_MMU_ENTRY_VALID;

        return pmd_dma_ptr;

err_free_pmd_dma_ptr:
        ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
        return NULL;
}

static u64*
ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
                    int pgd_idx, int pud_idx, int pmd_idx)
{
        u64 *pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
        dma_addr_t pte_dma;

        if (pte_dma_ptr)
                return pte_dma_ptr;

        pte_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pte_dma);
        if (!pte_dma_ptr)
                return NULL;

        pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma_ptr;
        pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma | IVPU_MMU_ENTRY_VALID;

        return pte_dma_ptr;
}

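/*
 * A VPU address is split into one 9-bit index per page-table level plus a
 * 12-bit page offset, matching the IVPU_MMU_*_INDEX_MASK definitions above:
 *
 *   47......39 38......30 29......21 20......12 11........0
 *   [ PGD idx ][ PUD idx ][ PMD idx ][ PTE idx ][ page off  ]
 *
 * For example, vpu_addr 0x8000_0000 resolves to pgd_idx 0, pud_idx 2,
 * pmd_idx 0 and pte_idx 0. Missing intermediate tables are allocated on
 * demand by the ivpu_mmu_ensure_*() helpers.
 */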
static int
ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                          u64 vpu_addr, dma_addr_t dma_addr, u64 prot)
{
        u64 *pte;
        int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
        int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
        int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
        int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

        /* Allocate PUD - second level page table if needed */
        if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
                return -ENOMEM;

        /* Allocate PMD - third level page table if needed */
        if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx))
                return -ENOMEM;

        /* Allocate PTE - fourth level page table if needed */
        pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx);
        if (!pte)
                return -ENOMEM;

        /* Update PTE */
        pte[pte_idx] = dma_addr | prot;

        return 0;
}

static int
ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
                              dma_addr_t dma_addr, u64 prot)
{
        size_t size = IVPU_MMU_CONT_PAGES_SIZE;

        drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size));
        drm_WARN_ON(&vdev->drm, !IS_ALIGNED(dma_addr, size));

        prot |= IVPU_MMU_ENTRY_FLAG_CONT;

        while (size) {
                int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);

                if (ret)
                        return ret;

                size -= IVPU_MMU_PAGE_SIZE;
                vpu_addr += IVPU_MMU_PAGE_SIZE;
                dma_addr += IVPU_MMU_PAGE_SIZE;
        }

        return 0;
}

static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
{
        int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
        int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
        int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
        int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

        /* Update PTE with dummy physical address and clear flags */
        ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] = IVPU_MMU_ENTRY_INVALID;
}

static int
ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                           u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot)
{
        int map_size;
        int ret;

        while (size) {
                if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE &&
                    IS_ALIGNED(vpu_addr | dma_addr, IVPU_MMU_CONT_PAGES_SIZE)) {
                        ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot);
                        map_size = IVPU_MMU_CONT_PAGES_SIZE;
                } else {
                        ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
                        map_size = IVPU_MMU_PAGE_SIZE;
                }

                if (ret)
                        return ret;

                vpu_addr += map_size;
                dma_addr += map_size;
                size -= map_size;
        }

        return 0;
}

static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
        while (size) {
                ivpu_mmu_context_unmap_page(ctx, vpu_addr);
                vpu_addr += IVPU_MMU_PAGE_SIZE;
                size -= IVPU_MMU_PAGE_SIZE;
        }
}

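/*
 * Map a DMA-mapped scatter-gather table at vpu_addr. The page-table pages
 * are vmap'ed write-combined, so the wmb() below is what makes the updates
 * visible in memory before ivpu_mmu_invalidate_tlb() has the device walk
 * them again; the unmap path uses the same pattern.
 */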
int
ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                         u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
{
        struct scatterlist *sg;
        int ret;
        u64 prot;
        u64 i;

        if (drm_WARN_ON(&vdev->drm, !ctx))
                return -EINVAL;

        if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
                return -EINVAL;

        if (vpu_addr & ~IVPU_MMU_VPU_ADDRESS_MASK)
                return -EINVAL;

        prot = IVPU_MMU_ENTRY_MAPPED;
        if (llc_coherent)
                prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT;

        mutex_lock(&ctx->lock);

        for_each_sgtable_dma_sg(sgt, sg, i) {
                dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
                size_t size = sg_dma_len(sg) + sg->offset;

                ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
                if (ret) {
                        ivpu_err(vdev, "Failed to map context pages\n");
                        mutex_unlock(&ctx->lock);
                        return ret;
                }
                vpu_addr += size;
        }

        /* Ensure page table modifications are flushed from wc buffers to memory */
        wmb();
        mutex_unlock(&ctx->lock);

        ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
        if (ret)
                ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
        return ret;
}

void
ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                           u64 vpu_addr, struct sg_table *sgt)
{
        struct scatterlist *sg;
        int ret;
        u64 i;

        if (drm_WARN_ON(&vdev->drm, !ctx))
                return;

        mutex_lock(&ctx->lock);

        for_each_sgtable_dma_sg(sgt, sg, i) {
                size_t size = sg_dma_len(sg) + sg->offset;

                ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
                vpu_addr += size;
        }

        /* Ensure page table modifications are flushed from wc buffers to memory */
        wmb();
        mutex_unlock(&ctx->lock);

        ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
        if (ret)
                ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
}

int
ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
                             u64 size, struct drm_mm_node *node)
{
        int ret;

        WARN_ON(!range);

        mutex_lock(&ctx->lock);
        if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE) {
                ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0,
                                                  range->start, range->end, DRM_MM_INSERT_BEST);
                if (!ret)
                        goto unlock;
        }

        ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0,
                                          range->start, range->end, DRM_MM_INSERT_BEST);
unlock:
        mutex_unlock(&ctx->lock);
        return ret;
}

void
ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node)
{
        mutex_lock(&ctx->lock);
        drm_mm_remove_node(node);
        mutex_unlock(&ctx->lock);
}

static int
ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
{
        u64 start, end;
        int ret;

        mutex_init(&ctx->lock);

        ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable);
        if (ret) {
                ivpu_err(vdev, "Failed to initialize pgtable for ctx %u: %d\n", context_id, ret);
                return ret;
        }

        if (!context_id) {
                start = vdev->hw->ranges.global.start;
                end = vdev->hw->ranges.shave.end;
        } else {
                start = vdev->hw->ranges.user.start;
                end = vdev->hw->ranges.dma.end;
        }

        drm_mm_init(&ctx->mm, start, end - start);
        ctx->id = context_id;

        return 0;
}

static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
        if (drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd_dma_ptr))
                return;

        mutex_destroy(&ctx->lock);
        ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);
        drm_mm_takedown(&ctx->mm);

        ctx->pgtable.pgd_dma_ptr = NULL;
        ctx->pgtable.pgd_dma = 0;
}

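/*
 * Context lifecycle helpers. The global context (context ID 0) spans the
 * hardware's global..shave address range, while user and reserved contexts
 * get the user..dma range and their own SSID; a user context's page table
 * is handed to the MMU with ivpu_mmu_set_pgtable() and detached again with
 * ivpu_mmu_clear_pgtable().
 */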
int ivpu_mmu_global_context_init(struct ivpu_device *vdev)
{
        return ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
}

void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
{
        return ivpu_mmu_context_fini(vdev, &vdev->gctx);
}

int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev)
{
        return ivpu_mmu_user_context_init(vdev, &vdev->rctx, IVPU_RESERVED_CONTEXT_MMU_SSID);
}

void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)
{
        return ivpu_mmu_user_context_fini(vdev, &vdev->rctx);
}

void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
{
        struct ivpu_file_priv *file_priv;

        xa_lock(&vdev->context_xa);

        file_priv = xa_load(&vdev->context_xa, ssid);
        if (file_priv)
                file_priv->has_mmu_faults = true;

        xa_unlock(&vdev->context_xa);
}

int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id)
{
        int ret;

        drm_WARN_ON(&vdev->drm, !ctx_id);

        ret = ivpu_mmu_context_init(vdev, ctx, ctx_id);
        if (ret) {
                ivpu_err(vdev, "Failed to initialize context %u: %d\n", ctx_id, ret);
                return ret;
        }

        ret = ivpu_mmu_set_pgtable(vdev, ctx_id, &ctx->pgtable);
        if (ret) {
                ivpu_err(vdev, "Failed to set page table for context %u: %d\n", ctx_id, ret);
                goto err_context_fini;
        }

        return 0;

err_context_fini:
        ivpu_mmu_context_fini(vdev, ctx);
        return ret;
}

void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
        drm_WARN_ON(&vdev->drm, !ctx->id);

        ivpu_mmu_clear_pgtable(vdev, ctx->id);
        ivpu_mmu_context_fini(vdev, ctx);
}