// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/highmem.h>
#include <linux/set_memory.h>
#include <linux/vmalloc.h>

#include <drm/drm_cache.h>

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"

#define IVPU_MMU_VPU_ADDRESS_MASK        GENMASK(47, 12)
#define IVPU_MMU_PGD_INDEX_MASK          GENMASK(47, 39)
#define IVPU_MMU_PUD_INDEX_MASK          GENMASK(38, 30)
#define IVPU_MMU_PMD_INDEX_MASK          GENMASK(29, 21)
#define IVPU_MMU_PTE_INDEX_MASK          GENMASK(20, 12)
#define IVPU_MMU_ENTRY_FLAGS_MASK        (BIT(52) | GENMASK(11, 0))
#define IVPU_MMU_ENTRY_FLAG_CONT         BIT(52)
#define IVPU_MMU_ENTRY_FLAG_NG           BIT(11)
#define IVPU_MMU_ENTRY_FLAG_AF           BIT(10)
#define IVPU_MMU_ENTRY_FLAG_RO           BIT(7)
#define IVPU_MMU_ENTRY_FLAG_USER         BIT(6)
#define IVPU_MMU_ENTRY_FLAG_LLC_COHERENT BIT(2)
#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE    BIT(1)
#define IVPU_MMU_ENTRY_FLAG_VALID        BIT(0)

#define IVPU_MMU_PAGE_SIZE       SZ_4K
#define IVPU_MMU_CONT_PAGES_SIZE (IVPU_MMU_PAGE_SIZE * 16)
#define IVPU_MMU_PTE_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE)
#define IVPU_MMU_PMD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE)
#define IVPU_MMU_PUD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PMD_MAP_SIZE)
#define IVPU_MMU_PGD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PUD_MAP_SIZE)
#define IVPU_MMU_PGTABLE_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64))

#define IVPU_MMU_DUMMY_ADDRESS 0xdeadb000
#define IVPU_MMU_ENTRY_VALID   (IVPU_MMU_ENTRY_FLAG_TYPE_PAGE | IVPU_MMU_ENTRY_FLAG_VALID)
#define IVPU_MMU_ENTRY_INVALID (IVPU_MMU_DUMMY_ADDRESS & ~IVPU_MMU_ENTRY_FLAGS_MASK)
#define IVPU_MMU_ENTRY_MAPPED  (IVPU_MMU_ENTRY_FLAG_AF | IVPU_MMU_ENTRY_FLAG_USER | \
				IVPU_MMU_ENTRY_FLAG_NG | IVPU_MMU_ENTRY_VALID)

static void *ivpu_pgtable_alloc_page(struct ivpu_device *vdev, dma_addr_t *dma)
{
	dma_addr_t dma_addr;
	struct page *page;
	void *cpu;

	page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
	if (!page)
		return NULL;

	set_pages_array_wc(&page, 1);

	dma_addr = dma_map_page(vdev->drm.dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(vdev->drm.dev, dma_addr))
		goto err_free_page;

	cpu = vmap(&page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	if (!cpu)
		goto err_dma_unmap_page;

	*dma = dma_addr;
	return cpu;

err_dma_unmap_page:
	dma_unmap_page(vdev->drm.dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

err_free_page:
	put_page(page);
	return NULL;
}

static void ivpu_pgtable_free_page(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
{
	struct page *page;

	if (cpu_addr) {
		page = vmalloc_to_page(cpu_addr);
		vunmap(cpu_addr);
		dma_unmap_page(vdev->drm.dev, dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		set_pages_array_wb(&page, 1);
		put_page(page);
	}
}

static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
	int pgd_idx, pud_idx, pmd_idx;
	dma_addr_t pud_dma, pmd_dma, pte_dma;
	u64 *pud_dma_ptr, *pmd_dma_ptr, *pte_dma_ptr;

	for (pgd_idx = 0; pgd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_idx) {
		pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
		pud_dma = pgtable->pgd_dma_ptr[pgd_idx];

		if (!pud_dma_ptr)
			continue;

		for (pud_idx = 0; pud_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pud_idx) {
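			/* Free every PTE table under this PUD entry, then the PMD table itself */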
			pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
			pmd_dma = pgtable->pud_ptrs[pgd_idx][pud_idx];

			if (!pmd_dma_ptr)
				continue;

			for (pmd_idx = 0; pmd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_idx) {
				pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
				pte_dma = pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx];

				ivpu_pgtable_free_page(vdev, pte_dma_ptr, pte_dma);
			}

			kfree(pgtable->pte_ptrs[pgd_idx][pud_idx]);
			ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
		}

		kfree(pgtable->pmd_ptrs[pgd_idx]);
		kfree(pgtable->pte_ptrs[pgd_idx]);
		ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
	}

	ivpu_pgtable_free_page(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
	pgtable->pgd_dma_ptr = NULL;
	pgtable->pgd_dma = 0;
}

static u64*
ivpu_mmu_ensure_pgd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
	u64 *pgd_dma_ptr = pgtable->pgd_dma_ptr;
	dma_addr_t pgd_dma;

	if (pgd_dma_ptr)
		return pgd_dma_ptr;

	pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma);
	if (!pgd_dma_ptr)
		return NULL;

	pgtable->pgd_dma_ptr = pgd_dma_ptr;
	pgtable->pgd_dma = pgd_dma;

	return pgd_dma_ptr;
}

static u64*
ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx)
{
	u64 *pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
	dma_addr_t pud_dma;

	if (pud_dma_ptr)
		return pud_dma_ptr;

	pud_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pud_dma);
	if (!pud_dma_ptr)
		return NULL;

	drm_WARN_ON(&vdev->drm, pgtable->pmd_ptrs[pgd_idx]);
	pgtable->pmd_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
	if (!pgtable->pmd_ptrs[pgd_idx])
		goto err_free_pud_dma_ptr;

	drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx]);
	pgtable->pte_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
	if (!pgtable->pte_ptrs[pgd_idx])
		goto err_free_pmd_ptrs;

	pgtable->pud_ptrs[pgd_idx] = pud_dma_ptr;
	pgtable->pgd_dma_ptr[pgd_idx] = pud_dma | IVPU_MMU_ENTRY_VALID;

	return pud_dma_ptr;

err_free_pmd_ptrs:
	kfree(pgtable->pmd_ptrs[pgd_idx]);

err_free_pud_dma_ptr:
	ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
	return NULL;
}

static u64*
ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx,
		    int pud_idx)
{
	u64 *pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
	dma_addr_t pmd_dma;

	if (pmd_dma_ptr)
		return pmd_dma_ptr;

	pmd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pmd_dma);
	if (!pmd_dma_ptr)
		return NULL;

	drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx][pud_idx]);
	pgtable->pte_ptrs[pgd_idx][pud_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
	if (!pgtable->pte_ptrs[pgd_idx][pud_idx])
		goto err_free_pmd_dma_ptr;

	pgtable->pmd_ptrs[pgd_idx][pud_idx] = pmd_dma_ptr;
	pgtable->pud_ptrs[pgd_idx][pud_idx] = pmd_dma | IVPU_MMU_ENTRY_VALID;

	return pmd_dma_ptr;

err_free_pmd_dma_ptr:
	ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
	return NULL;
}

static u64*
ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
		    int pgd_idx, int pud_idx, int pmd_idx)
{
	u64 *pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
	dma_addr_t pte_dma;

	if (pte_dma_ptr)
		return pte_dma_ptr;

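	/* No PTE table yet for this PMD entry - allocate one and link it in */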
	pte_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pte_dma);
	if (!pte_dma_ptr)
		return NULL;

	pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma_ptr;
	pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma | IVPU_MMU_ENTRY_VALID;

	return pte_dma_ptr;
}

static int
ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			  u64 vpu_addr, dma_addr_t dma_addr, u64 prot)
{
	u64 *pte;
	int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
	int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
	int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
	int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

	drm_WARN_ON(&vdev->drm, ctx->id == IVPU_RESERVED_CONTEXT_MMU_SSID);

	/* Allocate PGD - first level page table if needed */
	if (!ivpu_mmu_ensure_pgd(vdev, &ctx->pgtable))
		return -ENOMEM;

	/* Allocate PUD - second level page table if needed */
	if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
		return -ENOMEM;

	/* Allocate PMD - third level page table if needed */
	if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx))
		return -ENOMEM;

	/* Allocate PTE - fourth level page table if needed */
	pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx);
	if (!pte)
		return -ENOMEM;

	/* Update PTE */
	pte[pte_idx] = dma_addr | prot;

	return 0;
}

static int
ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
			      dma_addr_t dma_addr, u64 prot)
{
	size_t size = IVPU_MMU_CONT_PAGES_SIZE;

	drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size));
	drm_WARN_ON(&vdev->drm, !IS_ALIGNED(dma_addr, size));

	prot |= IVPU_MMU_ENTRY_FLAG_CONT;

	while (size) {
		int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);

		if (ret)
			return ret;

		size -= IVPU_MMU_PAGE_SIZE;
		vpu_addr += IVPU_MMU_PAGE_SIZE;
		dma_addr += IVPU_MMU_PAGE_SIZE;
	}

	return 0;
}

static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
{
	int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
	int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
	int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
	int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

	/* Update PTE with dummy physical address and clear flags */
	ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] = IVPU_MMU_ENTRY_INVALID;
}

static int
ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			   u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot)
{
	int map_size;
	int ret;

	while (size) {
		if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE &&
		    IS_ALIGNED(vpu_addr | dma_addr, IVPU_MMU_CONT_PAGES_SIZE)) {
			ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot);
			map_size = IVPU_MMU_CONT_PAGES_SIZE;
		} else {
			ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
			map_size = IVPU_MMU_PAGE_SIZE;
		}

		if (ret)
			return ret;

		vpu_addr += map_size;
		dma_addr += map_size;
		size -= map_size;
	}

	return 0;
}

static void ivpu_mmu_context_set_page_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
					 u64 vpu_addr)
{
	int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
	int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
	int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
	int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

	ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] |= IVPU_MMU_ENTRY_FLAG_RO;
}

static void ivpu_mmu_context_split_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
					u64 vpu_addr)
{
	int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
	int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
	int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
	int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

	ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] &= ~IVPU_MMU_ENTRY_FLAG_CONT;
}

static void ivpu_mmu_context_split_64k_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
					    u64 vpu_addr)
{
	u64 start = ALIGN_DOWN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE);
	u64 end = ALIGN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE);
	u64 offset = 0;

	ivpu_dbg(vdev, MMU_MAP, "Split 64K page ctx: %u vpu_addr: 0x%llx\n", ctx->id, vpu_addr);

	while (start + offset < end) {
		ivpu_mmu_context_split_page(vdev, ctx, start + offset);
		offset += IVPU_MMU_PAGE_SIZE;
	}
}

int
ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
			      size_t size)
{
	u64 end = vpu_addr + size;
	size_t size_left = size;
	int ret;

	if (size == 0)
		return 0;

	if (drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr | size, IVPU_MMU_PAGE_SIZE)))
		return -EINVAL;

	mutex_lock(&ctx->lock);

	ivpu_dbg(vdev, MMU_MAP, "Set read-only pages ctx: %u vpu_addr: 0x%llx size: %lu\n",
		 ctx->id, vpu_addr, size);

	if (!ivpu_disable_mmu_cont_pages) {
		/* Split 64K contiguous page at the beginning if needed */
		if (!IS_ALIGNED(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE))
			ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr);

		/* Split 64K contiguous page at the end if needed */
		if (!IS_ALIGNED(vpu_addr + size, IVPU_MMU_CONT_PAGES_SIZE))
			ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr + size);
	}

	while (size_left) {
		if (vpu_addr < end)
			ivpu_mmu_context_set_page_ro(vdev, ctx, vpu_addr);

		vpu_addr += IVPU_MMU_PAGE_SIZE;
		size_left -= IVPU_MMU_PAGE_SIZE;
	}

	/* Ensure page table modifications are flushed from wc buffers to memory */
	wmb();

	mutex_unlock(&ctx->lock);
	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
	if (ret)
		ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);

	return 0;
}

static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
	while (size) {
		ivpu_mmu_context_unmap_page(ctx, vpu_addr);
		vpu_addr += IVPU_MMU_PAGE_SIZE;
		size -= IVPU_MMU_PAGE_SIZE;
	}
}

int
ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			 u64 vpu_addr, struct sg_table *sgt, bool llc_coherent, bool read_only)
{
	size_t start_vpu_addr = vpu_addr;
	struct scatterlist *sg;
	int ret;
	u64 prot;
	u64 i;

	if (drm_WARN_ON(&vdev->drm, !ctx))
		return -EINVAL;

	if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
		return -EINVAL;

	if (vpu_addr & ~IVPU_MMU_VPU_ADDRESS_MASK)
		return -EINVAL;

	prot = IVPU_MMU_ENTRY_MAPPED;
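	/* Add optional attribute bits on top of the base mapping flags */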
	if (llc_coherent)
		prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT;
	if (read_only)
		prot |= IVPU_MMU_ENTRY_FLAG_RO;

	mutex_lock(&ctx->lock);

	for_each_sgtable_dma_sg(sgt, sg, i) {
		dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
		size_t size = sg_dma_len(sg) + sg->offset;

		ivpu_dbg(vdev, MMU_MAP, "Map ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
			 ctx->id, dma_addr, vpu_addr, size);

		ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
		if (ret) {
			ivpu_err(vdev, "Failed to map context pages\n");
			goto err_unmap_pages;
		}
		vpu_addr += size;
	}

	if (!ctx->is_cd_valid) {
		ret = ivpu_mmu_cd_set(vdev, ctx->id, &ctx->pgtable);
		if (ret) {
			ivpu_err(vdev, "Failed to set context descriptor for context %u: %d\n",
				 ctx->id, ret);
			goto err_unmap_pages;
		}
		ctx->is_cd_valid = true;
	}

	/* Ensure page table modifications are flushed from wc buffers to memory */
	wmb();

	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
	if (ret) {
		ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
		goto err_unmap_pages;
	}

	mutex_unlock(&ctx->lock);
	return 0;

err_unmap_pages:
	ivpu_mmu_context_unmap_pages(ctx, start_vpu_addr, vpu_addr - start_vpu_addr);
	mutex_unlock(&ctx->lock);
	return ret;
}

void
ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			   u64 vpu_addr, struct sg_table *sgt)
{
	struct scatterlist *sg;
	int ret;
	u64 i;

	if (drm_WARN_ON(&vdev->drm, !ctx))
		return;

	mutex_lock(&ctx->lock);

	for_each_sgtable_dma_sg(sgt, sg, i) {
		dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
		size_t size = sg_dma_len(sg) + sg->offset;

		ivpu_dbg(vdev, MMU_MAP, "Unmap ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
			 ctx->id, dma_addr, vpu_addr, size);

		ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
		vpu_addr += size;
	}

	/* Ensure page table modifications are flushed from wc buffers to memory */
	wmb();

	mutex_unlock(&ctx->lock);

	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
	if (ret)
		ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
}

int
ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
			     u64 size, struct drm_mm_node *node)
{
	int ret;

	WARN_ON(!range);

	mutex_lock(&ctx->lock);
	if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE) {
		ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0,
						  range->start, range->end, DRM_MM_INSERT_BEST);
		if (!ret)
			goto unlock;
	}

	ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0,
					  range->start, range->end, DRM_MM_INSERT_BEST);
unlock:
	mutex_unlock(&ctx->lock);
	return ret;
}

void
ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node)
{
	mutex_lock(&ctx->lock);
	drm_mm_remove_node(node);
	mutex_unlock(&ctx->lock);
}

void ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
{
	u64 start, end;

	mutex_init(&ctx->lock);

	if (!context_id) {
		start = vdev->hw->ranges.global.start;
		end = vdev->hw->ranges.shave.end;
	} else {
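		/* User contexts allocate from the combined user, shave and dma ranges */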
		start = min_t(u64, vdev->hw->ranges.user.start, vdev->hw->ranges.shave.start);
		end = max_t(u64, vdev->hw->ranges.user.end, vdev->hw->ranges.dma.end);
	}

	drm_mm_init(&ctx->mm, start, end - start);
	ctx->id = context_id;
}

void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
	if (ctx->is_cd_valid) {
		ivpu_mmu_cd_clear(vdev, ctx->id);
		ctx->is_cd_valid = false;
	}

	mutex_destroy(&ctx->lock);
	ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);
	drm_mm_takedown(&ctx->mm);
}

void ivpu_mmu_global_context_init(struct ivpu_device *vdev)
{
	ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
}

void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
{
	ivpu_mmu_context_fini(vdev, &vdev->gctx);
}

int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev)
{
	int ret;

	ivpu_mmu_context_init(vdev, &vdev->rctx, IVPU_RESERVED_CONTEXT_MMU_SSID);

	mutex_lock(&vdev->rctx.lock);

	if (!ivpu_mmu_ensure_pgd(vdev, &vdev->rctx.pgtable)) {
		ivpu_err(vdev, "Failed to allocate root page table for reserved context\n");
		ret = -ENOMEM;
		goto err_ctx_fini;
	}

	ret = ivpu_mmu_cd_set(vdev, vdev->rctx.id, &vdev->rctx.pgtable);
	if (ret) {
		ivpu_err(vdev, "Failed to set context descriptor for reserved context\n");
		goto err_ctx_fini;
	}

	mutex_unlock(&vdev->rctx.lock);
	return ret;

err_ctx_fini:
	mutex_unlock(&vdev->rctx.lock);
	ivpu_mmu_context_fini(vdev, &vdev->rctx);
	return ret;
}

void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)
{
	ivpu_mmu_cd_clear(vdev, vdev->rctx.id);
	ivpu_mmu_context_fini(vdev, &vdev->rctx);
}