1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2022 Intel Corporation 4 */ 5 6 #include "xe_pt.h" 7 8 #include "regs/xe_gtt_defs.h" 9 #include "xe_bo.h" 10 #include "xe_device.h" 11 #include "xe_drm_client.h" 12 #include "xe_exec_queue.h" 13 #include "xe_gt.h" 14 #include "xe_gt_stats.h" 15 #include "xe_migrate.h" 16 #include "xe_page_reclaim.h" 17 #include "xe_pt_types.h" 18 #include "xe_pt_walk.h" 19 #include "xe_res_cursor.h" 20 #include "xe_sched_job.h" 21 #include "xe_svm.h" 22 #include "xe_sync.h" 23 #include "xe_tlb_inval_job.h" 24 #include "xe_trace.h" 25 #include "xe_ttm_stolen_mgr.h" 26 #include "xe_userptr.h" 27 #include "xe_vm.h" 28 29 struct xe_pt_dir { 30 struct xe_pt pt; 31 /** @children: Array of page-table child nodes */ 32 struct xe_ptw *children[XE_PDES]; 33 /** @staging: Array of page-table staging nodes */ 34 struct xe_ptw *staging[XE_PDES]; 35 }; 36 37 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM) 38 #define xe_pt_set_addr(__xe_pt, __addr) ((__xe_pt)->addr = (__addr)) 39 #define xe_pt_addr(__xe_pt) ((__xe_pt)->addr) 40 #else 41 #define xe_pt_set_addr(__xe_pt, __addr) 42 #define xe_pt_addr(__xe_pt) 0ull 43 #endif 44 45 static const u64 xe_normal_pt_shifts[] = {12, 21, 30, 39, 48}; 46 static const u64 xe_compact_pt_shifts[] = {16, 21, 30, 39, 48}; 47 48 #define XE_PT_HIGHEST_LEVEL (ARRAY_SIZE(xe_normal_pt_shifts) - 1) 49 50 static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt) 51 { 52 return container_of(pt, struct xe_pt_dir, pt); 53 } 54 55 static struct xe_pt * 56 xe_pt_entry_staging(struct xe_pt_dir *pt_dir, unsigned int index) 57 { 58 return container_of(pt_dir->staging[index], struct xe_pt, base); 59 } 60 61 static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm, 62 unsigned int level) 63 { 64 struct xe_device *xe = tile_to_xe(tile); 65 u16 pat_index = xe->pat.idx[XE_CACHE_WB]; 66 u8 id = tile->id; 67 68 if (!xe_vm_has_scratch(vm)) 69 return 0; 70 71 if (level > MAX_HUGEPTE_LEVEL) 72 return 
vm->pt_ops->pde_encode_bo(vm->scratch_pt[id][level - 1]->bo,
					 0);

	return vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0) |
		XE_PTE_NULL;
}

/* Free the metadata for a page-table node; directories carry the child arrays. */
static void xe_pt_free(struct xe_pt *pt)
{
	if (pt->level)
		kfree(as_xe_pt_dir(pt));
	else
		kfree(pt);
}

/**
 * xe_pt_create() - Create a page-table.
 * @vm: The vm to create for.
 * @tile: The tile to create for.
 * @level: The page-table level.
 * @exec: The drm_exec object used to lock the vm.
 *
 * Allocate and initialize a single struct xe_pt metadata structure. Also
 * create the corresponding page-table bo, but don't initialize it. If the
 * level is greater than zero, then it's assumed to be a directory page-
 * table and the directory structure is also allocated and initialized to
 * NULL pointers.
 *
 * Return: A valid struct xe_pt pointer on success, Pointer error code on
 * error.
 */
struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
			   unsigned int level, struct drm_exec *exec)
{
	struct xe_pt *pt;
	struct xe_bo *bo;
	u32 bo_flags;
	int err;

	if (level) {
		struct xe_pt_dir *dir = kzalloc_obj(*dir);

		pt = (dir) ? &dir->pt : NULL;
	} else {
		pt = kzalloc_obj(*pt);
	}
	if (!pt)
		return ERR_PTR(-ENOMEM);

	bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) |
		   XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
		   XE_BO_FLAG_NO_RESV_EVICT | XE_BO_FLAG_PAGETABLE;
	if (vm->xef) /* userspace */
		bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE | XE_BO_FLAG_FORCE_USER_VRAM;

	pt->level = level;

	drm_WARN_ON(&vm->xe->drm, IS_ERR_OR_NULL(exec));
	bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
				  ttm_bo_type_kernel,
				  bo_flags, exec);
	if (IS_ERR(bo)) {
		err = PTR_ERR(bo);
		goto err_kfree;
	}
	pt->bo = bo;
	pt->base.children = level ? as_xe_pt_dir(pt)->children : NULL;
	pt->base.staging = level ?
as_xe_pt_dir(pt)->staging : NULL; 140 141 if (vm->xef) 142 xe_drm_client_add_bo(vm->xef->client, pt->bo); 143 xe_tile_assert(tile, level <= XE_VM_MAX_LEVEL); 144 145 return pt; 146 147 err_kfree: 148 xe_pt_free(pt); 149 return ERR_PTR(err); 150 } 151 ALLOW_ERROR_INJECTION(xe_pt_create, ERRNO); 152 153 /** 154 * xe_pt_populate_empty() - Populate a page-table bo with scratch- or zero 155 * entries. 156 * @tile: The tile the scratch pagetable of which to use. 157 * @vm: The vm we populate for. 158 * @pt: The pagetable the bo of which to initialize. 159 * 160 * Populate the page-table bo of @pt with entries pointing into the tile's 161 * scratch page-table tree if any. Otherwise populate with zeros. 162 */ 163 void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm, 164 struct xe_pt *pt) 165 { 166 struct iosys_map *map = &pt->bo->vmap; 167 u64 empty; 168 int i; 169 170 if (!xe_vm_has_scratch(vm)) { 171 /* 172 * FIXME: Some memory is allocated already allocated to zero? 173 * Find out which memory that is and avoid this memset... 174 */ 175 xe_map_memset(vm->xe, map, 0, 0, SZ_4K); 176 } else { 177 empty = __xe_pt_empty_pte(tile, vm, pt->level); 178 for (i = 0; i < XE_PDES; i++) 179 xe_pt_write(vm->xe, map, i, empty); 180 } 181 } 182 183 /** 184 * xe_pt_shift() - Return the ilog2 value of the size of the address range of 185 * a page-table at a certain level. 186 * @level: The level. 187 * 188 * Return: The ilog2 value of the size of the address range of a page-table 189 * at level @level. 190 */ 191 unsigned int xe_pt_shift(unsigned int level) 192 { 193 return XE_PTE_SHIFT + XE_PDE_SHIFT * level; 194 } 195 196 /** 197 * xe_pt_destroy() - Destroy a page-table tree. 198 * @pt: The root of the page-table tree to destroy. 199 * @flags: vm flags. Currently unused. 200 * @deferred: List head of lockless list for deferred putting. NULL for 201 * immediate putting. 
202 * 203 * Puts the page-table bo, recursively calls xe_pt_destroy on all children 204 * and finally frees @pt. TODO: Can we remove the @flags argument? 205 */ 206 void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred) 207 { 208 int i; 209 210 if (!pt) 211 return; 212 213 XE_WARN_ON(!list_empty(&pt->bo->ttm.base.gpuva.list)); 214 xe_bo_unpin(pt->bo); 215 xe_bo_put_deferred(pt->bo, deferred); 216 217 if (pt->level > 0 && pt->num_live) { 218 struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt); 219 220 for (i = 0; i < XE_PDES; i++) { 221 if (xe_pt_entry_staging(pt_dir, i)) 222 xe_pt_destroy(xe_pt_entry_staging(pt_dir, i), flags, 223 deferred); 224 } 225 } 226 xe_pt_free(pt); 227 } 228 229 /** 230 * xe_pt_clear() - Clear a page-table. 231 * @xe: xe device. 232 * @pt: The page-table. 233 * 234 * Clears page-table by setting to zero. 235 */ 236 void xe_pt_clear(struct xe_device *xe, struct xe_pt *pt) 237 { 238 struct iosys_map *map = &pt->bo->vmap; 239 240 xe_map_memset(xe, map, 0, 0, SZ_4K); 241 } 242 243 /** 244 * DOC: Pagetable building 245 * 246 * Below we use the term "page-table" for both page-directories, containing 247 * pointers to lower level page-directories or page-tables, and level 0 248 * page-tables that contain only page-table-entries pointing to memory pages. 249 * 250 * When inserting an address range in an already existing page-table tree 251 * there will typically be a set of page-tables that are shared with other 252 * address ranges, and a set that are private to this address range. 253 * The set of shared page-tables can be at most two per level, 254 * and those can't be updated immediately because the entries of those 255 * page-tables may still be in use by the gpu for other mappings. Therefore 256 * when inserting entries into those, we instead stage those insertions by 257 * adding insertion data into struct xe_vm_pgtable_update structures. 
This 258 * data, (subtrees for the cpu and page-table-entries for the gpu) is then 259 * added in a separate commit step. CPU-data is committed while still under the 260 * vm lock, the object lock and for userptr, the notifier lock in read mode. 261 * The GPU async data is committed either by the GPU or CPU after fulfilling 262 * relevant dependencies. 263 * For non-shared page-tables (and, in fact, for shared ones that aren't 264 * existing at the time of staging), we add the data in-place without the 265 * special update structures. This private part of the page-table tree will 266 * remain disconnected from the vm page-table tree until data is committed to 267 * the shared page tables of the vm tree in the commit phase. 268 */ 269 270 struct xe_pt_update { 271 /** @update: The update structure we're building for this parent. */ 272 struct xe_vm_pgtable_update *update; 273 /** @parent: The parent. Used to detect a parent change. */ 274 struct xe_pt *parent; 275 /** @preexisting: Whether the parent was pre-existing or allocated */ 276 bool preexisting; 277 }; 278 279 /** 280 * struct xe_pt_stage_bind_walk - Walk state for the stage_bind walk. 281 */ 282 struct xe_pt_stage_bind_walk { 283 /** @base: The base class. */ 284 struct xe_pt_walk base; 285 286 /* Input parameters for the walk */ 287 /** @vm: The vm we're building for. */ 288 struct xe_vm *vm; 289 /** @tile: The tile we're building for. */ 290 struct xe_tile *tile; 291 /** @default_vram_pte: PTE flag only template for VRAM. No address is associated */ 292 u64 default_vram_pte; 293 /** @default_system_pte: PTE flag only template for System. No address is associated */ 294 u64 default_system_pte; 295 /** @dma_offset: DMA offset to add to the PTE. */ 296 u64 dma_offset; 297 /** 298 * @needs_64K: This address range enforces 64K alignment and 299 * granularity on VRAM. 
	 */
	bool needs_64K;
	/** @clear_pt: clear page table entries during the bind walk */
	bool clear_pt;
	/**
	 * @vma: VMA being mapped
	 */
	struct xe_vma *vma;

	/* Also input, but is updated during the walk */
	/** @curs: The DMA address cursor. */
	struct xe_res_cursor *curs;
	/** @va_curs_start: The Virtual address corresponding to @curs->start */
	u64 va_curs_start;

	/* Output */
	/** @wupd: Walk output data for page-table updates. */
	struct xe_walk_update {
		/** @wupd.entries: Caller provided storage. */
		struct xe_vm_pgtable_update *entries;
		/** @wupd.num_used_entries: Number of update @entries used. */
		unsigned int num_used_entries;
		/** @wupd.updates: Tracks the update entry at a given level */
		struct xe_pt_update updates[XE_VM_MAX_LEVEL + 1];
	} wupd;

	/* Walk state */
	/**
	 * @l0_end_addr: The end address of the current l0 leaf. Used for
	 * 64K granularity detection.
	 */
	u64 l0_end_addr;
	/** @addr_64K: The start address of the current 64K chunk. */
	u64 addr_64K;
	/** @found_64K: Whether @addr_64K actually points to a 64K chunk. */
	bool found_64K;
};

static int
xe_pt_new_shared(struct xe_walk_update *wupd, struct xe_pt *parent,
		 pgoff_t offset, bool alloc_entries)
{
	struct xe_pt_update *upd = &wupd->updates[parent->level];
	struct xe_vm_pgtable_update *entry;

	/*
	 * For *each level*, we could only have one active
	 * struct xe_pt_update at any one time.
Once we move on to a 348 * new parent and page-directory, the old one is complete, and 349 * updates are either already stored in the build tree or in 350 * @wupd->entries 351 */ 352 if (likely(upd->parent == parent)) 353 return 0; 354 355 upd->parent = parent; 356 upd->preexisting = true; 357 358 if (wupd->num_used_entries == XE_VM_MAX_LEVEL * 2 + 1) 359 return -EINVAL; 360 361 entry = wupd->entries + wupd->num_used_entries++; 362 upd->update = entry; 363 entry->ofs = offset; 364 entry->pt_bo = parent->bo; 365 entry->pt = parent; 366 entry->flags = 0; 367 entry->qwords = 0; 368 entry->pt_bo->update_index = -1; 369 370 if (alloc_entries) { 371 entry->pt_entries = kmalloc_objs(*entry->pt_entries, XE_PDES); 372 if (!entry->pt_entries) 373 return -ENOMEM; 374 } 375 376 return 0; 377 } 378 379 /* 380 * NOTE: This is a very frequently called function so we allow ourselves 381 * to annotate (using branch prediction hints) the fastpath of updating a 382 * non-pre-existing pagetable with leaf ptes. 383 */ 384 static int 385 xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent, 386 pgoff_t offset, struct xe_pt *xe_child, u64 pte) 387 { 388 struct xe_pt_update *upd = &xe_walk->wupd.updates[parent->level]; 389 struct xe_pt_update *child_upd = xe_child ? 390 &xe_walk->wupd.updates[xe_child->level] : NULL; 391 int ret; 392 393 ret = xe_pt_new_shared(&xe_walk->wupd, parent, offset, true); 394 if (unlikely(ret)) 395 return ret; 396 397 /* 398 * Register this new pagetable so that it won't be recognized as 399 * a shared pagetable by a subsequent insertion. 400 */ 401 if (unlikely(child_upd)) { 402 child_upd->update = NULL; 403 child_upd->parent = xe_child; 404 child_upd->preexisting = false; 405 } 406 407 if (likely(!upd->preexisting)) { 408 /* Continue building a non-connected subtree. 
*/ 409 struct iosys_map *map = &parent->bo->vmap; 410 411 if (unlikely(xe_child)) { 412 parent->base.children[offset] = &xe_child->base; 413 parent->base.staging[offset] = &xe_child->base; 414 } 415 416 xe_pt_write(xe_walk->vm->xe, map, offset, pte); 417 parent->num_live++; 418 } else { 419 /* Shared pt. Stage update. */ 420 unsigned int idx; 421 struct xe_vm_pgtable_update *entry = upd->update; 422 423 idx = offset - entry->ofs; 424 entry->pt_entries[idx].pt = xe_child; 425 entry->pt_entries[idx].pte = pte; 426 entry->qwords++; 427 } 428 429 return 0; 430 } 431 432 static bool xe_pt_hugepte_possible(u64 addr, u64 next, unsigned int level, 433 struct xe_pt_stage_bind_walk *xe_walk) 434 { 435 u64 size, dma; 436 437 if (level > MAX_HUGEPTE_LEVEL) 438 return false; 439 440 /* Does the virtual range requested cover a huge pte? */ 441 if (!xe_pt_covers(addr, next, level, &xe_walk->base)) 442 return false; 443 444 /* Does the DMA segment cover the whole pte? */ 445 if (next - xe_walk->va_curs_start > xe_walk->curs->size) 446 return false; 447 448 /* null VMA's do not have dma addresses */ 449 if (xe_vma_is_null(xe_walk->vma)) 450 return true; 451 452 /* if we are clearing page table, no dma addresses*/ 453 if (xe_walk->clear_pt) 454 return true; 455 456 /* Is the DMA address huge PTE size aligned? */ 457 size = next - addr; 458 dma = addr - xe_walk->va_curs_start + xe_res_dma(xe_walk->curs); 459 460 return IS_ALIGNED(dma, size); 461 } 462 463 /* 464 * Scan the requested mapping to check whether it can be done entirely 465 * with 64K PTEs. 
 */
static bool
xe_pt_scan_64K(u64 addr, u64 next, struct xe_pt_stage_bind_walk *xe_walk)
{
	/* Work on a copy of the cursor so the caller's walk position is untouched. */
	struct xe_res_cursor curs = *xe_walk->curs;

	if (!IS_ALIGNED(addr, SZ_64K))
		return false;

	if (next > xe_walk->l0_end_addr)
		return false;

	/* null VMA's do not have dma addresses */
	if (xe_vma_is_null(xe_walk->vma))
		return true;

	xe_res_next(&curs, addr - xe_walk->va_curs_start);
	for (; addr < next; addr += SZ_64K) {
		if (!IS_ALIGNED(xe_res_dma(&curs), SZ_64K) || curs.size < SZ_64K)
			return false;

		xe_res_next(&curs, SZ_64K);
	}

	return addr == next;
}

/*
 * For non-compact "normal" 4K level-0 pagetables, we want to try to group
 * addresses together in 64K-contiguous regions to add a 64K TLB hint for the
 * device to the PTE.
 * This function determines whether the address is part of such a
 * segment. For VRAM in normal pagetables, this is strictly necessary on
 * some devices.
500 */ 501 static bool 502 xe_pt_is_pte_ps64K(u64 addr, u64 next, struct xe_pt_stage_bind_walk *xe_walk) 503 { 504 /* Address is within an already found 64k region */ 505 if (xe_walk->found_64K && addr - xe_walk->addr_64K < SZ_64K) 506 return true; 507 508 xe_walk->found_64K = xe_pt_scan_64K(addr, addr + SZ_64K, xe_walk); 509 xe_walk->addr_64K = addr; 510 511 return xe_walk->found_64K; 512 } 513 514 static int 515 xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset, 516 unsigned int level, u64 addr, u64 next, 517 struct xe_ptw **child, 518 enum page_walk_action *action, 519 struct xe_pt_walk *walk) 520 { 521 struct xe_pt_stage_bind_walk *xe_walk = 522 container_of(walk, typeof(*xe_walk), base); 523 u16 pat_index = xe_walk->vma->attr.pat_index; 524 struct xe_pt *xe_parent = container_of(parent, typeof(*xe_parent), base); 525 struct xe_vm *vm = xe_walk->vm; 526 struct xe_pt *xe_child; 527 bool covers; 528 int ret = 0; 529 u64 pte; 530 531 /* Is this a leaf entry ?*/ 532 if (level == 0 || xe_pt_hugepte_possible(addr, next, level, xe_walk)) { 533 struct xe_res_cursor *curs = xe_walk->curs; 534 bool is_null = xe_vma_is_null(xe_walk->vma); 535 bool is_vram = is_null ? false : xe_res_is_vram(curs); 536 537 XE_WARN_ON(xe_walk->va_curs_start != addr); 538 539 if (xe_walk->clear_pt) { 540 pte = 0; 541 } else { 542 pte = vm->pt_ops->pte_encode_vma(is_null ? 0 : 543 xe_res_dma(curs) + 544 xe_walk->dma_offset, 545 xe_walk->vma, 546 pat_index, level); 547 if (!is_null) 548 pte |= is_vram ? xe_walk->default_vram_pte : 549 xe_walk->default_system_pte; 550 551 /* 552 * Set the XE_PTE_PS64 hint if possible, otherwise if 553 * this device *requires* 64K PTE size for VRAM, fail. 
554 */ 555 if (level == 0 && !xe_parent->is_compact) { 556 if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) { 557 xe_walk->vma->gpuva.flags |= 558 XE_VMA_PTE_64K; 559 pte |= XE_PTE_PS64; 560 } else if (XE_WARN_ON(xe_walk->needs_64K && 561 is_vram)) { 562 return -EINVAL; 563 } 564 } 565 } 566 567 ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, NULL, pte); 568 if (unlikely(ret)) 569 return ret; 570 571 if (!is_null && !xe_walk->clear_pt) 572 xe_res_next(curs, next - addr); 573 xe_walk->va_curs_start = next; 574 xe_walk->vma->gpuva.flags |= (XE_VMA_PTE_4K << level); 575 *action = ACTION_CONTINUE; 576 577 return ret; 578 } 579 580 /* 581 * Descending to lower level. Determine if we need to allocate a 582 * new page table or -directory, which we do if there is no 583 * previous one or there is one we can completely replace. 584 */ 585 if (level == 1) { 586 walk->shifts = xe_normal_pt_shifts; 587 xe_walk->l0_end_addr = next; 588 } 589 590 covers = xe_pt_covers(addr, next, level, &xe_walk->base); 591 if (covers || !*child) { 592 u64 flags = 0; 593 594 xe_child = xe_pt_create(xe_walk->vm, xe_walk->tile, level - 1, 595 xe_vm_validation_exec(vm)); 596 if (IS_ERR(xe_child)) 597 return PTR_ERR(xe_child); 598 599 xe_pt_set_addr(xe_child, 600 round_down(addr, 1ull << walk->shifts[level])); 601 602 if (!covers) 603 xe_pt_populate_empty(xe_walk->tile, xe_walk->vm, xe_child); 604 605 *child = &xe_child->base; 606 607 /* 608 * Prefer the compact pagetable layout for L0 if possible. Only 609 * possible if VMA covers entire 2MB region as compact 64k and 610 * 4k pages cannot be mixed within a 2MB region. 611 * TODO: Suballocate the pt bo to avoid wasting a lot of 612 * memory. 
613 */ 614 if (GRAPHICS_VERx100(tile_to_xe(xe_walk->tile)) >= 1250 && level == 1 && 615 covers && xe_pt_scan_64K(addr, next, xe_walk)) { 616 walk->shifts = xe_compact_pt_shifts; 617 xe_walk->vma->gpuva.flags |= XE_VMA_PTE_COMPACT; 618 flags |= XE_PDE_64K; 619 xe_child->is_compact = true; 620 } 621 622 pte = vm->pt_ops->pde_encode_bo(xe_child->bo, 0) | flags; 623 ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, xe_child, 624 pte); 625 } 626 627 *action = ACTION_SUBTREE; 628 return ret; 629 } 630 631 static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = { 632 .pt_entry = xe_pt_stage_bind_entry, 633 }; 634 635 /* 636 * Default atomic expectations for different allocation scenarios are as follows: 637 * 638 * 1. Traditional API: When the VM is not in LR mode: 639 * - Device atomics are expected to function with all allocations. 640 * 641 * 2. Compute/SVM API: When the VM is in LR mode: 642 * - Device atomics are the default behavior when the bo is placed in a single region. 643 * - In all other cases device atomics will be disabled with AE=0 until an application 644 * request differently using a ioctl like madvise. 645 */ 646 static bool xe_atomic_for_vram(struct xe_vm *vm, struct xe_vma *vma) 647 { 648 if (vma->attr.atomic_access == DRM_XE_ATOMIC_CPU) 649 return false; 650 651 return true; 652 } 653 654 static bool xe_atomic_for_system(struct xe_vm *vm, struct xe_vma *vma) 655 { 656 struct xe_device *xe = vm->xe; 657 struct xe_bo *bo = xe_vma_bo(vma); 658 659 if (!xe->info.has_device_atomics_on_smem || 660 vma->attr.atomic_access == DRM_XE_ATOMIC_CPU) 661 return false; 662 663 if (vma->attr.atomic_access == DRM_XE_ATOMIC_DEVICE) 664 return true; 665 666 /* 667 * If a SMEM+LMEM allocation is backed by SMEM, a device 668 * atomics will cause a gpu page fault and which then 669 * gets migrated to LMEM, bind such allocations with 670 * device atomics enabled. 
671 */ 672 return (!IS_DGFX(xe) || (!xe_vm_in_lr_mode(vm) || 673 (bo && xe_bo_has_single_placement(bo)))); 674 } 675 676 /** 677 * xe_pt_stage_bind() - Build a disconnected page-table tree for a given address 678 * range. 679 * @tile: The tile we're building for. 680 * @vma: The vma indicating the address range. 681 * @range: The range indicating the address range. 682 * @entries: Storage for the update entries used for connecting the tree to 683 * the main tree at commit time. 684 * @num_entries: On output contains the number of @entries used. 685 * @clear_pt: Clear the page table entries. 686 * 687 * This function builds a disconnected page-table tree for a given address 688 * range. The tree is connected to the main vm tree for the gpu using 689 * xe_migrate_update_pgtables() and for the cpu using xe_pt_commit_bind(). 690 * The function builds xe_vm_pgtable_update structures for already existing 691 * shared page-tables, and non-existing shared and non-shared page-tables 692 * are built and populated directly. 693 * 694 * Return 0 on success, negative error code on error. 695 */ 696 static int 697 xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma, 698 struct xe_svm_range *range, 699 struct xe_vm_pgtable_update *entries, 700 u32 *num_entries, bool clear_pt) 701 { 702 struct xe_device *xe = tile_to_xe(tile); 703 struct xe_bo *bo = xe_vma_bo(vma); 704 struct xe_res_cursor curs; 705 struct xe_vm *vm = xe_vma_vm(vma); 706 struct xe_pt_stage_bind_walk xe_walk = { 707 .base = { 708 .ops = &xe_pt_stage_bind_ops, 709 .shifts = xe_normal_pt_shifts, 710 .max_level = XE_PT_HIGHEST_LEVEL, 711 .staging = true, 712 }, 713 .vm = vm, 714 .tile = tile, 715 .curs = &curs, 716 .va_curs_start = range ? xe_svm_range_start(range) : 717 xe_vma_start(vma), 718 .vma = vma, 719 .wupd.entries = entries, 720 .clear_pt = clear_pt, 721 }; 722 struct xe_pt *pt = vm->pt_root[tile->id]; 723 int ret; 724 725 if (range) { 726 /* Move this entire thing to xe_svm.c? 
*/ 727 xe_svm_notifier_lock(vm); 728 if (!xe_svm_range_pages_valid(range)) { 729 xe_svm_range_debug(range, "BIND PREPARE - RETRY"); 730 xe_svm_notifier_unlock(vm); 731 return -EAGAIN; 732 } 733 if (xe_svm_range_has_dma_mapping(range)) { 734 xe_res_first_dma(range->base.pages.dma_addr, 0, 735 xe_svm_range_size(range), 736 &curs); 737 xe_svm_range_debug(range, "BIND PREPARE - MIXED"); 738 } else { 739 xe_assert(xe, false); 740 } 741 /* 742 * Note, when unlocking the resource cursor dma addresses may become 743 * stale, but the bind will be aborted anyway at commit time. 744 */ 745 xe_svm_notifier_unlock(vm); 746 } 747 748 xe_walk.needs_64K = (vm->flags & XE_VM_FLAG_64K); 749 if (clear_pt) 750 goto walk_pt; 751 752 if (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) { 753 xe_walk.default_vram_pte = xe_atomic_for_vram(vm, vma) ? XE_USM_PPGTT_PTE_AE : 0; 754 xe_walk.default_system_pte = xe_atomic_for_system(vm, vma) ? 755 XE_USM_PPGTT_PTE_AE : 0; 756 } 757 758 xe_walk.default_vram_pte |= XE_PPGTT_PTE_DM; 759 xe_walk.dma_offset = bo ? vram_region_gpu_offset(bo->ttm.resource) : 0; 760 if (!range) 761 xe_bo_assert_held(bo); 762 763 if (!xe_vma_is_null(vma) && !range) { 764 if (xe_vma_is_userptr(vma)) 765 xe_res_first_dma(to_userptr_vma(vma)->userptr.pages.dma_addr, 0, 766 xe_vma_size(vma), &curs); 767 else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo)) 768 xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma), 769 xe_vma_size(vma), &curs); 770 else 771 xe_res_first_sg(xe_bo_sg(bo), xe_vma_bo_offset(vma), 772 xe_vma_size(vma), &curs); 773 } else if (!range) { 774 curs.size = xe_vma_size(vma); 775 } 776 777 walk_pt: 778 ret = xe_pt_walk_range(&pt->base, pt->level, 779 range ? xe_svm_range_start(range) : xe_vma_start(vma), 780 range ? xe_svm_range_end(range) : xe_vma_end(vma), 781 &xe_walk.base); 782 783 *num_entries = xe_walk.wupd.num_used_entries; 784 return ret; 785 } 786 787 /** 788 * xe_pt_nonshared_offsets() - Determine the non-shared entry offsets of a 789 * shared pagetable. 
790 * @addr: The start address within the non-shared pagetable. 791 * @end: The end address within the non-shared pagetable. 792 * @level: The level of the non-shared pagetable. 793 * @walk: Walk info. The function adjusts the walk action. 794 * @action: next action to perform (see enum page_walk_action) 795 * @offset: Ignored on input, First non-shared entry on output. 796 * @end_offset: Ignored on input, Last non-shared entry + 1 on output. 797 * 798 * A non-shared page-table has some entries that belong to the address range 799 * and others that don't. This function determines the entries that belong 800 * fully to the address range. Depending on level, some entries may 801 * partially belong to the address range (that can't happen at level 0). 802 * The function detects that and adjust those offsets to not include those 803 * partial entries. Iff it does detect partial entries, we know that there must 804 * be shared page tables also at lower levels, so it adjusts the walk action 805 * accordingly. 806 * 807 * Return: true if there were non-shared entries, false otherwise. 
 */
static bool xe_pt_nonshared_offsets(u64 addr, u64 end, unsigned int level,
				    struct xe_pt_walk *walk,
				    enum page_walk_action *action,
				    pgoff_t *offset, pgoff_t *end_offset)
{
	u64 size = 1ull << walk->shifts[level];

	*offset = xe_pt_offset(addr, level, walk);
	*end_offset = xe_pt_num_entries(addr, end, level, walk) + *offset;

	/* Level 0 entries can't be partially covered, so the full range is non-shared. */
	if (!level)
		return true;

	/*
	 * If addr or next are not size aligned, there are shared pts at lower
	 * level, so in that case traverse down the subtree
	 */
	*action = ACTION_CONTINUE;
	if (!IS_ALIGNED(addr, size)) {
		*action = ACTION_SUBTREE;
		(*offset)++;
	}

	if (!IS_ALIGNED(end, size)) {
		*action = ACTION_SUBTREE;
		(*end_offset)--;
	}

	return *end_offset > *offset;
}

struct xe_pt_zap_ptes_walk {
	/** @base: The walk base-class */
	struct xe_pt_walk base;

	/* Input parameters for the walk */
	/** @tile: The tile we're building for */
	struct xe_tile *tile;

	/* Output */
	/** @needs_invalidate: Whether we need to invalidate TLB */
	bool needs_invalidate;
};

static int xe_pt_zap_ptes_entry(struct xe_ptw *parent, pgoff_t offset,
				unsigned int level, u64 addr, u64 next,
				struct xe_ptw **child,
				enum page_walk_action *action,
				struct xe_pt_walk *walk)
{
	struct xe_pt_zap_ptes_walk *xe_walk =
		container_of(walk, typeof(*xe_walk), base);
	struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
	pgoff_t end_offset;

	XE_WARN_ON(!*child);
	XE_WARN_ON(!level);

	/*
	 * Note that we're called from an entry callback, and we're dealing
	 * with the child of that entry rather than the parent, so need to
	 * adjust level down.
	 */
	if (xe_pt_nonshared_offsets(addr, next, --level, walk, action, &offset,
				    &end_offset)) {
		/* Zero out only the fully-owned (non-shared) range of entries. */
		xe_map_memset(tile_to_xe(xe_walk->tile), &xe_child->bo->vmap,
			      offset * sizeof(u64), 0,
			      (end_offset - offset) * sizeof(u64));
		xe_walk->needs_invalidate = true;
	}

	return 0;
}

static const struct xe_pt_walk_ops xe_pt_zap_ptes_ops = {
	.pt_entry = xe_pt_zap_ptes_entry,
};

/**
 * xe_pt_zap_ptes() - Zap (zero) gpu ptes of an address range
 * @tile: The tile we're zapping for.
 * @vma: GPU VMA detailing address range.
 *
 * Eviction and Userptr invalidation needs to be able to zap the
 * gpu ptes of a given address range in pagefaulting mode.
 * In order to be able to do that, that function needs access to the shared
 * page-table entries so it can either clear the leaf PTEs or
 * clear the pointers to lower-level page-tables. The caller is required
 * to hold the necessary locks to ensure neither the page-table connectivity
 * nor the page-table entries of the range is updated from under us.
 *
 * Return: Whether ptes were actually updated and a TLB invalidation is
 * required.
902 */ 903 bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma) 904 { 905 struct xe_pt_zap_ptes_walk xe_walk = { 906 .base = { 907 .ops = &xe_pt_zap_ptes_ops, 908 .shifts = xe_normal_pt_shifts, 909 .max_level = XE_PT_HIGHEST_LEVEL, 910 }, 911 .tile = tile, 912 }; 913 struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id]; 914 u8 pt_mask = (vma->tile_present & ~vma->tile_invalidated); 915 916 if (xe_vma_bo(vma)) 917 xe_bo_assert_held(xe_vma_bo(vma)); 918 else if (xe_vma_is_userptr(vma)) 919 lockdep_assert_held(&xe_vma_vm(vma)->svm.gpusvm.notifier_lock); 920 921 if (!(pt_mask & BIT(tile->id))) 922 return false; 923 924 (void)xe_pt_walk_shared(&pt->base, pt->level, xe_vma_start(vma), 925 xe_vma_end(vma), &xe_walk.base); 926 927 return xe_walk.needs_invalidate; 928 } 929 930 /** 931 * xe_pt_zap_ptes_range() - Zap (zero) gpu ptes of a SVM range 932 * @tile: The tile we're zapping for. 933 * @vm: The VM we're zapping for. 934 * @range: The SVM range we're zapping for. 935 * 936 * SVM invalidation needs to be able to zap the gpu ptes of a given address 937 * range. In order to be able to do that, that function needs access to the 938 * shared page-table entries so it can either clear the leaf PTEs or 939 * clear the pointers to lower-level page-tables. The caller is required 940 * to hold the SVM notifier lock. 941 * 942 * Return: Whether ptes were actually updated and a TLB invalidation is 943 * required. 
944 */ 945 bool xe_pt_zap_ptes_range(struct xe_tile *tile, struct xe_vm *vm, 946 struct xe_svm_range *range) 947 { 948 struct xe_pt_zap_ptes_walk xe_walk = { 949 .base = { 950 .ops = &xe_pt_zap_ptes_ops, 951 .shifts = xe_normal_pt_shifts, 952 .max_level = XE_PT_HIGHEST_LEVEL, 953 }, 954 .tile = tile, 955 }; 956 struct xe_pt *pt = vm->pt_root[tile->id]; 957 u8 pt_mask = (range->tile_present & ~range->tile_invalidated); 958 959 /* 960 * Locking rules: 961 * 962 * - notifier_lock (write): full protection against page table changes 963 * and MMU notifier invalidations. 964 * 965 * - notifier_lock (read) + vm_lock (write): combined protection against 966 * invalidations and concurrent page table modifications. (e.g., madvise) 967 * 968 */ 969 lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 0) || 970 (lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) && 971 lockdep_is_held_type(&vm->lock, 0))); 972 973 if (!(pt_mask & BIT(tile->id))) 974 return false; 975 976 (void)xe_pt_walk_shared(&pt->base, pt->level, xe_svm_range_start(range), 977 xe_svm_range_end(range), &xe_walk.base); 978 979 return xe_walk.needs_invalidate; 980 } 981 982 static void 983 xe_vm_populate_pgtable(struct xe_migrate_pt_update *pt_update, struct xe_tile *tile, 984 struct iosys_map *map, void *data, 985 u32 qword_ofs, u32 num_qwords, 986 const struct xe_vm_pgtable_update *update) 987 { 988 struct xe_pt_entry *ptes = update->pt_entries; 989 u64 *ptr = data; 990 u32 i; 991 992 for (i = 0; i < num_qwords; i++) { 993 if (map) 994 xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) * 995 sizeof(u64), u64, ptes[i].pte); 996 else 997 ptr[i] = ptes[i].pte; 998 } 999 } 1000 1001 static void xe_pt_cancel_bind(struct xe_vma *vma, 1002 struct xe_vm_pgtable_update *entries, 1003 u32 num_entries) 1004 { 1005 u32 i, j; 1006 1007 for (i = 0; i < num_entries; i++) { 1008 struct xe_pt *pt = entries[i].pt; 1009 1010 if (!pt) 1011 continue; 1012 1013 if (pt->level) { 1014 for (j = 0; j < 
entries[i].qwords; j++) 1015 xe_pt_destroy(entries[i].pt_entries[j].pt, 1016 xe_vma_vm(vma)->flags, NULL); 1017 } 1018 1019 kfree(entries[i].pt_entries); 1020 entries[i].pt_entries = NULL; 1021 entries[i].qwords = 0; 1022 } 1023 } 1024 1025 #define XE_INVALID_VMA ((struct xe_vma *)(0xdeaddeadull)) 1026 1027 static void xe_pt_commit_prepare_locks_assert(struct xe_vma *vma) 1028 { 1029 struct xe_vm *vm; 1030 1031 if (vma == XE_INVALID_VMA) 1032 return; 1033 1034 vm = xe_vma_vm(vma); 1035 lockdep_assert_held(&vm->lock); 1036 1037 if (!xe_vma_has_no_bo(vma)) 1038 dma_resv_assert_held(xe_vma_bo(vma)->ttm.base.resv); 1039 1040 xe_vm_assert_held(vm); 1041 } 1042 1043 static void xe_pt_commit_locks_assert(struct xe_vma *vma) 1044 { 1045 struct xe_vm *vm; 1046 1047 if (vma == XE_INVALID_VMA) 1048 return; 1049 1050 vm = xe_vma_vm(vma); 1051 xe_pt_commit_prepare_locks_assert(vma); 1052 1053 if (xe_vma_is_userptr(vma)) 1054 xe_svm_assert_held_read(vm); 1055 } 1056 1057 static void xe_pt_commit(struct xe_vma *vma, 1058 struct xe_vm_pgtable_update *entries, 1059 u32 num_entries, struct llist_head *deferred) 1060 { 1061 u32 i, j; 1062 1063 xe_pt_commit_locks_assert(vma); 1064 1065 for (i = 0; i < num_entries; i++) { 1066 struct xe_pt *pt = entries[i].pt; 1067 struct xe_pt_dir *pt_dir; 1068 1069 if (!pt->level) 1070 continue; 1071 1072 pt_dir = as_xe_pt_dir(pt); 1073 for (j = 0; j < entries[i].qwords; j++) { 1074 struct xe_pt *oldpte = entries[i].pt_entries[j].pt; 1075 int j_ = j + entries[i].ofs; 1076 1077 pt_dir->children[j_] = pt_dir->staging[j_]; 1078 xe_pt_destroy(oldpte, (vma == XE_INVALID_VMA) ? 
0 : 1079 xe_vma_vm(vma)->flags, deferred); 1080 } 1081 } 1082 } 1083 1084 static void xe_pt_abort_bind(struct xe_vma *vma, 1085 struct xe_vm_pgtable_update *entries, 1086 u32 num_entries, bool rebind) 1087 { 1088 int i, j; 1089 1090 xe_pt_commit_prepare_locks_assert(vma); 1091 1092 for (i = num_entries - 1; i >= 0; --i) { 1093 struct xe_pt *pt = entries[i].pt; 1094 struct xe_pt_dir *pt_dir; 1095 1096 if (!rebind) 1097 pt->num_live -= entries[i].qwords; 1098 1099 if (!pt->level) 1100 continue; 1101 1102 pt_dir = as_xe_pt_dir(pt); 1103 for (j = 0; j < entries[i].qwords; j++) { 1104 u32 j_ = j + entries[i].ofs; 1105 struct xe_pt *newpte = xe_pt_entry_staging(pt_dir, j_); 1106 struct xe_pt *oldpte = entries[i].pt_entries[j].pt; 1107 1108 pt_dir->staging[j_] = oldpte ? &oldpte->base : 0; 1109 xe_pt_destroy(newpte, xe_vma_vm(vma)->flags, NULL); 1110 } 1111 } 1112 } 1113 1114 static void xe_pt_commit_prepare_bind(struct xe_vma *vma, 1115 struct xe_vm_pgtable_update *entries, 1116 u32 num_entries, bool rebind) 1117 { 1118 u32 i, j; 1119 1120 xe_pt_commit_prepare_locks_assert(vma); 1121 1122 for (i = 0; i < num_entries; i++) { 1123 struct xe_pt *pt = entries[i].pt; 1124 struct xe_pt_dir *pt_dir; 1125 1126 if (!rebind) 1127 pt->num_live += entries[i].qwords; 1128 1129 if (!pt->level) 1130 continue; 1131 1132 pt_dir = as_xe_pt_dir(pt); 1133 for (j = 0; j < entries[i].qwords; j++) { 1134 u32 j_ = j + entries[i].ofs; 1135 struct xe_pt *newpte = entries[i].pt_entries[j].pt; 1136 struct xe_pt *oldpte = NULL; 1137 1138 if (xe_pt_entry_staging(pt_dir, j_)) 1139 oldpte = xe_pt_entry_staging(pt_dir, j_); 1140 1141 pt_dir->staging[j_] = &newpte->base; 1142 entries[i].pt_entries[j].pt = oldpte; 1143 } 1144 } 1145 } 1146 1147 static void xe_pt_free_bind(struct xe_vm_pgtable_update *entries, 1148 u32 num_entries) 1149 { 1150 u32 i; 1151 1152 for (i = 0; i < num_entries; i++) 1153 kfree(entries[i].pt_entries); 1154 } 1155 1156 static int 1157 xe_pt_prepare_bind(struct xe_tile *tile, 
		   struct xe_vma *vma,
		   struct xe_svm_range *range,
		   struct xe_vm_pgtable_update *entries,
		   u32 *num_entries, bool invalidate_on_bind)
{
	int err;

	/* Stage the bind; on success at least one entry must have been built. */
	*num_entries = 0;
	err = xe_pt_stage_bind(tile, vma, range, entries, num_entries,
			       invalidate_on_bind);
	if (!err)
		xe_tile_assert(tile, *num_entries);

	return err;
}

/*
 * Debug dump of prepared page-table update entries. Compiles to an empty
 * body unless CONFIG_DRM_XE_DEBUG_VM is enabled (note the #if wraps the
 * function *body*, keeping a single prototype for both configurations).
 */
static void xe_vm_dbg_print_entries(struct xe_device *xe,
				    const struct xe_vm_pgtable_update *entries,
				    unsigned int num_entries, bool bind)
#if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM))
{
	unsigned int i;

	vm_dbg(&xe->drm, "%s: %u entries to update\n", bind ? "bind" : "unbind",
	       num_entries);
	for (i = 0; i < num_entries; i++) {
		const struct xe_vm_pgtable_update *entry = &entries[i];
		struct xe_pt *xe_pt = entry->pt;
		u64 page_size = 1ull << xe_pt_shift(xe_pt->level);
		u64 end;
		u64 start;

		xe_assert(xe, !entry->pt->is_compact);
		start = entry->ofs * page_size;
		end = start + page_size * entry->qwords;
		vm_dbg(&xe->drm,
		       "\t%u: Update level %u at (%u + %u) [%llx...%llx) f:%x\n",
		       i, xe_pt->level, entry->ofs, entry->qwords,
		       xe_pt_addr(xe_pt) + start, xe_pt_addr(xe_pt) + end, 0);
	}
}
#else
{}
#endif

/*
 * no_in_syncs() - Check whether all in-fences are already signaled.
 *
 * Returns true if every sync entry either has no fence or its fence has
 * the signaled bit set. Used to decide if a CPU-side update may proceed
 * without waiting.
 */
static bool no_in_syncs(struct xe_sync_entry *syncs, u32 num_syncs)
{
	int i;

	for (i = 0; i < num_syncs; i++) {
		struct dma_fence *fence = syncs[i].fence;

		if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				       &fence->flags))
			return false;
	}

	return true;
}

/*
 * job_test_add_deps() - Add resv deps to a job, or test them for CPU updates.
 *
 * With a job, the resv fences become scheduler dependencies. Without a job
 * (CPU-side update), the resv must already be idle for the given usage;
 * -ETIME signals the caller to fall back to the GPU path.
 */
static int job_test_add_deps(struct xe_sched_job *job,
			     struct dma_resv *resv,
			     enum dma_resv_usage usage)
{
	if (!job) {
		if (!dma_resv_test_signaled(resv, usage))
			return -ETIME;

		return 0;
	}

	return xe_sched_job_add_deps(job, resv, usage);
}

static int vma_add_deps(struct xe_vma *vma,
			struct xe_sched_job *job)
{
	struct xe_bo *bo = xe_vma_bo(vma);

	xe_bo_assert_held(bo);

	/* Only external BOs (not owned by the vm) carry their own resv deps. */
	if (bo && !bo->vm)
		return job_test_add_deps(job, bo->ttm.base.resv,
					 DMA_RESV_USAGE_KERNEL);

	return 0;
}

/*
 * op_add_deps() - Add per-operation dependencies to the PT update job.
 *
 * Dispatches on the GPUVA op type and adds the dependencies of each vma
 * the operation touches. UNMAP and DRIVER ops have no vma deps to add.
 */
static int op_add_deps(struct xe_vm *vm, struct xe_vma_op *op,
		       struct xe_sched_job *job)
{
	int err = 0;

	/*
	 * No need to check for is_cpu_addr_mirror here as vma_add_deps is a
	 * NOP if VMA is_cpu_addr_mirror
	 */

	switch (op->base.op) {
	case DRM_GPUVA_OP_MAP:
		/* Deferred (fault-mode, non-immediate) maps bind later. */
		if (!op->map.immediate && xe_vm_in_fault_mode(vm))
			break;

		err = vma_add_deps(op->map.vma, job);
		break;
	case DRM_GPUVA_OP_REMAP:
		if (op->remap.prev)
			err = vma_add_deps(op->remap.prev, job);
		if (!err && op->remap.next)
			err = vma_add_deps(op->remap.next, job);
		break;
	case DRM_GPUVA_OP_UNMAP:
		break;
	case DRM_GPUVA_OP_PREFETCH:
		err = vma_add_deps(gpuva_to_vma(op->base.prefetch.va), job);
		break;
	case DRM_GPUVA_OP_DRIVER:
		break;
	default:
		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
	}

	return err;
}

/*
 * xe_pt_vm_dependencies() - Collect all dependencies for a PT update.
 *
 * Gathers vm resv waits, range-fence conflicts, per-op vma deps, user
 * syncs and TLB-invalidation job deps. When @job is NULL this is a
 * CPU-side update: any unsignaled dependency yields -ETIME so the caller
 * can fall back to the GPU path.
 */
static int xe_pt_vm_dependencies(struct xe_sched_job *job,
				 struct xe_tlb_inval_job *ijob,
				 struct xe_tlb_inval_job *mjob,
				 struct xe_vm *vm,
				 struct xe_vma_ops *vops,
				 struct xe_vm_pgtable_update_ops *pt_update_ops,
				 struct xe_range_fence_tree *rftree)
{
	struct xe_range_fence *rtfence;
	struct dma_fence *fence;
	struct xe_vma_op *op;
	int err = 0, i;

	xe_vm_assert_held(vm);

	/* CPU update requires all in-syncs signaled and an idle queue. */
	if (!job && !no_in_syncs(vops->syncs, vops->num_syncs))
		return -ETIME;

	if (!job && !xe_exec_queue_is_idle(pt_update_ops->q))
		return -ETIME;

	if (pt_update_ops->wait_vm_bookkeep || pt_update_ops->wait_vm_kernel) {
		err = job_test_add_deps(job, xe_vm_resv(vm),
					pt_update_ops->wait_vm_bookkeep ?
1305 DMA_RESV_USAGE_BOOKKEEP : 1306 DMA_RESV_USAGE_KERNEL); 1307 if (err) 1308 return err; 1309 } 1310 1311 rtfence = xe_range_fence_tree_first(rftree, pt_update_ops->start, 1312 pt_update_ops->last); 1313 while (rtfence) { 1314 fence = rtfence->fence; 1315 1316 if (!dma_fence_is_signaled(fence)) { 1317 /* 1318 * Is this a CPU update? GPU is busy updating, so return 1319 * an error 1320 */ 1321 if (!job) 1322 return -ETIME; 1323 1324 dma_fence_get(fence); 1325 err = drm_sched_job_add_dependency(&job->drm, fence); 1326 if (err) 1327 return err; 1328 } 1329 1330 rtfence = xe_range_fence_tree_next(rtfence, 1331 pt_update_ops->start, 1332 pt_update_ops->last); 1333 } 1334 1335 list_for_each_entry(op, &vops->list, link) { 1336 err = op_add_deps(vm, op, job); 1337 if (err) 1338 return err; 1339 } 1340 1341 for (i = 0; job && !err && i < vops->num_syncs; i++) 1342 err = xe_sync_entry_add_deps(&vops->syncs[i], job); 1343 1344 if (job) { 1345 if (ijob) { 1346 err = xe_tlb_inval_job_alloc_dep(ijob); 1347 if (err) 1348 return err; 1349 } 1350 1351 if (mjob) { 1352 err = xe_tlb_inval_job_alloc_dep(mjob); 1353 if (err) 1354 return err; 1355 } 1356 } 1357 1358 return err; 1359 } 1360 1361 static int xe_pt_pre_commit(struct xe_migrate_pt_update *pt_update) 1362 { 1363 struct xe_vma_ops *vops = pt_update->vops; 1364 struct xe_vm *vm = vops->vm; 1365 struct xe_range_fence_tree *rftree = &vm->rftree[pt_update->tile_id]; 1366 struct xe_vm_pgtable_update_ops *pt_update_ops = 1367 &vops->pt_update_ops[pt_update->tile_id]; 1368 1369 return xe_pt_vm_dependencies(pt_update->job, pt_update->ijob, 1370 pt_update->mjob, vm, pt_update->vops, 1371 pt_update_ops, rftree); 1372 } 1373 1374 #if IS_ENABLED(CONFIG_DRM_GPUSVM) 1375 #ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT 1376 1377 static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma) 1378 { 1379 u32 divisor = uvma->userptr.divisor ? 
uvma->userptr.divisor : 2; 1380 static u32 count; 1381 1382 if (count++ % divisor == divisor - 1) { 1383 uvma->userptr.divisor = divisor << 1; 1384 return true; 1385 } 1386 1387 return false; 1388 } 1389 1390 #else 1391 1392 static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma) 1393 { 1394 return false; 1395 } 1396 1397 #endif 1398 1399 static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma, 1400 struct xe_vm_pgtable_update_ops *pt_update) 1401 { 1402 struct xe_userptr_vma *uvma; 1403 unsigned long notifier_seq; 1404 1405 xe_svm_assert_held_read(vm); 1406 1407 if (!xe_vma_is_userptr(vma)) 1408 return 0; 1409 1410 uvma = to_userptr_vma(vma); 1411 if (xe_pt_userptr_inject_eagain(uvma)) 1412 xe_vma_userptr_force_invalidate(uvma); 1413 1414 notifier_seq = uvma->userptr.pages.notifier_seq; 1415 1416 if (!mmu_interval_read_retry(&uvma->userptr.notifier, 1417 notifier_seq)) 1418 return 0; 1419 1420 if (xe_vm_in_fault_mode(vm)) 1421 return -EAGAIN; 1422 1423 /* 1424 * Just continue the operation since exec or rebind worker 1425 * will take care of rebinding. 
1426 */ 1427 return 0; 1428 } 1429 1430 static int op_check_svm_userptr(struct xe_vm *vm, struct xe_vma_op *op, 1431 struct xe_vm_pgtable_update_ops *pt_update) 1432 { 1433 int err = 0; 1434 1435 xe_svm_assert_held_read(vm); 1436 1437 switch (op->base.op) { 1438 case DRM_GPUVA_OP_MAP: 1439 if (!op->map.immediate && xe_vm_in_fault_mode(vm)) 1440 break; 1441 1442 err = vma_check_userptr(vm, op->map.vma, pt_update); 1443 break; 1444 case DRM_GPUVA_OP_REMAP: 1445 if (op->remap.prev) 1446 err = vma_check_userptr(vm, op->remap.prev, pt_update); 1447 if (!err && op->remap.next) 1448 err = vma_check_userptr(vm, op->remap.next, pt_update); 1449 break; 1450 case DRM_GPUVA_OP_UNMAP: 1451 break; 1452 case DRM_GPUVA_OP_PREFETCH: 1453 if (xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va))) { 1454 struct xe_svm_range *range = op->map_range.range; 1455 unsigned long i; 1456 1457 xe_assert(vm->xe, 1458 xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va))); 1459 xa_for_each(&op->prefetch_range.range, i, range) { 1460 xe_svm_range_debug(range, "PRE-COMMIT"); 1461 1462 if (!xe_svm_range_pages_valid(range)) { 1463 xe_svm_range_debug(range, "PRE-COMMIT - RETRY"); 1464 return -ENODATA; 1465 } 1466 } 1467 } else { 1468 err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va), pt_update); 1469 } 1470 break; 1471 #if IS_ENABLED(CONFIG_DRM_XE_GPUSVM) 1472 case DRM_GPUVA_OP_DRIVER: 1473 if (op->subop == XE_VMA_SUBOP_MAP_RANGE) { 1474 struct xe_svm_range *range = op->map_range.range; 1475 1476 xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma)); 1477 1478 xe_svm_range_debug(range, "PRE-COMMIT"); 1479 1480 if (!xe_svm_range_pages_valid(range)) { 1481 xe_svm_range_debug(range, "PRE-COMMIT - RETRY"); 1482 return -EAGAIN; 1483 } 1484 } 1485 break; 1486 #endif 1487 default: 1488 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); 1489 } 1490 1491 return err; 1492 } 1493 1494 static int xe_pt_svm_userptr_pre_commit(struct xe_migrate_pt_update *pt_update) 1495 { 1496 struct 
xe_vm *vm = pt_update->vops->vm; 1497 struct xe_vma_ops *vops = pt_update->vops; 1498 struct xe_vm_pgtable_update_ops *pt_update_ops = 1499 &vops->pt_update_ops[pt_update->tile_id]; 1500 struct xe_vma_op *op; 1501 int err; 1502 1503 err = xe_pt_pre_commit(pt_update); 1504 if (err) 1505 return err; 1506 1507 xe_svm_notifier_lock(vm); 1508 1509 list_for_each_entry(op, &vops->list, link) { 1510 err = op_check_svm_userptr(vm, op, pt_update_ops); 1511 if (err) { 1512 xe_svm_notifier_unlock(vm); 1513 break; 1514 } 1515 } 1516 1517 return err; 1518 } 1519 #endif 1520 1521 struct xe_pt_stage_unbind_walk { 1522 /** @base: The pagewalk base-class. */ 1523 struct xe_pt_walk base; 1524 1525 /* Input parameters for the walk */ 1526 /** @tile: The tile we're unbinding from. */ 1527 struct xe_tile *tile; 1528 1529 /** 1530 * @modified_start: Walk range start, modified to include any 1531 * shared pagetables that we're the only user of and can thus 1532 * treat as private. 1533 */ 1534 u64 modified_start; 1535 /** @modified_end: Walk range start, modified like @modified_start. */ 1536 u64 modified_end; 1537 1538 /** @prl: Backing pointer to page reclaim list in pt_update_ops */ 1539 struct xe_page_reclaim_list *prl; 1540 1541 /* Output */ 1542 /* @wupd: Structure to track the page-table updates we're building */ 1543 struct xe_walk_update wupd; 1544 }; 1545 1546 /* 1547 * Check whether this range is the only one populating this pagetable, 1548 * and in that case, update the walk range checks so that higher levels don't 1549 * view us as a shared pagetable. 
1550 */ 1551 static bool xe_pt_check_kill(u64 addr, u64 next, unsigned int level, 1552 const struct xe_pt *child, 1553 enum page_walk_action *action, 1554 struct xe_pt_walk *walk) 1555 { 1556 struct xe_pt_stage_unbind_walk *xe_walk = 1557 container_of(walk, typeof(*xe_walk), base); 1558 unsigned int shift = walk->shifts[level]; 1559 u64 size = 1ull << shift; 1560 1561 if (IS_ALIGNED(addr, size) && IS_ALIGNED(next, size) && 1562 ((next - addr) >> shift) == child->num_live) { 1563 u64 size = 1ull << walk->shifts[level + 1]; 1564 1565 *action = ACTION_CONTINUE; 1566 1567 if (xe_walk->modified_start >= addr) 1568 xe_walk->modified_start = round_down(addr, size); 1569 if (xe_walk->modified_end <= next) 1570 xe_walk->modified_end = round_up(next, size); 1571 1572 return true; 1573 } 1574 1575 return false; 1576 } 1577 1578 /* page_size = 2^(reclamation_size + XE_PTE_SHIFT) */ 1579 #define COMPUTE_RECLAIM_ADDRESS_MASK(page_size) \ 1580 ({ \ 1581 BUILD_BUG_ON(!__builtin_constant_p(page_size)); \ 1582 ilog2(page_size) - XE_PTE_SHIFT; \ 1583 }) 1584 1585 static int generate_reclaim_entry(struct xe_tile *tile, 1586 struct xe_page_reclaim_list *prl, 1587 u64 pte, struct xe_pt *xe_child) 1588 { 1589 struct xe_gt *gt = tile->primary_gt; 1590 struct xe_guc_page_reclaim_entry *reclaim_entries = prl->entries; 1591 u64 phys_addr = pte & XE_PTE_ADDR_MASK; 1592 u64 phys_page = phys_addr >> XE_PTE_SHIFT; 1593 int num_entries = prl->num_entries; 1594 u32 reclamation_size; 1595 1596 xe_tile_assert(tile, xe_child->level <= MAX_HUGEPTE_LEVEL); 1597 xe_tile_assert(tile, reclaim_entries); 1598 xe_tile_assert(tile, num_entries < XE_PAGE_RECLAIM_MAX_ENTRIES - 1); 1599 1600 if (!xe_page_reclaim_list_valid(prl)) 1601 return -EINVAL; 1602 1603 /** 1604 * reclamation_size indicates the size of the page to be 1605 * invalidated and flushed from non-coherent cache. 1606 * Page size is computed as 2^(reclamation_size + XE_PTE_SHIFT) bytes. 
1607 * Only 4K, 64K (level 0), and 2M pages are supported by hardware for page reclaim 1608 */ 1609 if (xe_child->level == 0 && !(pte & XE_PTE_PS64)) { 1610 xe_gt_stats_incr(gt, XE_GT_STATS_ID_PRL_4K_ENTRY_COUNT, 1); 1611 reclamation_size = COMPUTE_RECLAIM_ADDRESS_MASK(SZ_4K); /* reclamation_size = 0 */ 1612 xe_tile_assert(tile, phys_addr % SZ_4K == 0); 1613 } else if (xe_child->level == 0) { 1614 xe_gt_stats_incr(gt, XE_GT_STATS_ID_PRL_64K_ENTRY_COUNT, 1); 1615 reclamation_size = COMPUTE_RECLAIM_ADDRESS_MASK(SZ_64K); /* reclamation_size = 4 */ 1616 xe_tile_assert(tile, phys_addr % SZ_64K == 0); 1617 } else if (xe_child->level == 1 && pte & XE_PDE_PS_2M) { 1618 xe_gt_stats_incr(gt, XE_GT_STATS_ID_PRL_2M_ENTRY_COUNT, 1); 1619 reclamation_size = COMPUTE_RECLAIM_ADDRESS_MASK(SZ_2M); /* reclamation_size = 9 */ 1620 xe_tile_assert(tile, phys_addr % SZ_2M == 0); 1621 } else { 1622 xe_page_reclaim_list_abort(tile->primary_gt, prl, 1623 "unsupported PTE level=%u pte=%#llx", 1624 xe_child->level, pte); 1625 return -EINVAL; 1626 } 1627 1628 reclaim_entries[num_entries].qw = 1629 FIELD_PREP(XE_PAGE_RECLAIM_VALID, 1) | 1630 FIELD_PREP(XE_PAGE_RECLAIM_SIZE, reclamation_size) | 1631 FIELD_PREP(XE_PAGE_RECLAIM_ADDR_LO, phys_page) | 1632 FIELD_PREP(XE_PAGE_RECLAIM_ADDR_HI, phys_page >> 20); 1633 prl->num_entries++; 1634 vm_dbg(&tile_to_xe(tile)->drm, 1635 "PRL add entry: level=%u pte=%#llx reclamation_size=%u prl_idx=%d\n", 1636 xe_child->level, pte, reclamation_size, num_entries); 1637 1638 return 0; 1639 } 1640 1641 static int xe_pt_stage_unbind_entry(struct xe_ptw *parent, pgoff_t offset, 1642 unsigned int level, u64 addr, u64 next, 1643 struct xe_ptw **child, 1644 enum page_walk_action *action, 1645 struct xe_pt_walk *walk) 1646 { 1647 struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base); 1648 struct xe_pt_stage_unbind_walk *xe_walk = 1649 container_of(walk, typeof(*xe_walk), base); 1650 struct xe_device *xe = tile_to_xe(xe_walk->tile); 1651 pgoff_t first = 
xe_pt_offset(addr, xe_child->level, walk); 1652 bool killed; 1653 1654 XE_WARN_ON(!*child); 1655 XE_WARN_ON(!level); 1656 /* Check for leaf node */ 1657 if (xe_walk->prl && xe_page_reclaim_list_valid(xe_walk->prl) && 1658 (!xe_child->base.children || !xe_child->base.children[first])) { 1659 struct iosys_map *leaf_map = &xe_child->bo->vmap; 1660 pgoff_t count = xe_pt_num_entries(addr, next, xe_child->level, walk); 1661 1662 for (pgoff_t i = 0; i < count; i++) { 1663 u64 pte = xe_map_rd(xe, leaf_map, (first + i) * sizeof(u64), u64); 1664 int ret; 1665 1666 /* 1667 * In rare scenarios, pte may not be written yet due to racy conditions. 1668 * In such cases, invalidate the PRL and fallback to full PPC invalidation. 1669 */ 1670 if (!pte) { 1671 xe_page_reclaim_list_abort(xe_walk->tile->primary_gt, xe_walk->prl, 1672 "found zero pte at addr=%#llx", addr); 1673 break; 1674 } 1675 1676 /* Ensure it is a defined page */ 1677 xe_tile_assert(xe_walk->tile, 1678 xe_child->level == 0 || 1679 (pte & (XE_PTE_PS64 | XE_PDE_PS_2M | XE_PDPE_PS_1G))); 1680 1681 /* An entry should be added for 64KB but contigious 4K have XE_PTE_PS64 */ 1682 if (pte & XE_PTE_PS64) 1683 i += 15; /* Skip other 15 consecutive 4K pages in the 64K page */ 1684 1685 /* Account for NULL terminated entry on end (-1) */ 1686 if (xe_walk->prl->num_entries < XE_PAGE_RECLAIM_MAX_ENTRIES - 1) { 1687 ret = generate_reclaim_entry(xe_walk->tile, xe_walk->prl, 1688 pte, xe_child); 1689 if (ret) 1690 break; 1691 } else { 1692 /* overflow, mark as invalid */ 1693 xe_page_reclaim_list_abort(xe_walk->tile->primary_gt, xe_walk->prl, 1694 "overflow while adding pte=%#llx", 1695 pte); 1696 break; 1697 } 1698 } 1699 } 1700 1701 killed = xe_pt_check_kill(addr, next, level - 1, xe_child, action, walk); 1702 1703 /* 1704 * Verify PRL is active and if entry is not a leaf pte (base.children conditions), 1705 * there is a potential need to invalidate the PRL if any PTE (num_live) are dropped. 
1706 */ 1707 if (xe_walk->prl && level > 1 && xe_child->num_live && 1708 xe_child->base.children && xe_child->base.children[first]) { 1709 bool covered = xe_pt_covers(addr, next, xe_child->level, &xe_walk->base); 1710 1711 /* 1712 * If aborting page walk early (kill) or page walk completes the full range 1713 * we need to invalidate the PRL. 1714 */ 1715 if (killed || covered) 1716 xe_page_reclaim_list_abort(xe_walk->tile->primary_gt, xe_walk->prl, 1717 "kill at level=%u addr=%#llx next=%#llx num_live=%u", 1718 level, addr, next, xe_child->num_live); 1719 } 1720 1721 return 0; 1722 } 1723 1724 static int 1725 xe_pt_stage_unbind_post_descend(struct xe_ptw *parent, pgoff_t offset, 1726 unsigned int level, u64 addr, u64 next, 1727 struct xe_ptw **child, 1728 enum page_walk_action *action, 1729 struct xe_pt_walk *walk) 1730 { 1731 struct xe_pt_stage_unbind_walk *xe_walk = 1732 container_of(walk, typeof(*xe_walk), base); 1733 struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base); 1734 pgoff_t end_offset; 1735 u64 size = 1ull << walk->shifts[--level]; 1736 int err; 1737 1738 if (!IS_ALIGNED(addr, size)) 1739 addr = xe_walk->modified_start; 1740 if (!IS_ALIGNED(next, size)) 1741 next = xe_walk->modified_end; 1742 1743 /* Parent == *child is the root pt. Don't kill it. 
*/ 1744 if (parent != *child && 1745 xe_pt_check_kill(addr, next, level, xe_child, action, walk)) 1746 return 0; 1747 1748 if (!xe_pt_nonshared_offsets(addr, next, level, walk, action, &offset, 1749 &end_offset)) 1750 return 0; 1751 1752 err = xe_pt_new_shared(&xe_walk->wupd, xe_child, offset, true); 1753 if (err) 1754 return err; 1755 1756 xe_walk->wupd.updates[level].update->qwords = end_offset - offset; 1757 1758 return 0; 1759 } 1760 1761 static const struct xe_pt_walk_ops xe_pt_stage_unbind_ops = { 1762 .pt_entry = xe_pt_stage_unbind_entry, 1763 .pt_post_descend = xe_pt_stage_unbind_post_descend, 1764 }; 1765 1766 /** 1767 * xe_pt_stage_unbind() - Build page-table update structures for an unbind 1768 * operation 1769 * @tile: The tile we're unbinding for. 1770 * @vm: The vm 1771 * @vma: The vma we're unbinding. 1772 * @range: The range we're unbinding. 1773 * @entries: Caller-provided storage for the update structures. 1774 * 1775 * Builds page-table update structures for an unbind operation. The function 1776 * will attempt to remove all page-tables that we're the only user 1777 * of, and for that to work, the unbind operation must be committed in the 1778 * same critical section that blocks racing binds to the same page-table tree. 1779 * 1780 * Return: The number of entries used. 1781 */ 1782 static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, 1783 struct xe_vm *vm, 1784 struct xe_vma *vma, 1785 struct xe_svm_range *range, 1786 struct xe_vm_pgtable_update *entries) 1787 { 1788 u64 start = range ? xe_svm_range_start(range) : xe_vma_start(vma); 1789 u64 end = range ? 
xe_svm_range_end(range) : xe_vma_end(vma); 1790 struct xe_vm_pgtable_update_op *pt_update_op = 1791 container_of(entries, struct xe_vm_pgtable_update_op, entries[0]); 1792 struct xe_pt_stage_unbind_walk xe_walk = { 1793 .base = { 1794 .ops = &xe_pt_stage_unbind_ops, 1795 .shifts = xe_normal_pt_shifts, 1796 .max_level = XE_PT_HIGHEST_LEVEL, 1797 .staging = true, 1798 }, 1799 .tile = tile, 1800 .modified_start = start, 1801 .modified_end = end, 1802 .wupd.entries = entries, 1803 .prl = pt_update_op->prl, 1804 }; 1805 struct xe_pt *pt = vm->pt_root[tile->id]; 1806 1807 (void)xe_pt_walk_shared(&pt->base, pt->level, start, end, 1808 &xe_walk.base); 1809 1810 return xe_walk.wupd.num_used_entries; 1811 } 1812 1813 static void 1814 xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update, 1815 struct xe_tile *tile, struct iosys_map *map, 1816 void *ptr, u32 qword_ofs, u32 num_qwords, 1817 const struct xe_vm_pgtable_update *update) 1818 { 1819 struct xe_vm *vm = pt_update->vops->vm; 1820 u64 empty = __xe_pt_empty_pte(tile, vm, update->pt->level); 1821 int i; 1822 1823 if (map && map->is_iomem) 1824 for (i = 0; i < num_qwords; ++i) 1825 xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) * 1826 sizeof(u64), u64, empty); 1827 else if (map) 1828 memset64(map->vaddr + qword_ofs * sizeof(u64), empty, 1829 num_qwords); 1830 else 1831 memset64(ptr, empty, num_qwords); 1832 } 1833 1834 static void xe_pt_abort_unbind(struct xe_vma *vma, 1835 struct xe_vm_pgtable_update *entries, 1836 u32 num_entries) 1837 { 1838 int i, j; 1839 1840 xe_pt_commit_prepare_locks_assert(vma); 1841 1842 for (i = num_entries - 1; i >= 0; --i) { 1843 struct xe_vm_pgtable_update *entry = &entries[i]; 1844 struct xe_pt *pt = entry->pt; 1845 struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt); 1846 1847 pt->num_live += entry->qwords; 1848 1849 if (!pt->level) 1850 continue; 1851 1852 for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) 1853 pt_dir->staging[j] = 1854 entries[i].pt_entries[j - 
entry->ofs].pt ? 1855 &entries[i].pt_entries[j - entry->ofs].pt->base : NULL; 1856 } 1857 } 1858 1859 static void 1860 xe_pt_commit_prepare_unbind(struct xe_vma *vma, 1861 struct xe_vm_pgtable_update *entries, 1862 u32 num_entries) 1863 { 1864 int i, j; 1865 1866 xe_pt_commit_prepare_locks_assert(vma); 1867 1868 for (i = 0; i < num_entries; ++i) { 1869 struct xe_vm_pgtable_update *entry = &entries[i]; 1870 struct xe_pt *pt = entry->pt; 1871 struct xe_pt_dir *pt_dir; 1872 1873 pt->num_live -= entry->qwords; 1874 if (!pt->level) 1875 continue; 1876 1877 pt_dir = as_xe_pt_dir(pt); 1878 for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) { 1879 entry->pt_entries[j - entry->ofs].pt = 1880 xe_pt_entry_staging(pt_dir, j); 1881 pt_dir->staging[j] = NULL; 1882 } 1883 } 1884 } 1885 1886 static void 1887 xe_pt_update_ops_rfence_interval(struct xe_vm_pgtable_update_ops *pt_update_ops, 1888 u64 start, u64 end) 1889 { 1890 u64 last; 1891 u32 current_op = pt_update_ops->current_op; 1892 struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op]; 1893 int i, level = 0; 1894 1895 for (i = 0; i < pt_op->num_entries; i++) { 1896 const struct xe_vm_pgtable_update *entry = &pt_op->entries[i]; 1897 1898 if (entry->pt->level > level) 1899 level = entry->pt->level; 1900 } 1901 1902 /* Greedy (non-optimal) calculation but simple */ 1903 start = ALIGN_DOWN(start, 0x1ull << xe_pt_shift(level)); 1904 last = ALIGN(end, 0x1ull << xe_pt_shift(level)) - 1; 1905 1906 if (start < pt_update_ops->start) 1907 pt_update_ops->start = start; 1908 if (last > pt_update_ops->last) 1909 pt_update_ops->last = last; 1910 } 1911 1912 static int vma_reserve_fences(struct xe_device *xe, struct xe_vma *vma) 1913 { 1914 int shift = xe_device_get_root_tile(xe)->media_gt ? 
1 : 0; 1915 1916 if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) 1917 return dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, 1918 xe->info.tile_count << shift); 1919 1920 return 0; 1921 } 1922 1923 static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile, 1924 struct xe_vm_pgtable_update_ops *pt_update_ops, 1925 struct xe_vma *vma, bool invalidate_on_bind) 1926 { 1927 u32 current_op = pt_update_ops->current_op; 1928 struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op]; 1929 int err; 1930 1931 xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma)); 1932 xe_bo_assert_held(xe_vma_bo(vma)); 1933 1934 vm_dbg(&xe_vma_vm(vma)->xe->drm, 1935 "Preparing bind, with range [%llx...%llx)\n", 1936 xe_vma_start(vma), xe_vma_end(vma) - 1); 1937 1938 pt_op->vma = NULL; 1939 pt_op->bind = true; 1940 pt_op->rebind = BIT(tile->id) & vma->tile_present; 1941 1942 err = vma_reserve_fences(tile_to_xe(tile), vma); 1943 if (err) 1944 return err; 1945 1946 err = xe_pt_prepare_bind(tile, vma, NULL, pt_op->entries, 1947 &pt_op->num_entries, invalidate_on_bind); 1948 if (!err) { 1949 xe_tile_assert(tile, pt_op->num_entries <= 1950 ARRAY_SIZE(pt_op->entries)); 1951 xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries, 1952 pt_op->num_entries, true); 1953 1954 xe_pt_update_ops_rfence_interval(pt_update_ops, 1955 xe_vma_start(vma), 1956 xe_vma_end(vma)); 1957 ++pt_update_ops->current_op; 1958 pt_update_ops->needs_svm_lock |= xe_vma_is_userptr(vma); 1959 1960 /* 1961 * If rebind, we have to invalidate TLB on !LR vms to invalidate 1962 * cached PTEs point to freed memory. On LR vms this is done 1963 * automatically when the context is re-enabled by the rebind worker, 1964 * or in fault mode it was invalidated on PTE zapping. 1965 * 1966 * If !rebind, and scratch enabled VMs, there is a chance the scratch 1967 * PTE is already cached in the TLB so it needs to be invalidated. 
1968 * On !LR VMs this is done in the ring ops preceding a batch, but on 1969 * LR, in particular on user-space batch buffer chaining, it needs to 1970 * be done here. 1971 */ 1972 if ((!pt_op->rebind && xe_vm_has_scratch(vm) && 1973 xe_vm_in_lr_mode(vm))) 1974 pt_update_ops->needs_invalidation = true; 1975 else if (pt_op->rebind && !xe_vm_in_lr_mode(vm)) 1976 /* We bump also if batch_invalidate_tlb is true */ 1977 vm->tlb_flush_seqno++; 1978 1979 vma->tile_staged |= BIT(tile->id); 1980 pt_op->vma = vma; 1981 xe_pt_commit_prepare_bind(vma, pt_op->entries, 1982 pt_op->num_entries, pt_op->rebind); 1983 } else { 1984 xe_pt_cancel_bind(vma, pt_op->entries, pt_op->num_entries); 1985 } 1986 1987 return err; 1988 } 1989 1990 static int bind_range_prepare(struct xe_vm *vm, struct xe_tile *tile, 1991 struct xe_vm_pgtable_update_ops *pt_update_ops, 1992 struct xe_vma *vma, struct xe_svm_range *range) 1993 { 1994 u32 current_op = pt_update_ops->current_op; 1995 struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op]; 1996 int err; 1997 1998 xe_tile_assert(tile, xe_vma_is_cpu_addr_mirror(vma)); 1999 2000 vm_dbg(&xe_vma_vm(vma)->xe->drm, 2001 "Preparing bind, with range [%lx...%lx)\n", 2002 xe_svm_range_start(range), xe_svm_range_end(range) - 1); 2003 2004 pt_op->vma = NULL; 2005 pt_op->bind = true; 2006 pt_op->rebind = BIT(tile->id) & range->tile_present; 2007 2008 err = xe_pt_prepare_bind(tile, vma, range, pt_op->entries, 2009 &pt_op->num_entries, false); 2010 if (!err) { 2011 xe_tile_assert(tile, pt_op->num_entries <= 2012 ARRAY_SIZE(pt_op->entries)); 2013 xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries, 2014 pt_op->num_entries, true); 2015 2016 xe_pt_update_ops_rfence_interval(pt_update_ops, 2017 xe_svm_range_start(range), 2018 xe_svm_range_end(range)); 2019 ++pt_update_ops->current_op; 2020 pt_update_ops->needs_svm_lock = true; 2021 2022 pt_op->vma = vma; 2023 xe_pt_commit_prepare_bind(vma, pt_op->entries, 2024 pt_op->num_entries, pt_op->rebind); 
2025 } else { 2026 xe_pt_cancel_bind(vma, pt_op->entries, pt_op->num_entries); 2027 } 2028 2029 return err; 2030 } 2031 2032 static int unbind_op_prepare(struct xe_tile *tile, 2033 struct xe_vm_pgtable_update_ops *pt_update_ops, 2034 struct xe_vma *vma) 2035 { 2036 struct xe_device *xe = tile_to_xe(tile); 2037 u32 current_op = pt_update_ops->current_op; 2038 struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op]; 2039 int err; 2040 2041 if (!((vma->tile_present | vma->tile_staged) & BIT(tile->id))) 2042 return 0; 2043 2044 xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma)); 2045 xe_bo_assert_held(xe_vma_bo(vma)); 2046 2047 vm_dbg(&xe_vma_vm(vma)->xe->drm, 2048 "Preparing unbind, with range [%llx...%llx)\n", 2049 xe_vma_start(vma), xe_vma_end(vma) - 1); 2050 2051 pt_op->vma = vma; 2052 pt_op->bind = false; 2053 pt_op->rebind = false; 2054 /* 2055 * Maintain one PRL located in pt_update_ops that all others in unbind op reference. 2056 * Ensure that PRL is allocated only once, and if invalidated, remains an invalidated PRL. 2057 */ 2058 if (xe->info.has_page_reclaim_hw_assist && 2059 xe_page_reclaim_list_is_new(&pt_update_ops->prl)) 2060 xe_page_reclaim_list_alloc_entries(&pt_update_ops->prl); 2061 2062 /* Page reclaim may not be needed due to other features, so skip the corresponding VMA */ 2063 pt_op->prl = (xe_page_reclaim_list_valid(&pt_update_ops->prl) && 2064 !xe_page_reclaim_skip(tile, vma)) ? 
&pt_update_ops->prl : NULL; 2065 2066 err = vma_reserve_fences(tile_to_xe(tile), vma); 2067 if (err) 2068 return err; 2069 2070 pt_op->num_entries = xe_pt_stage_unbind(tile, xe_vma_vm(vma), 2071 vma, NULL, pt_op->entries); 2072 2073 xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries, 2074 pt_op->num_entries, false); 2075 xe_pt_update_ops_rfence_interval(pt_update_ops, xe_vma_start(vma), 2076 xe_vma_end(vma)); 2077 ++pt_update_ops->current_op; 2078 pt_update_ops->needs_svm_lock |= xe_vma_is_userptr(vma); 2079 pt_update_ops->needs_invalidation = true; 2080 2081 xe_pt_commit_prepare_unbind(vma, pt_op->entries, pt_op->num_entries); 2082 2083 return 0; 2084 } 2085 2086 static bool 2087 xe_pt_op_check_range_skip_invalidation(struct xe_vm_pgtable_update_op *pt_op, 2088 struct xe_svm_range *range) 2089 { 2090 struct xe_vm_pgtable_update *update = pt_op->entries; 2091 2092 XE_WARN_ON(!pt_op->num_entries); 2093 2094 /* 2095 * We can't skip the invalidation if we are removing PTEs that span more 2096 * than the range, do some checks to ensure we are removing PTEs that 2097 * are invalid. 
 */

	/* More than one entry means PTEs beyond the range are touched. */
	if (pt_op->num_entries > 1)
		return false;

	/* Level 0 PTEs map exactly the range; safe to skip. */
	if (update->pt->level == 0)
		return true;

	/* A level-1 (2M) entry is only fully covered by a >= 2M range. */
	if (update->pt->level == 1)
		return xe_svm_range_size(range) >= SZ_2M;

	return false;
}

/*
 * unbind_range_prepare() - Prepare the unbind of an SVM range on a tile.
 *
 * Stages the page-table teardown for @range, extends the range-fence
 * interval and decides whether a TLB invalidation is required. Uses
 * XE_INVALID_VMA as the vma marker since SVM ranges have no vma.
 *
 * Return: 0 (a range not present on this tile is a no-op).
 */
static int unbind_range_prepare(struct xe_vm *vm,
				struct xe_tile *tile,
				struct xe_vm_pgtable_update_ops *pt_update_ops,
				struct xe_svm_range *range)
{
	u32 current_op = pt_update_ops->current_op;
	struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];

	if (!(range->tile_present & BIT(tile->id)))
		return 0;

	vm_dbg(&vm->xe->drm,
	       "Preparing unbind, with range [%lx...%lx)\n",
	       xe_svm_range_start(range), xe_svm_range_end(range) - 1);

	pt_op->vma = XE_INVALID_VMA;
	pt_op->bind = false;
	pt_op->rebind = false;
	pt_op->prl = NULL;

	pt_op->num_entries = xe_pt_stage_unbind(tile, vm, NULL, range,
						pt_op->entries);

	xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
				pt_op->num_entries, false);
	xe_pt_update_ops_rfence_interval(pt_update_ops, xe_svm_range_start(range),
					 xe_svm_range_end(range));
	++pt_update_ops->current_op;
	pt_update_ops->needs_svm_lock = true;
	/*
	 * Invalidation is needed unless the mapping was never valid/visible
	 * and the removed PTEs are provably confined to the range.
	 */
	pt_update_ops->needs_invalidation |= xe_vm_has_scratch(vm) ||
		xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
					    range->tile_invalidated) ||
		!xe_pt_op_check_range_skip_invalidation(pt_op, range);

	xe_pt_commit_prepare_unbind(XE_INVALID_VMA, pt_op->entries,
				    pt_op->num_entries);

	return 0;
}

/*
 * op_prepare() - Prepare the PT updates for one VMA operation on a tile.
 *
 * Dispatches on the GPUVA op type to the bind/unbind prepare helpers and
 * records which vm resv usage the update must wait on.
 */
static int op_prepare(struct xe_vm *vm,
		      struct xe_tile *tile,
		      struct xe_vm_pgtable_update_ops *pt_update_ops,
		      struct xe_vma_op *op)
{
	int err = 0;

	xe_vm_assert_held(vm);

	switch (op->base.op) {
	case DRM_GPUVA_OP_MAP:
		/* Skip deferred fault-mode maps and system-allocator vmas. */
		if ((!op->map.immediate && xe_vm_in_fault_mode(vm) &&
		     !op->map.invalidate_on_bind) ||
		    (op->map.vma_flags &
XE_VMA_SYSTEM_ALLOCATOR)) 2166 break; 2167 2168 err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma, 2169 op->map.invalidate_on_bind); 2170 pt_update_ops->wait_vm_kernel = true; 2171 break; 2172 case DRM_GPUVA_OP_REMAP: 2173 { 2174 struct xe_vma *old = gpuva_to_vma(op->base.remap.unmap->va); 2175 2176 if (xe_vma_is_cpu_addr_mirror(old)) 2177 break; 2178 2179 err = unbind_op_prepare(tile, pt_update_ops, old); 2180 2181 if (!err && op->remap.prev) { 2182 err = bind_op_prepare(vm, tile, pt_update_ops, 2183 op->remap.prev, false); 2184 pt_update_ops->wait_vm_bookkeep = true; 2185 } 2186 if (!err && op->remap.next) { 2187 err = bind_op_prepare(vm, tile, pt_update_ops, 2188 op->remap.next, false); 2189 pt_update_ops->wait_vm_bookkeep = true; 2190 } 2191 break; 2192 } 2193 case DRM_GPUVA_OP_UNMAP: 2194 { 2195 struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va); 2196 2197 if (xe_vma_is_cpu_addr_mirror(vma)) 2198 break; 2199 2200 err = unbind_op_prepare(tile, pt_update_ops, vma); 2201 break; 2202 } 2203 case DRM_GPUVA_OP_PREFETCH: 2204 { 2205 struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va); 2206 2207 if (xe_vma_is_cpu_addr_mirror(vma)) { 2208 struct xe_svm_range *range; 2209 unsigned long i; 2210 2211 xa_for_each(&op->prefetch_range.range, i, range) { 2212 err = bind_range_prepare(vm, tile, pt_update_ops, 2213 vma, range); 2214 if (err) 2215 return err; 2216 } 2217 } else { 2218 err = bind_op_prepare(vm, tile, pt_update_ops, vma, false); 2219 pt_update_ops->wait_vm_kernel = true; 2220 } 2221 break; 2222 } 2223 case DRM_GPUVA_OP_DRIVER: 2224 if (op->subop == XE_VMA_SUBOP_MAP_RANGE) { 2225 xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma)); 2226 2227 err = bind_range_prepare(vm, tile, pt_update_ops, 2228 op->map_range.vma, 2229 op->map_range.range); 2230 } else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) { 2231 err = unbind_range_prepare(vm, tile, pt_update_ops, 2232 op->unmap_range.range); 2233 } 2234 break; 2235 default: 2236 
		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
	}

	return err;
}

/* Reset the per-tile accumulator before staging a new batch of PT ops */
static void
xe_pt_update_ops_init(struct xe_vm_pgtable_update_ops *pt_update_ops)
{
	init_llist_head(&pt_update_ops->deferred);
	/* Empty interval (start > last) until the first op extends it */
	pt_update_ops->start = ~0x0ull;
	pt_update_ops->last = 0x0ull;
	xe_page_reclaim_list_init(&pt_update_ops->prl);
}

/**
 * xe_pt_update_ops_prepare() - Prepare PT update operations
 * @tile: Tile of PT update operations
 * @vops: VMA operations
 *
 * Prepare PT update operations which includes updating internal PT state,
 * allocate memory for page tables, populate page table being pruned in, and
 * create PT update operations for leaf insertion / removal.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
{
	struct xe_vm_pgtable_update_ops *pt_update_ops =
		&vops->pt_update_ops[tile->id];
	struct xe_vma_op *op;
	/* Double the fence reservation when a media GT also exists */
	int shift = tile->media_gt ?
		1 : 0;
	int err;

	lockdep_assert_held(&vops->vm->lock);
	xe_vm_assert_held(vops->vm);

	xe_pt_update_ops_init(pt_update_ops);

	/* One fence slot per tile, doubled when a media GT exists */
	err = dma_resv_reserve_fences(xe_vm_resv(vops->vm),
				      tile_to_xe(tile)->info.tile_count << shift);
	if (err)
		return err;

	list_for_each_entry(op, &vops->list, link) {
		err = op_prepare(vops->vm, tile, pt_update_ops, op);

		if (err)
			return err;
	}

	xe_tile_assert(tile, pt_update_ops->current_op <=
		       pt_update_ops->num_ops);

#ifdef TEST_VM_OPS_ERROR
	/* Test-only hook to exercise the error-unwind paths */
	if (vops->inject_error &&
	    vops->vm->xe->vm_inject_error_position == FORCE_OP_ERROR_PREPARE)
		return -ENOSPC;
#endif

	return 0;
}
ALLOW_ERROR_INJECTION(xe_pt_update_ops_prepare, ERRNO);

/*
 * bind_op_commit() - Commit a bind: update VMA state and install fences
 * @vm: The VM.
 * @tile: The tile that was bound.
 * @pt_update_ops: The PT update ops that were run.
 * @vma: The VMA that was bound.
 * @fence: PT-update (or primary-GT invalidation) fence to install.
 * @fence2: Optional second (media-GT invalidation) fence, may be NULL.
 * @invalidate_on_bind: Mark the tile invalidated instead of valid after bind.
 */
static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
			   struct xe_vm_pgtable_update_ops *pt_update_ops,
			   struct xe_vma *vma, struct dma_fence *fence,
			   struct dma_fence *fence2, bool invalidate_on_bind)
{
	xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));

	if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
		/* BO not owned by this VM: fences go on the BO's own resv */
		dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
				   pt_update_ops->wait_vm_bookkeep ?
				   DMA_RESV_USAGE_KERNEL :
				   DMA_RESV_USAGE_BOOKKEEP);
		if (fence2)
			dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2,
					   pt_update_ops->wait_vm_bookkeep ?
					   DMA_RESV_USAGE_KERNEL :
					   DMA_RESV_USAGE_BOOKKEEP);
	}
	/* All WRITE_ONCE pair with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
	WRITE_ONCE(vma->tile_present, vma->tile_present | BIT(tile->id));
	if (invalidate_on_bind)
		WRITE_ONCE(vma->tile_invalidated,
			   vma->tile_invalidated | BIT(tile->id));
	else
		WRITE_ONCE(vma->tile_invalidated,
			   vma->tile_invalidated & ~BIT(tile->id));
	/* Bind is no longer in flight for this tile */
	vma->tile_staged &= ~BIT(tile->id);
	if (xe_vma_is_userptr(vma)) {
		xe_svm_assert_held_read(vm);
		to_userptr_vma(vma)->userptr.initial_bind = true;
	}

	/*
	 * Kick rebind worker if this bind triggers preempt fences and not in
	 * the rebind worker
	 */
	if (pt_update_ops->wait_vm_bookkeep &&
	    xe_vm_in_preempt_fence_mode(vm) &&
	    !current->mm)
		xe_vm_queue_rebind_worker(vm);
}

/*
 * unbind_op_commit() - Commit an unbind: update VMA state and install fences
 * @vm: The VM.
 * @tile: The tile that was unbound.
 * @pt_update_ops: The PT update ops that were run.
 * @vma: The VMA that was unbound.
 * @fence: PT-update (or primary-GT invalidation) fence to install.
 * @fence2: Optional second (media-GT invalidation) fence, may be NULL.
 */
static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
			     struct xe_vm_pgtable_update_ops *pt_update_ops,
			     struct xe_vma *vma, struct dma_fence *fence,
			     struct dma_fence *fence2)
{
	xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));

	if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
		/* BO not owned by this VM: fences go on the BO's own resv */
		dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
				   pt_update_ops->wait_vm_bookkeep ?
				   DMA_RESV_USAGE_KERNEL :
				   DMA_RESV_USAGE_BOOKKEEP);
		if (fence2)
			dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2,
					   pt_update_ops->wait_vm_bookkeep ?
2357 DMA_RESV_USAGE_KERNEL : 2358 DMA_RESV_USAGE_BOOKKEEP); 2359 } 2360 vma->tile_present &= ~BIT(tile->id); 2361 if (!vma->tile_present) { 2362 list_del_init(&vma->combined_links.rebind); 2363 if (xe_vma_is_userptr(vma)) { 2364 xe_svm_assert_held_read(vm); 2365 2366 spin_lock(&vm->userptr.invalidated_lock); 2367 list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link); 2368 spin_unlock(&vm->userptr.invalidated_lock); 2369 } 2370 } 2371 } 2372 2373 static void range_present_and_invalidated_tile(struct xe_vm *vm, 2374 struct xe_svm_range *range, 2375 u8 tile_id) 2376 { 2377 /* All WRITE_ONCE pair with READ_ONCE in xe_vm_has_valid_gpu_mapping() */ 2378 2379 lockdep_assert_held(&vm->svm.gpusvm.notifier_lock); 2380 2381 WRITE_ONCE(range->tile_present, range->tile_present | BIT(tile_id)); 2382 WRITE_ONCE(range->tile_invalidated, range->tile_invalidated & ~BIT(tile_id)); 2383 } 2384 2385 static void op_commit(struct xe_vm *vm, 2386 struct xe_tile *tile, 2387 struct xe_vm_pgtable_update_ops *pt_update_ops, 2388 struct xe_vma_op *op, struct dma_fence *fence, 2389 struct dma_fence *fence2) 2390 { 2391 xe_vm_assert_held(vm); 2392 2393 switch (op->base.op) { 2394 case DRM_GPUVA_OP_MAP: 2395 if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) || 2396 (op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR)) 2397 break; 2398 2399 bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence, 2400 fence2, op->map.invalidate_on_bind); 2401 break; 2402 case DRM_GPUVA_OP_REMAP: 2403 { 2404 struct xe_vma *old = gpuva_to_vma(op->base.remap.unmap->va); 2405 2406 if (xe_vma_is_cpu_addr_mirror(old)) 2407 break; 2408 2409 unbind_op_commit(vm, tile, pt_update_ops, old, fence, fence2); 2410 2411 if (op->remap.prev) 2412 bind_op_commit(vm, tile, pt_update_ops, op->remap.prev, 2413 fence, fence2, false); 2414 if (op->remap.next) 2415 bind_op_commit(vm, tile, pt_update_ops, op->remap.next, 2416 fence, fence2, false); 2417 break; 2418 } 2419 case DRM_GPUVA_OP_UNMAP: 2420 { 2421 struct xe_vma *vma = 
			gpuva_to_vma(op->base.unmap.va);

		if (!xe_vma_is_cpu_addr_mirror(vma))
			unbind_op_commit(vm, tile, pt_update_ops, vma, fence,
					 fence2);
		break;
	}
	case DRM_GPUVA_OP_PREFETCH:
	{
		struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);

		if (xe_vma_is_cpu_addr_mirror(vma)) {
			struct xe_svm_range *range = NULL;
			unsigned long i;

			/* Mark every prefetched SVM range present on this tile */
			xa_for_each(&op->prefetch_range.range, i, range)
				range_present_and_invalidated_tile(vm, range, tile->id);
		} else {
			bind_op_commit(vm, tile, pt_update_ops, vma, fence,
				       fence2, false);
		}
		break;
	}
	case DRM_GPUVA_OP_DRIVER:
	{
		/* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
		if (op->subop == XE_VMA_SUBOP_MAP_RANGE)
			range_present_and_invalidated_tile(vm, op->map_range.range, tile->id);
		else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
			WRITE_ONCE(op->unmap_range.range->tile_present,
				   op->unmap_range.range->tile_present &
				   ~BIT(tile->id));

		break;
	}
	default:
		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
	}
}

/* PT-update callbacks for the normal (non-SVM/userptr) path */
static const struct xe_migrate_pt_update_ops migrate_ops = {
	.populate = xe_vm_populate_pgtable,
	.clear = xe_migrate_clear_pgtable_callback,
	.pre_commit = xe_pt_pre_commit,
};

/* SVM/userptr path: differs only in the pre-commit hook */
#if IS_ENABLED(CONFIG_DRM_GPUSVM)
static const struct xe_migrate_pt_update_ops svm_userptr_migrate_ops = {
	.populate = xe_vm_populate_pgtable,
	.clear = xe_migrate_clear_pgtable_callback,
	.pre_commit = xe_pt_svm_userptr_pre_commit,
};
#else
static const struct xe_migrate_pt_update_ops svm_userptr_migrate_ops;
#endif

/* Pick the TLB-invalidation dep scheduler on @q matching @gt's type */
static struct xe_dep_scheduler *to_dep_scheduler(struct xe_exec_queue *q,
						 struct xe_gt *gt)
{
	if (xe_gt_is_media_type(gt))
		return q->tlb_inval[XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT].dep_scheduler;

	return
		q->tlb_inval[XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT].dep_scheduler;
}

/**
 * xe_pt_update_ops_run() - Run PT update operations
 * @tile: Tile of PT update operations
 * @vops: VMA operations
 *
 * Run PT update operations which includes committing internal PT state changes,
 * creating job for PT update operations for leaf insertion / removal, and
 * installing job fence in various places.
 *
 * Return: fence on success, negative ERR_PTR on error.
 */
struct dma_fence *
xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
{
	struct xe_vm *vm = vops->vm;
	struct xe_vm_pgtable_update_ops *pt_update_ops =
		&vops->pt_update_ops[tile->id];
	struct xe_exec_queue *q = pt_update_ops->q;
	struct dma_fence *fence, *ifence = NULL, *mfence = NULL;
	struct xe_tlb_inval_job *ijob = NULL, *mjob = NULL;
	struct xe_range_fence *rfence;
	struct xe_vma_op *op;
	int err = 0, i;
	struct xe_migrate_pt_update update = {
		/* SVM/userptr ops need the SVM-aware pre-commit hook */
		.ops = pt_update_ops->needs_svm_lock ?
			&svm_userptr_migrate_ops :
			&migrate_ops,
		.vops = vops,
		.tile_id = tile->id,
	};

	lockdep_assert_held(&vm->lock);
	xe_vm_assert_held(vm);

	/* Nothing staged: only legal in fault mode, return a signaled stub */
	if (!pt_update_ops->current_op) {
		xe_tile_assert(tile, xe_vm_in_fault_mode(vm));

		return dma_fence_get_stub();
	}

#ifdef TEST_VM_OPS_ERROR
	/* Test-only hook to exercise the error-unwind paths */
	if (vops->inject_error &&
	    vm->xe->vm_inject_error_position == FORCE_OP_ERROR_RUN)
		return ERR_PTR(-ENOSPC);
#endif

	if (pt_update_ops->needs_invalidation) {
		struct xe_dep_scheduler *dep_scheduler =
			to_dep_scheduler(q, tile->primary_gt);

		ijob = xe_tlb_inval_job_create(q, &tile->primary_gt->tlb_inval,
					       dep_scheduler, vm,
					       pt_update_ops->start,
					       pt_update_ops->last,
					       XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
		if (IS_ERR(ijob)) {
			err = PTR_ERR(ijob);
			goto kill_vm_tile1;
		}
		update.ijob = ijob;
		/*
		 * Only add page reclaim for the primary GT. Media GT does not have
		 * any PPC to flush, so enabling the PPC flush bit for media is
		 * effectively a NOP and provides no performance benefit nor
		 * interfere with primary GT.
		 */
		if (xe_page_reclaim_list_valid(&pt_update_ops->prl)) {
			xe_tlb_inval_job_add_page_reclaim(ijob, &pt_update_ops->prl);
			/* Release ref from alloc, job will now handle it */
			xe_page_reclaim_list_invalidate(&pt_update_ops->prl);
		}

		if (tile->media_gt) {
			dep_scheduler = to_dep_scheduler(q, tile->media_gt);

			mjob = xe_tlb_inval_job_create(q,
						       &tile->media_gt->tlb_inval,
						       dep_scheduler, vm,
						       pt_update_ops->start,
						       pt_update_ops->last,
						       XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT);
			if (IS_ERR(mjob)) {
				err = PTR_ERR(mjob);
				goto free_ijob;
			}
			update.mjob = mjob;
		}
	}

	rfence = kzalloc_obj(*rfence);
	if (!rfence) {
		err = -ENOMEM;
		goto free_ijob;
	}

	fence = xe_migrate_update_pgtables(tile->migrate, &update);
	if (IS_ERR(fence)) {
		err = PTR_ERR(fence);
		goto free_rfence;
	}

	/* Point of no return - VM killed if failure after this */
	for (i = 0; i < pt_update_ops->current_op; ++i) {
		struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];

		xe_pt_commit(pt_op->vma, pt_op->entries,
			     pt_op->num_entries, &pt_update_ops->deferred);
		pt_op->vma = NULL;	/* skip in xe_pt_update_ops_abort */
	}

	/* On insert failure, fall back to waiting on the fence directly */
	if (xe_range_fence_insert(&vm->rftree[tile->id], rfence,
				  &xe_range_fence_kfree_ops,
				  pt_update_ops->start,
				  pt_update_ops->last, fence))
		dma_fence_wait(fence, false);

	if (ijob)
		ifence = xe_tlb_inval_job_push(ijob, tile->migrate, fence);
	if (mjob)
		mfence = xe_tlb_inval_job_push(mjob, tile->migrate, fence);

	/* Install the most derived fence(s) in the VM's reservation object */
	if (!mjob && !ijob) {
		dma_resv_add_fence(xe_vm_resv(vm), fence,
				   pt_update_ops->wait_vm_bookkeep ?
				   DMA_RESV_USAGE_KERNEL :
				   DMA_RESV_USAGE_BOOKKEEP);

		list_for_each_entry(op, &vops->list, link)
			op_commit(vops->vm, tile, pt_update_ops, op, fence, NULL);
	} else if (ijob && !mjob) {
		/* Primary-GT invalidation only */
		dma_resv_add_fence(xe_vm_resv(vm), ifence,
				   pt_update_ops->wait_vm_bookkeep ?
				   DMA_RESV_USAGE_KERNEL :
				   DMA_RESV_USAGE_BOOKKEEP);

		list_for_each_entry(op, &vops->list, link)
			op_commit(vops->vm, tile, pt_update_ops, op, ifence, NULL);
	} else {
		/* Both primary- and media-GT invalidation fences */
		dma_resv_add_fence(xe_vm_resv(vm), ifence,
				   pt_update_ops->wait_vm_bookkeep ?
				   DMA_RESV_USAGE_KERNEL :
				   DMA_RESV_USAGE_BOOKKEEP);

		dma_resv_add_fence(xe_vm_resv(vm), mfence,
				   pt_update_ops->wait_vm_bookkeep ?
				   DMA_RESV_USAGE_KERNEL :
				   DMA_RESV_USAGE_BOOKKEEP);

		list_for_each_entry(op, &vops->list, link)
			op_commit(vops->vm, tile, pt_update_ops, op, ifence,
				  mfence);
	}

	if (pt_update_ops->needs_svm_lock)
		xe_svm_notifier_unlock(vm);

	/*
	 * The last fence is only used for zero bind queue idling; migrate
	 * queues are not exposed to user space.
	 */
	if (!(q->flags & EXEC_QUEUE_FLAG_MIGRATE))
		xe_exec_queue_last_fence_set(q, vm, fence);

	/* Jobs/fences keep their own refs past this point */
	xe_tlb_inval_job_put(mjob);
	xe_tlb_inval_job_put(ijob);
	dma_fence_put(ifence);
	dma_fence_put(mfence);

	return fence;

free_rfence:
	kfree(rfence);
free_ijob:
	xe_tlb_inval_job_put(mjob);
	xe_tlb_inval_job_put(ijob);
kill_vm_tile1:
	/* Tile 0 failures are recoverable; later tiles leave the VM torn */
	if (err != -EAGAIN && err != -ENODATA && tile->id)
		xe_vm_kill(vops->vm, false);

	return ERR_PTR(err);
}
ALLOW_ERROR_INJECTION(xe_pt_update_ops_run, ERRNO);

/**
 * xe_pt_update_ops_fini() - Finish PT update operations
 * @tile: Tile of PT update operations
 * @vops: VMA operations
 *
 * Finish PT update operations by committing to destroy page table memory
 */
void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops)
{
	struct xe_vm_pgtable_update_ops *pt_update_ops =
		&vops->pt_update_ops[tile->id];
	int i;

	/* NOTE(review): drops prl entry refs; presumably taken at prepare time */
	xe_page_reclaim_entries_put(pt_update_ops->prl.entries);

	lockdep_assert_held(&vops->vm->lock);
	xe_vm_assert_held(vops->vm);

	for (i = 0; i < pt_update_ops->current_op; ++i) {
		struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];

		xe_pt_free_bind(pt_op->entries, pt_op->num_entries);
	}
	/* Release page-table BOs deferred during commit */
	xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred);
}

/**
 * xe_pt_update_ops_abort() - Abort PT update operations
 * @tile: Tile of PT update operations
 * @vops: VMA operations
 *
 * Abort PT update operations by unwinding internal PT state
 */
void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops)
{
	struct xe_vm_pgtable_update_ops *pt_update_ops =
		&vops->pt_update_ops[tile->id];
	int i;

	lockdep_assert_held(&vops->vm->lock);
	xe_vm_assert_held(vops->vm);

	/* Unwind in reverse order of preparation */
	for (i = pt_update_ops->num_ops - 1; i >= 0; --i) {
		struct xe_vm_pgtable_update_op *pt_op =
			&pt_update_ops->ops[i];

		/*
		 * Ops already committed by xe_pt_update_ops_run() have vma
		 * cleared; skip those and slots beyond the last prepared op.
		 */
		if (!pt_op->vma || i >= pt_update_ops->current_op)
			continue;

		if (pt_op->bind)
			xe_pt_abort_bind(pt_op->vma, pt_op->entries,
					 pt_op->num_entries,
					 pt_op->rebind);
		else
			xe_pt_abort_unbind(pt_op->vma, pt_op->entries,
					   pt_op->num_entries);
	}

	xe_pt_update_ops_fini(tile, vops);
}