// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_pt.h"

#include "regs/xe_gtt_defs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_drm_client.h"
#include "xe_exec_queue.h"
#include "xe_gt.h"
#include "xe_gt_stats.h"
#include "xe_migrate.h"
#include "xe_page_reclaim.h"
#include "xe_pt_types.h"
#include "xe_pt_walk.h"
#include "xe_res_cursor.h"
#include "xe_sched_job.h"
#include "xe_svm.h"
#include "xe_sync.h"
#include "xe_tlb_inval_job.h"
#include "xe_trace.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_userptr.h"
#include "xe_vm.h"

struct xe_pt_dir {
	struct xe_pt pt;
	/** @children: Array of page-table child nodes */
	struct xe_ptw *children[XE_PDES];
	/** @staging: Array of page-table staging nodes */
	struct xe_ptw *staging[XE_PDES];
};

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define xe_pt_set_addr(__xe_pt, __addr) ((__xe_pt)->addr = (__addr))
#define xe_pt_addr(__xe_pt) ((__xe_pt)->addr)
#else
#define xe_pt_set_addr(__xe_pt, __addr)
#define xe_pt_addr(__xe_pt) 0ull
#endif

static const u64 xe_normal_pt_shifts[] = {12, 21, 30, 39, 48};
static const u64 xe_compact_pt_shifts[] = {16, 21, 30, 39, 48};

#define XE_PT_HIGHEST_LEVEL	(ARRAY_SIZE(xe_normal_pt_shifts) - 1)

static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt)
{
	return container_of(pt, struct xe_pt_dir, pt);
}

static struct xe_pt *
xe_pt_entry_staging(struct xe_pt_dir *pt_dir, unsigned int index)
{
	return container_of(pt_dir->staging[index], struct xe_pt, base);
}

static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
			     unsigned int level)
{
	struct xe_device *xe = tile_to_xe(tile);
	u16 pat_index = xe->pat.idx[XE_CACHE_WB];
	u8 id = tile->id;

	if (!xe_vm_has_scratch(vm))
		return 0;

	if (level > MAX_HUGEPTE_LEVEL)
		return vm->pt_ops->pde_encode_bo(vm->scratch_pt[id][level - 1]->bo,
						 0);

	return vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0) |
		XE_PTE_NULL;
}

static void xe_pt_free(struct xe_pt *pt)
{
	if (pt->level)
		kfree(as_xe_pt_dir(pt));
	else
		kfree(pt);
}

/**
 * xe_pt_create() - Create a page-table.
 * @vm: The vm to create for.
 * @tile: The tile to create for.
 * @level: The page-table level.
 * @exec: The drm_exec object used to lock the vm.
 *
 * Allocate and initialize a single struct xe_pt metadata structure. Also
 * create the corresponding page-table bo, but don't initialize it. If the
 * level is greater than zero, then it's assumed to be a directory page-
 * table and the directory structure is also allocated and initialized to
 * NULL pointers.
 *
 * Return: A valid struct xe_pt pointer on success, Pointer error code on
 * error.
 */
struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
			   unsigned int level, struct drm_exec *exec)
{
	struct xe_pt *pt;
	struct xe_bo *bo;
	u32 bo_flags;
	int err;

	if (level) {
		struct xe_pt_dir *dir = kzalloc_obj(*dir, GFP_KERNEL);

		pt = (dir) ? &dir->pt : NULL;
	} else {
		pt = kzalloc_obj(*pt, GFP_KERNEL);
	}
	if (!pt)
		return ERR_PTR(-ENOMEM);

	bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) |
		   XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
		   XE_BO_FLAG_NO_RESV_EVICT | XE_BO_FLAG_PAGETABLE;
	if (vm->xef) /* userspace */
		bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE | XE_BO_FLAG_FORCE_USER_VRAM;

	pt->level = level;

	drm_WARN_ON(&vm->xe->drm, IS_ERR_OR_NULL(exec));
	bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
				  ttm_bo_type_kernel,
				  bo_flags, exec);
	if (IS_ERR(bo)) {
		err = PTR_ERR(bo);
		goto err_kfree;
	}
	pt->bo = bo;
	pt->base.children = level ? as_xe_pt_dir(pt)->children : NULL;
	pt->base.staging = level ? as_xe_pt_dir(pt)->staging : NULL;

	if (vm->xef)
		xe_drm_client_add_bo(vm->xef->client, pt->bo);
	xe_tile_assert(tile, level <= XE_VM_MAX_LEVEL);

	return pt;

err_kfree:
	xe_pt_free(pt);
	return ERR_PTR(err);
}
ALLOW_ERROR_INJECTION(xe_pt_create, ERRNO);

/**
 * xe_pt_populate_empty() - Populate a page-table bo with scratch- or zero
 * entries.
 * @tile: The tile the scratch pagetable of which to use.
 * @vm: The vm we populate for.
 * @pt: The pagetable the bo of which to initialize.
 *
 * Populate the page-table bo of @pt with entries pointing into the tile's
 * scratch page-table tree if any. Otherwise populate with zeros.
 */
void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
			  struct xe_pt *pt)
{
	struct iosys_map *map = &pt->bo->vmap;
	u64 empty;
	int i;

	if (!xe_vm_has_scratch(vm)) {
		/*
		 * FIXME: Some memory is already allocated to zero?
		 * Find out which memory that is and avoid this memset...
		 */
		xe_map_memset(vm->xe, map, 0, 0, SZ_4K);
	} else {
		empty = __xe_pt_empty_pte(tile, vm, pt->level);
		for (i = 0; i < XE_PDES; i++)
			xe_pt_write(vm->xe, map, i, empty);
	}
}
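/*
 * A minimal usage sketch of the helpers above (illustrative only, not an
 * actual driver path; it assumes the caller already holds the vm lock and a
 * valid drm_exec transaction as xe_pt_create() requires):
 *
 *	struct xe_pt *pt;
 *
 *	pt = xe_pt_create(vm, tile, 0, exec);
 *	if (IS_ERR(pt))
 *		return PTR_ERR(pt);
 *
 *	xe_pt_populate_empty(tile, vm, pt);
 *	...
 *	xe_pt_destroy(pt, vm->flags, NULL);
 *
 * Passing a NULL @deferred to xe_pt_destroy() requests an immediate put of
 * the page-table bo instead of deferring it to an llist.
 */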
/**
 * xe_pt_shift() - Return the ilog2 value of the size of the address range of
 * a page-table at a certain level.
 * @level: The level.
 *
 * Return: The ilog2 value of the size of the address range of a page-table
 * at level @level.
 */
unsigned int xe_pt_shift(unsigned int level)
{
	return XE_PTE_SHIFT + XE_PDE_SHIFT * level;
}

/**
 * xe_pt_destroy() - Destroy a page-table tree.
 * @pt: The root of the page-table tree to destroy.
 * @flags: vm flags. Currently unused.
 * @deferred: List head of lockless list for deferred putting. NULL for
 * immediate putting.
 *
 * Puts the page-table bo, recursively calls xe_pt_destroy on all children
 * and finally frees @pt. TODO: Can we remove the @flags argument?
 */
void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
{
	int i;

	if (!pt)
		return;

	XE_WARN_ON(!list_empty(&pt->bo->ttm.base.gpuva.list));
	xe_bo_unpin(pt->bo);
	xe_bo_put_deferred(pt->bo, deferred);

	if (pt->level > 0 && pt->num_live) {
		struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);

		for (i = 0; i < XE_PDES; i++) {
			if (xe_pt_entry_staging(pt_dir, i))
				xe_pt_destroy(xe_pt_entry_staging(pt_dir, i), flags,
					      deferred);
		}
	}
	xe_pt_free(pt);
}

/**
 * xe_pt_clear() - Clear a page-table.
 * @xe: xe device.
 * @pt: The page-table.
 *
 * Clears page-table by setting to zero.
235 */ 236 void xe_pt_clear(struct xe_device *xe, struct xe_pt *pt) 237 { 238 struct iosys_map *map = &pt->bo->vmap; 239 240 xe_map_memset(xe, map, 0, 0, SZ_4K); 241 } 242 243 /** 244 * DOC: Pagetable building 245 * 246 * Below we use the term "page-table" for both page-directories, containing 247 * pointers to lower level page-directories or page-tables, and level 0 248 * page-tables that contain only page-table-entries pointing to memory pages. 249 * 250 * When inserting an address range in an already existing page-table tree 251 * there will typically be a set of page-tables that are shared with other 252 * address ranges, and a set that are private to this address range. 253 * The set of shared page-tables can be at most two per level, 254 * and those can't be updated immediately because the entries of those 255 * page-tables may still be in use by the gpu for other mappings. Therefore 256 * when inserting entries into those, we instead stage those insertions by 257 * adding insertion data into struct xe_vm_pgtable_update structures. This 258 * data, (subtrees for the cpu and page-table-entries for the gpu) is then 259 * added in a separate commit step. CPU-data is committed while still under the 260 * vm lock, the object lock and for userptr, the notifier lock in read mode. 261 * The GPU async data is committed either by the GPU or CPU after fulfilling 262 * relevant dependencies. 263 * For non-shared page-tables (and, in fact, for shared ones that aren't 264 * existing at the time of staging), we add the data in-place without the 265 * special update structures. This private part of the page-table tree will 266 * remain disconnected from the vm page-table tree until data is committed to 267 * the shared page tables of the vm tree in the commit phase. 268 */ 269 270 struct xe_pt_update { 271 /** @update: The update structure we're building for this parent. */ 272 struct xe_vm_pgtable_update *update; 273 /** @parent: The parent. Used to detect a parent change. */ 274 struct xe_pt *parent; 275 /** @preexisting: Whether the parent was pre-existing or allocated */ 276 bool preexisting; 277 }; 278 279 /** 280 * struct xe_pt_stage_bind_walk - Walk state for the stage_bind walk. 281 */ 282 struct xe_pt_stage_bind_walk { 283 /** @base: The base class. */ 284 struct xe_pt_walk base; 285 286 /* Input parameters for the walk */ 287 /** @vm: The vm we're building for. */ 288 struct xe_vm *vm; 289 /** @tile: The tile we're building for. */ 290 struct xe_tile *tile; 291 /** @default_vram_pte: PTE flag only template for VRAM. No address is associated */ 292 u64 default_vram_pte; 293 /** @default_system_pte: PTE flag only template for System. No address is associated */ 294 u64 default_system_pte; 295 /** @dma_offset: DMA offset to add to the PTE. */ 296 u64 dma_offset; 297 /** 298 * @needs_64K: This address range enforces 64K alignment and 299 * granularity on VRAM. 300 */ 301 bool needs_64K; 302 /** @clear_pt: clear page table entries during the bind walk */ 303 bool clear_pt; 304 /** 305 * @vma: VMA being mapped 306 */ 307 struct xe_vma *vma; 308 309 /* Also input, but is updated during the walk*/ 310 /** @curs: The DMA address cursor. */ 311 struct xe_res_cursor *curs; 312 /** @va_curs_start: The Virtual address corresponding to @curs->start */ 313 u64 va_curs_start; 314 315 /* Output */ 316 /** @wupd: Walk output data for page-table updates. */ 317 struct xe_walk_update { 318 /** @wupd.entries: Caller provided storage. 
 */
		struct xe_vm_pgtable_update *entries;
		/** @wupd.num_used_entries: Number of update @entries used. */
		unsigned int num_used_entries;
		/** @wupd.updates: Tracks the update entry at a given level */
		struct xe_pt_update updates[XE_VM_MAX_LEVEL + 1];
	} wupd;

	/* Walk state */
	/**
	 * @l0_end_addr: The end address of the current l0 leaf. Used for
	 * 64K granularity detection.
	 */
	u64 l0_end_addr;
	/** @addr_64K: The start address of the current 64K chunk. */
	u64 addr_64K;
	/** @found_64K: Whether @addr_64K actually points to a 64K chunk. */
	bool found_64K;
};

static int
xe_pt_new_shared(struct xe_walk_update *wupd, struct xe_pt *parent,
		 pgoff_t offset, bool alloc_entries)
{
	struct xe_pt_update *upd = &wupd->updates[parent->level];
	struct xe_vm_pgtable_update *entry;

	/*
	 * For *each level*, we can only have one active
	 * struct xe_pt_update at any one time. Once we move on to a
	 * new parent and page-directory, the old one is complete, and
	 * updates are either already stored in the build tree or in
	 * @wupd->entries
	 */
	if (likely(upd->parent == parent))
		return 0;

	upd->parent = parent;
	upd->preexisting = true;

	if (wupd->num_used_entries == XE_VM_MAX_LEVEL * 2 + 1)
		return -EINVAL;

	entry = wupd->entries + wupd->num_used_entries++;
	upd->update = entry;
	entry->ofs = offset;
	entry->pt_bo = parent->bo;
	entry->pt = parent;
	entry->flags = 0;
	entry->qwords = 0;
	entry->pt_bo->update_index = -1;

	if (alloc_entries) {
		entry->pt_entries = kmalloc_objs(*entry->pt_entries, XE_PDES,
						 GFP_KERNEL);
		if (!entry->pt_entries)
			return -ENOMEM;
	}

	return 0;
}

/*
 * NOTE: This is a very frequently called function so we allow ourselves
 * to annotate (using branch prediction hints) the fastpath of updating a
 * non-pre-existing pagetable with leaf ptes.
 */
static int
xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent,
		   pgoff_t offset, struct xe_pt *xe_child, u64 pte)
{
	struct xe_pt_update *upd = &xe_walk->wupd.updates[parent->level];
	struct xe_pt_update *child_upd = xe_child ?
		&xe_walk->wupd.updates[xe_child->level] : NULL;
	int ret;

	ret = xe_pt_new_shared(&xe_walk->wupd, parent, offset, true);
	if (unlikely(ret))
		return ret;

	/*
	 * Register this new pagetable so that it won't be recognized as
	 * a shared pagetable by a subsequent insertion.
	 */
	if (unlikely(child_upd)) {
		child_upd->update = NULL;
		child_upd->parent = xe_child;
		child_upd->preexisting = false;
	}

	if (likely(!upd->preexisting)) {
		/* Continue building a non-connected subtree. */
		struct iosys_map *map = &parent->bo->vmap;

		if (unlikely(xe_child)) {
			parent->base.children[offset] = &xe_child->base;
			parent->base.staging[offset] = &xe_child->base;
		}

		xe_pt_write(xe_walk->vm->xe, map, offset, pte);
		parent->num_live++;
	} else {
		/* Shared pt. Stage update. */
		unsigned int idx;
		struct xe_vm_pgtable_update *entry = upd->update;

		idx = offset - entry->ofs;
		entry->pt_entries[idx].pt = xe_child;
		entry->pt_entries[idx].pte = pte;
		entry->qwords++;
	}

	return 0;
}
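/*
 * Sketch of a staged (shared) update as built above (illustrative): when the
 * parent page-table pre-exists, nothing is written to it here. The new PTE
 * values are parked in the update entry and only written out in the commit
 * phase:
 *
 *	idx = offset - entry->ofs;
 *	entry->pt_entries[idx].pte	value to write
 *	entry->ofs + idx		destination qword in entry->pt_bo
 *
 * Since only a couple of shared page-tables can exist per level (see the
 * "Pagetable building" DOC above), the caller-provided @wupd->entries
 * storage is sized for XE_VM_MAX_LEVEL * 2 + 1 update entries, and
 * xe_pt_new_shared() returns -EINVAL rather than writing past that limit.
 */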
static bool xe_pt_hugepte_possible(u64 addr, u64 next, unsigned int level,
				   struct xe_pt_stage_bind_walk *xe_walk)
{
	u64 size, dma;

	if (level > MAX_HUGEPTE_LEVEL)
		return false;

	/* Does the virtual range requested cover a huge pte? */
	if (!xe_pt_covers(addr, next, level, &xe_walk->base))
		return false;

	/* Does the DMA segment cover the whole pte? */
	if (next - xe_walk->va_curs_start > xe_walk->curs->size)
		return false;

	/* Null VMAs do not have dma addresses */
	if (xe_vma_is_null(xe_walk->vma))
		return true;

	/* If we are clearing the page table, there are no dma addresses */
	if (xe_walk->clear_pt)
		return true;

	/* Is the DMA address huge PTE size aligned? */
	size = next - addr;
	dma = addr - xe_walk->va_curs_start + xe_res_dma(xe_walk->curs);

	return IS_ALIGNED(dma, size);
}

/*
 * Scan the requested mapping to check whether it can be done entirely
 * with 64K PTEs.
 */
static bool
xe_pt_scan_64K(u64 addr, u64 next, struct xe_pt_stage_bind_walk *xe_walk)
{
	struct xe_res_cursor curs = *xe_walk->curs;

	if (!IS_ALIGNED(addr, SZ_64K))
		return false;

	if (next > xe_walk->l0_end_addr)
		return false;

	/* Null VMAs do not have dma addresses */
	if (xe_vma_is_null(xe_walk->vma))
		return true;

	xe_res_next(&curs, addr - xe_walk->va_curs_start);
	for (; addr < next; addr += SZ_64K) {
		if (!IS_ALIGNED(xe_res_dma(&curs), SZ_64K) || curs.size < SZ_64K)
			return false;

		xe_res_next(&curs, SZ_64K);
	}

	return addr == next;
}

/*
 * For non-compact "normal" 4K level-0 pagetables, we want to try to group
 * addresses together in 64K-contiguous regions to add a 64K TLB hint for the
 * device to the PTE.
 * This function determines whether the address is part of such a
 * segment. For VRAM in normal pagetables, this is strictly necessary on
 * some devices.
 */
static bool
xe_pt_is_pte_ps64K(u64 addr, u64 next, struct xe_pt_stage_bind_walk *xe_walk)
{
	/* Address is within an already found 64k region */
	if (xe_walk->found_64K && addr - xe_walk->addr_64K < SZ_64K)
		return true;

	xe_walk->found_64K = xe_pt_scan_64K(addr, addr + SZ_64K, xe_walk);
	xe_walk->addr_64K = addr;

	return xe_walk->found_64K;
}
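/*
 * Worked example for the 64K detection above (illustrative, addresses
 * hypothetical): a level-0 chunk at GPU VA 0x10000 only qualifies for the
 * XE_PTE_PS64 hint if
 *
 *	IS_ALIGNED(0x10000, SZ_64K)		(virtual alignment)
 *	0x10000 + SZ_64K <= l0_end_addr		(stays within this L0 table)
 *	the 64K chunk is DMA-contiguous and starts 64K aligned in DMA space
 *	(the cursor scan in xe_pt_scan_64K())
 *
 * If any check fails, the mapping falls back to plain 4K PTEs, and on
 * devices where @needs_64K is set for VRAM the bind fails with -EINVAL
 * instead (see xe_pt_stage_bind_entry() below).
 */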
static int
xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
		       unsigned int level, u64 addr, u64 next,
		       struct xe_ptw **child,
		       enum page_walk_action *action,
		       struct xe_pt_walk *walk)
{
	struct xe_pt_stage_bind_walk *xe_walk =
		container_of(walk, typeof(*xe_walk), base);
	u16 pat_index = xe_walk->vma->attr.pat_index;
	struct xe_pt *xe_parent = container_of(parent, typeof(*xe_parent), base);
	struct xe_vm *vm = xe_walk->vm;
	struct xe_pt *xe_child;
	bool covers;
	int ret = 0;
	u64 pte;

	/* Is this a leaf entry? */
	if (level == 0 || xe_pt_hugepte_possible(addr, next, level, xe_walk)) {
		struct xe_res_cursor *curs = xe_walk->curs;
		bool is_null = xe_vma_is_null(xe_walk->vma);
		bool is_vram = is_null ? false : xe_res_is_vram(curs);

		XE_WARN_ON(xe_walk->va_curs_start != addr);

		if (xe_walk->clear_pt) {
			pte = 0;
		} else {
			pte = vm->pt_ops->pte_encode_vma(is_null ? 0 :
							 xe_res_dma(curs) +
							 xe_walk->dma_offset,
							 xe_walk->vma,
							 pat_index, level);
			if (!is_null)
				pte |= is_vram ? xe_walk->default_vram_pte :
					xe_walk->default_system_pte;

			/*
			 * Set the XE_PTE_PS64 hint if possible, otherwise if
			 * this device *requires* 64K PTE size for VRAM, fail.
			 */
			if (level == 0 && !xe_parent->is_compact) {
				if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) {
					xe_walk->vma->gpuva.flags |=
						XE_VMA_PTE_64K;
					pte |= XE_PTE_PS64;
				} else if (XE_WARN_ON(xe_walk->needs_64K &&
						      is_vram)) {
					return -EINVAL;
				}
			}
		}

		ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, NULL, pte);
		if (unlikely(ret))
			return ret;

		if (!is_null && !xe_walk->clear_pt)
			xe_res_next(curs, next - addr);
		xe_walk->va_curs_start = next;
		xe_walk->vma->gpuva.flags |= (XE_VMA_PTE_4K << level);
		*action = ACTION_CONTINUE;

		return ret;
	}

	/*
	 * Descending to lower level. Determine if we need to allocate a
	 * new page table or -directory, which we do if there is no
	 * previous one or there is one we can completely replace.
	 */
	if (level == 1) {
		walk->shifts = xe_normal_pt_shifts;
		xe_walk->l0_end_addr = next;
	}

	covers = xe_pt_covers(addr, next, level, &xe_walk->base);
	if (covers || !*child) {
		u64 flags = 0;

		xe_child = xe_pt_create(xe_walk->vm, xe_walk->tile, level - 1,
					xe_vm_validation_exec(vm));
		if (IS_ERR(xe_child))
			return PTR_ERR(xe_child);

		xe_pt_set_addr(xe_child,
			       round_down(addr, 1ull << walk->shifts[level]));

		if (!covers)
			xe_pt_populate_empty(xe_walk->tile, xe_walk->vm, xe_child);

		*child = &xe_child->base;

		/*
		 * Prefer the compact pagetable layout for L0 if possible. Only
		 * possible if VMA covers entire 2MB region as compact 64k and
		 * 4k pages cannot be mixed within a 2MB region.
		 * TODO: Suballocate the pt bo to avoid wasting a lot of
		 * memory.
		 */
		if (GRAPHICS_VERx100(tile_to_xe(xe_walk->tile)) >= 1250 && level == 1 &&
		    covers && xe_pt_scan_64K(addr, next, xe_walk)) {
			walk->shifts = xe_compact_pt_shifts;
			xe_walk->vma->gpuva.flags |= XE_VMA_PTE_COMPACT;
			flags |= XE_PDE_64K;
			xe_child->is_compact = true;
		}

		pte = vm->pt_ops->pde_encode_bo(xe_child->bo, 0) | flags;
		ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, xe_child,
					 pte);
	}

	*action = ACTION_SUBTREE;
	return ret;
}

static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
	.pt_entry = xe_pt_stage_bind_entry,
};
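/*
 * Worked example for the compact L0 selection above (illustrative): with the
 * normal shifts {12, 21, ...} a level-0 table maps its 2M slot as
 *
 *	2M / 4K  = 2^(21 - 12) = 512 PTEs,
 *
 * while the compact shifts {16, 21, ...} map the same 2M slot as
 *
 *	2M / 64K = 2^(21 - 16) = 32 PTEs.
 *
 * Both layouts occupy the single 2M entry in the level-1 directory, so 4K
 * and 64K pages cannot be mixed there; the compact layout is therefore only
 * chosen when the VMA covers the whole 2M region and xe_pt_scan_64K()
 * succeeds for it.
 */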
/*
 * Default atomic expectations for different allocation scenarios are as follows:
 *
 * 1. Traditional API: When the VM is not in LR mode:
 *    - Device atomics are expected to function with all allocations.
 *
 * 2. Compute/SVM API: When the VM is in LR mode:
 *    - Device atomics are the default behavior when the bo is placed in a single region.
 *    - In all other cases device atomics will be disabled with AE=0 until an application
 *      requests otherwise using an ioctl like madvise.
 */
static bool xe_atomic_for_vram(struct xe_vm *vm, struct xe_vma *vma)
{
	if (vma->attr.atomic_access == DRM_XE_ATOMIC_CPU)
		return false;

	return true;
}

static bool xe_atomic_for_system(struct xe_vm *vm, struct xe_vma *vma)
{
	struct xe_device *xe = vm->xe;
	struct xe_bo *bo = xe_vma_bo(vma);

	if (!xe->info.has_device_atomics_on_smem ||
	    vma->attr.atomic_access == DRM_XE_ATOMIC_CPU)
		return false;

	if (vma->attr.atomic_access == DRM_XE_ATOMIC_DEVICE)
		return true;

	/*
	 * If a SMEM+LMEM allocation is backed by SMEM, a device atomic
	 * will cause a gpu page fault and the allocation then gets
	 * migrated to LMEM, so bind such allocations with device
	 * atomics enabled.
	 */
	return (!IS_DGFX(xe) || (!xe_vm_in_lr_mode(vm) ||
				 (bo && xe_bo_has_single_placement(bo))));
}
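/*
 * Summary of the two helpers above (illustrative): they decide whether the
 * AE (atomics enable) bit ends up in the default PTE templates that
 * xe_pt_stage_bind() below applies when the VMA has XE_VMA_ATOMIC_PTE_BIT
 * set:
 *
 *	atomic_access == DRM_XE_ATOMIC_CPU	no device atomics at all
 *	atomic_access == DRM_XE_ATOMIC_DEVICE	AE for VRAM, and for SMEM if
 *						the device supports it
 *	otherwise				AE for VRAM; AE for SMEM on
 *						integrated parts and !LR VMs,
 *						or on LR VMs when the bo has
 *						a single placement
 *
 * All SMEM cases additionally require xe->info.has_device_atomics_on_smem.
 */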
/**
 * xe_pt_stage_bind() - Build a disconnected page-table tree for a given address
 * range.
 * @tile: The tile we're building for.
 * @vma: The vma indicating the address range.
 * @range: The range indicating the address range.
 * @entries: Storage for the update entries used for connecting the tree to
 * the main tree at commit time.
 * @num_entries: On output contains the number of @entries used.
 * @clear_pt: Clear the page table entries.
 *
 * This function builds a disconnected page-table tree for a given address
 * range. The tree is connected to the main vm tree for the gpu using
 * xe_migrate_update_pgtables() and for the cpu using xe_pt_commit_bind().
 * The function builds xe_vm_pgtable_update structures for already existing
 * shared page-tables, and non-existing shared and non-shared page-tables
 * are built and populated directly.
 *
 * Return: 0 on success, negative error code on error.
 */
static int
xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
		 struct xe_svm_range *range,
		 struct xe_vm_pgtable_update *entries,
		 u32 *num_entries, bool clear_pt)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_bo *bo = xe_vma_bo(vma);
	struct xe_res_cursor curs;
	struct xe_vm *vm = xe_vma_vm(vma);
	struct xe_pt_stage_bind_walk xe_walk = {
		.base = {
			.ops = &xe_pt_stage_bind_ops,
			.shifts = xe_normal_pt_shifts,
			.max_level = XE_PT_HIGHEST_LEVEL,
			.staging = true,
		},
		.vm = vm,
		.tile = tile,
		.curs = &curs,
		.va_curs_start = range ? xe_svm_range_start(range) :
			xe_vma_start(vma),
		.vma = vma,
		.wupd.entries = entries,
		.clear_pt = clear_pt,
	};
	struct xe_pt *pt = vm->pt_root[tile->id];
	int ret;

	if (range) {
		/* Move this entire thing to xe_svm.c? */
		xe_svm_notifier_lock(vm);
		if (!xe_svm_range_pages_valid(range)) {
			xe_svm_range_debug(range, "BIND PREPARE - RETRY");
			xe_svm_notifier_unlock(vm);
			return -EAGAIN;
		}
		if (xe_svm_range_has_dma_mapping(range)) {
			xe_res_first_dma(range->base.pages.dma_addr, 0,
					 xe_svm_range_size(range),
					 &curs);
			xe_svm_range_debug(range, "BIND PREPARE - MIXED");
		} else {
			xe_assert(xe, false);
		}
		/*
		 * Note, when unlocking, the resource cursor dma addresses may
		 * become stale, but the bind will be aborted anyway at commit
		 * time.
		 */
		xe_svm_notifier_unlock(vm);
	}

	xe_walk.needs_64K = (vm->flags & XE_VM_FLAG_64K);
	if (clear_pt)
		goto walk_pt;

	if (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) {
		xe_walk.default_vram_pte = xe_atomic_for_vram(vm, vma) ? XE_USM_PPGTT_PTE_AE : 0;
		xe_walk.default_system_pte = xe_atomic_for_system(vm, vma) ?
			XE_USM_PPGTT_PTE_AE : 0;
	}

	xe_walk.default_vram_pte |= XE_PPGTT_PTE_DM;
	xe_walk.dma_offset = bo ? vram_region_gpu_offset(bo->ttm.resource) : 0;
	if (!range)
		xe_bo_assert_held(bo);

	if (!xe_vma_is_null(vma) && !range) {
		if (xe_vma_is_userptr(vma))
			xe_res_first_dma(to_userptr_vma(vma)->userptr.pages.dma_addr, 0,
					 xe_vma_size(vma), &curs);
		else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo))
			xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma),
				     xe_vma_size(vma), &curs);
		else
			xe_res_first_sg(xe_bo_sg(bo), xe_vma_bo_offset(vma),
					xe_vma_size(vma), &curs);
	} else if (!range) {
		curs.size = xe_vma_size(vma);
	}

walk_pt:
	ret = xe_pt_walk_range(&pt->base, pt->level,
			       range ? xe_svm_range_start(range) : xe_vma_start(vma),
			       range ? xe_svm_range_end(range) : xe_vma_end(vma),
			       &xe_walk.base);

	*num_entries = xe_walk.wupd.num_used_entries;
	return ret;
}
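/*
 * Sketch of how the @entries built by xe_pt_stage_bind() are consumed
 * (illustrative): each xe_vm_pgtable_update describes a contiguous run of
 * qwords in an already existing (shared) page-table bo,
 *
 *	for (i = 0; i < entry->qwords; i++)
 *		qword (entry->ofs + i) of entry->pt_bo = entry->pt_entries[i].pte;
 *
 * which xe_vm_populate_pgtable() below writes either straight through the
 * bo's CPU mapping or into a buffer handed to the migrate engine, while the
 * freshly built private subtrees were already populated in place during the
 * walk.
 */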
/**
 * xe_pt_nonshared_offsets() - Determine the non-shared entry offsets of a
 * shared pagetable.
 * @addr: The start address within the non-shared pagetable.
 * @end: The end address within the non-shared pagetable.
 * @level: The level of the non-shared pagetable.
 * @walk: Walk info. The function adjusts the walk action.
 * @action: next action to perform (see enum page_walk_action)
 * @offset: Ignored on input, first non-shared entry on output.
 * @end_offset: Ignored on input, last non-shared entry + 1 on output.
 *
 * A non-shared page-table has some entries that belong to the address range
 * and others that don't. This function determines the entries that belong
 * fully to the address range. Depending on level, some entries may
 * partially belong to the address range (that can't happen at level 0).
 * The function detects that and adjusts those offsets to not include those
 * partial entries. Iff it does detect partial entries, we know that there must
 * be shared page tables also at lower levels, so it adjusts the walk action
 * accordingly.
 *
 * Return: true if there were non-shared entries, false otherwise.
 */
static bool xe_pt_nonshared_offsets(u64 addr, u64 end, unsigned int level,
				    struct xe_pt_walk *walk,
				    enum page_walk_action *action,
				    pgoff_t *offset, pgoff_t *end_offset)
{
	u64 size = 1ull << walk->shifts[level];

	*offset = xe_pt_offset(addr, level, walk);
	*end_offset = xe_pt_num_entries(addr, end, level, walk) + *offset;

	if (!level)
		return true;

	/*
	 * If addr or next are not size aligned, there are shared pts at lower
	 * level, so in that case traverse down the subtree
	 */
	*action = ACTION_CONTINUE;
	if (!IS_ALIGNED(addr, size)) {
		*action = ACTION_SUBTREE;
		(*offset)++;
	}

	if (!IS_ALIGNED(end, size)) {
		*action = ACTION_SUBTREE;
		(*end_offset)--;
	}

	return *end_offset > *offset;
}

struct xe_pt_zap_ptes_walk {
	/** @base: The walk base-class */
	struct xe_pt_walk base;

	/* Input parameters for the walk */
	/** @tile: The tile we're building for */
	struct xe_tile *tile;

	/* Output */
	/** @needs_invalidate: Whether we need to invalidate TLB */
	bool needs_invalidate;
};

static int xe_pt_zap_ptes_entry(struct xe_ptw *parent, pgoff_t offset,
				unsigned int level, u64 addr, u64 next,
				struct xe_ptw **child,
				enum page_walk_action *action,
				struct xe_pt_walk *walk)
{
	struct xe_pt_zap_ptes_walk *xe_walk =
		container_of(walk, typeof(*xe_walk), base);
	struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
	pgoff_t end_offset;

	XE_WARN_ON(!*child);
	XE_WARN_ON(!level);

	/*
	 * Note that we're called from an entry callback, and we're dealing
	 * with the child of that entry rather than the parent, so need to
	 * adjust level down.
	 */
	if (xe_pt_nonshared_offsets(addr, next, --level, walk, action, &offset,
				    &end_offset)) {
		xe_map_memset(tile_to_xe(xe_walk->tile), &xe_child->bo->vmap,
			      offset * sizeof(u64), 0,
			      (end_offset - offset) * sizeof(u64));
		xe_walk->needs_invalidate = true;
	}

	return 0;
}

static const struct xe_pt_walk_ops xe_pt_zap_ptes_ops = {
	.pt_entry = xe_pt_zap_ptes_entry,
};

/**
 * xe_pt_zap_ptes() - Zap (zero) gpu ptes of an address range
 * @tile: The tile we're zapping for.
 * @vma: GPU VMA detailing address range.
 *
 * Eviction and Userptr invalidation need to be able to zap the
 * gpu ptes of a given address range in pagefaulting mode.
 * In order to be able to do that, that function needs access to the shared
 * page-table entries so it can either clear the leaf PTEs or
 * clear the pointers to lower-level page-tables. The caller is required
 * to hold the necessary locks to ensure neither the page-table connectivity
 * nor the page-table entries of the range are updated from under us.
 *
 * Return: Whether ptes were actually updated and a TLB invalidation is
 * required.
903 */ 904 bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma) 905 { 906 struct xe_pt_zap_ptes_walk xe_walk = { 907 .base = { 908 .ops = &xe_pt_zap_ptes_ops, 909 .shifts = xe_normal_pt_shifts, 910 .max_level = XE_PT_HIGHEST_LEVEL, 911 }, 912 .tile = tile, 913 }; 914 struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id]; 915 u8 pt_mask = (vma->tile_present & ~vma->tile_invalidated); 916 917 if (xe_vma_bo(vma)) 918 xe_bo_assert_held(xe_vma_bo(vma)); 919 else if (xe_vma_is_userptr(vma)) 920 lockdep_assert_held(&xe_vma_vm(vma)->svm.gpusvm.notifier_lock); 921 922 if (!(pt_mask & BIT(tile->id))) 923 return false; 924 925 (void)xe_pt_walk_shared(&pt->base, pt->level, xe_vma_start(vma), 926 xe_vma_end(vma), &xe_walk.base); 927 928 return xe_walk.needs_invalidate; 929 } 930 931 /** 932 * xe_pt_zap_ptes_range() - Zap (zero) gpu ptes of a SVM range 933 * @tile: The tile we're zapping for. 934 * @vm: The VM we're zapping for. 935 * @range: The SVM range we're zapping for. 936 * 937 * SVM invalidation needs to be able to zap the gpu ptes of a given address 938 * range. In order to be able to do that, that function needs access to the 939 * shared page-table entries so it can either clear the leaf PTEs or 940 * clear the pointers to lower-level page-tables. The caller is required 941 * to hold the SVM notifier lock. 942 * 943 * Return: Whether ptes were actually updated and a TLB invalidation is 944 * required. 945 */ 946 bool xe_pt_zap_ptes_range(struct xe_tile *tile, struct xe_vm *vm, 947 struct xe_svm_range *range) 948 { 949 struct xe_pt_zap_ptes_walk xe_walk = { 950 .base = { 951 .ops = &xe_pt_zap_ptes_ops, 952 .shifts = xe_normal_pt_shifts, 953 .max_level = XE_PT_HIGHEST_LEVEL, 954 }, 955 .tile = tile, 956 }; 957 struct xe_pt *pt = vm->pt_root[tile->id]; 958 u8 pt_mask = (range->tile_present & ~range->tile_invalidated); 959 960 /* 961 * Locking rules: 962 * 963 * - notifier_lock (write): full protection against page table changes 964 * and MMU notifier invalidations. 965 * 966 * - notifier_lock (read) + vm_lock (write): combined protection against 967 * invalidations and concurrent page table modifications. 
(e.g., madvise) 968 * 969 */ 970 lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 0) || 971 (lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) && 972 lockdep_is_held_type(&vm->lock, 0))); 973 974 if (!(pt_mask & BIT(tile->id))) 975 return false; 976 977 (void)xe_pt_walk_shared(&pt->base, pt->level, xe_svm_range_start(range), 978 xe_svm_range_end(range), &xe_walk.base); 979 980 return xe_walk.needs_invalidate; 981 } 982 983 static void 984 xe_vm_populate_pgtable(struct xe_migrate_pt_update *pt_update, struct xe_tile *tile, 985 struct iosys_map *map, void *data, 986 u32 qword_ofs, u32 num_qwords, 987 const struct xe_vm_pgtable_update *update) 988 { 989 struct xe_pt_entry *ptes = update->pt_entries; 990 u64 *ptr = data; 991 u32 i; 992 993 for (i = 0; i < num_qwords; i++) { 994 if (map) 995 xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) * 996 sizeof(u64), u64, ptes[i].pte); 997 else 998 ptr[i] = ptes[i].pte; 999 } 1000 } 1001 1002 static void xe_pt_cancel_bind(struct xe_vma *vma, 1003 struct xe_vm_pgtable_update *entries, 1004 u32 num_entries) 1005 { 1006 u32 i, j; 1007 1008 for (i = 0; i < num_entries; i++) { 1009 struct xe_pt *pt = entries[i].pt; 1010 1011 if (!pt) 1012 continue; 1013 1014 if (pt->level) { 1015 for (j = 0; j < entries[i].qwords; j++) 1016 xe_pt_destroy(entries[i].pt_entries[j].pt, 1017 xe_vma_vm(vma)->flags, NULL); 1018 } 1019 1020 kfree(entries[i].pt_entries); 1021 entries[i].pt_entries = NULL; 1022 entries[i].qwords = 0; 1023 } 1024 } 1025 1026 #define XE_INVALID_VMA ((struct xe_vma *)(0xdeaddeadull)) 1027 1028 static void xe_pt_commit_prepare_locks_assert(struct xe_vma *vma) 1029 { 1030 struct xe_vm *vm; 1031 1032 if (vma == XE_INVALID_VMA) 1033 return; 1034 1035 vm = xe_vma_vm(vma); 1036 lockdep_assert_held(&vm->lock); 1037 1038 if (!xe_vma_has_no_bo(vma)) 1039 dma_resv_assert_held(xe_vma_bo(vma)->ttm.base.resv); 1040 1041 xe_vm_assert_held(vm); 1042 } 1043 1044 static void xe_pt_commit_locks_assert(struct xe_vma *vma) 1045 { 1046 struct xe_vm *vm; 1047 1048 if (vma == XE_INVALID_VMA) 1049 return; 1050 1051 vm = xe_vma_vm(vma); 1052 xe_pt_commit_prepare_locks_assert(vma); 1053 1054 if (xe_vma_is_userptr(vma)) 1055 xe_svm_assert_held_read(vm); 1056 } 1057 1058 static void xe_pt_commit(struct xe_vma *vma, 1059 struct xe_vm_pgtable_update *entries, 1060 u32 num_entries, struct llist_head *deferred) 1061 { 1062 u32 i, j; 1063 1064 xe_pt_commit_locks_assert(vma); 1065 1066 for (i = 0; i < num_entries; i++) { 1067 struct xe_pt *pt = entries[i].pt; 1068 struct xe_pt_dir *pt_dir; 1069 1070 if (!pt->level) 1071 continue; 1072 1073 pt_dir = as_xe_pt_dir(pt); 1074 for (j = 0; j < entries[i].qwords; j++) { 1075 struct xe_pt *oldpte = entries[i].pt_entries[j].pt; 1076 int j_ = j + entries[i].ofs; 1077 1078 pt_dir->children[j_] = pt_dir->staging[j_]; 1079 xe_pt_destroy(oldpte, (vma == XE_INVALID_VMA) ? 
0 : 1080 xe_vma_vm(vma)->flags, deferred); 1081 } 1082 } 1083 } 1084 1085 static void xe_pt_abort_bind(struct xe_vma *vma, 1086 struct xe_vm_pgtable_update *entries, 1087 u32 num_entries, bool rebind) 1088 { 1089 int i, j; 1090 1091 xe_pt_commit_prepare_locks_assert(vma); 1092 1093 for (i = num_entries - 1; i >= 0; --i) { 1094 struct xe_pt *pt = entries[i].pt; 1095 struct xe_pt_dir *pt_dir; 1096 1097 if (!rebind) 1098 pt->num_live -= entries[i].qwords; 1099 1100 if (!pt->level) 1101 continue; 1102 1103 pt_dir = as_xe_pt_dir(pt); 1104 for (j = 0; j < entries[i].qwords; j++) { 1105 u32 j_ = j + entries[i].ofs; 1106 struct xe_pt *newpte = xe_pt_entry_staging(pt_dir, j_); 1107 struct xe_pt *oldpte = entries[i].pt_entries[j].pt; 1108 1109 pt_dir->staging[j_] = oldpte ? &oldpte->base : 0; 1110 xe_pt_destroy(newpte, xe_vma_vm(vma)->flags, NULL); 1111 } 1112 } 1113 } 1114 1115 static void xe_pt_commit_prepare_bind(struct xe_vma *vma, 1116 struct xe_vm_pgtable_update *entries, 1117 u32 num_entries, bool rebind) 1118 { 1119 u32 i, j; 1120 1121 xe_pt_commit_prepare_locks_assert(vma); 1122 1123 for (i = 0; i < num_entries; i++) { 1124 struct xe_pt *pt = entries[i].pt; 1125 struct xe_pt_dir *pt_dir; 1126 1127 if (!rebind) 1128 pt->num_live += entries[i].qwords; 1129 1130 if (!pt->level) 1131 continue; 1132 1133 pt_dir = as_xe_pt_dir(pt); 1134 for (j = 0; j < entries[i].qwords; j++) { 1135 u32 j_ = j + entries[i].ofs; 1136 struct xe_pt *newpte = entries[i].pt_entries[j].pt; 1137 struct xe_pt *oldpte = NULL; 1138 1139 if (xe_pt_entry_staging(pt_dir, j_)) 1140 oldpte = xe_pt_entry_staging(pt_dir, j_); 1141 1142 pt_dir->staging[j_] = &newpte->base; 1143 entries[i].pt_entries[j].pt = oldpte; 1144 } 1145 } 1146 } 1147 1148 static void xe_pt_free_bind(struct xe_vm_pgtable_update *entries, 1149 u32 num_entries) 1150 { 1151 u32 i; 1152 1153 for (i = 0; i < num_entries; i++) 1154 kfree(entries[i].pt_entries); 1155 } 1156 1157 static int 1158 xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma, 1159 struct xe_svm_range *range, 1160 struct xe_vm_pgtable_update *entries, 1161 u32 *num_entries, bool invalidate_on_bind) 1162 { 1163 int err; 1164 1165 *num_entries = 0; 1166 err = xe_pt_stage_bind(tile, vma, range, entries, num_entries, 1167 invalidate_on_bind); 1168 if (!err) 1169 xe_tile_assert(tile, *num_entries); 1170 1171 return err; 1172 } 1173 1174 static void xe_vm_dbg_print_entries(struct xe_device *xe, 1175 const struct xe_vm_pgtable_update *entries, 1176 unsigned int num_entries, bool bind) 1177 #if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)) 1178 { 1179 unsigned int i; 1180 1181 vm_dbg(&xe->drm, "%s: %u entries to update\n", bind ? 
"bind" : "unbind", 1182 num_entries); 1183 for (i = 0; i < num_entries; i++) { 1184 const struct xe_vm_pgtable_update *entry = &entries[i]; 1185 struct xe_pt *xe_pt = entry->pt; 1186 u64 page_size = 1ull << xe_pt_shift(xe_pt->level); 1187 u64 end; 1188 u64 start; 1189 1190 xe_assert(xe, !entry->pt->is_compact); 1191 start = entry->ofs * page_size; 1192 end = start + page_size * entry->qwords; 1193 vm_dbg(&xe->drm, 1194 "\t%u: Update level %u at (%u + %u) [%llx...%llx) f:%x\n", 1195 i, xe_pt->level, entry->ofs, entry->qwords, 1196 xe_pt_addr(xe_pt) + start, xe_pt_addr(xe_pt) + end, 0); 1197 } 1198 } 1199 #else 1200 {} 1201 #endif 1202 1203 static bool no_in_syncs(struct xe_sync_entry *syncs, u32 num_syncs) 1204 { 1205 int i; 1206 1207 for (i = 0; i < num_syncs; i++) { 1208 struct dma_fence *fence = syncs[i].fence; 1209 1210 if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 1211 &fence->flags)) 1212 return false; 1213 } 1214 1215 return true; 1216 } 1217 1218 static int job_test_add_deps(struct xe_sched_job *job, 1219 struct dma_resv *resv, 1220 enum dma_resv_usage usage) 1221 { 1222 if (!job) { 1223 if (!dma_resv_test_signaled(resv, usage)) 1224 return -ETIME; 1225 1226 return 0; 1227 } 1228 1229 return xe_sched_job_add_deps(job, resv, usage); 1230 } 1231 1232 static int vma_add_deps(struct xe_vma *vma, struct xe_sched_job *job) 1233 { 1234 struct xe_bo *bo = xe_vma_bo(vma); 1235 1236 xe_bo_assert_held(bo); 1237 1238 if (bo && !bo->vm) 1239 return job_test_add_deps(job, bo->ttm.base.resv, 1240 DMA_RESV_USAGE_KERNEL); 1241 1242 return 0; 1243 } 1244 1245 static int op_add_deps(struct xe_vm *vm, struct xe_vma_op *op, 1246 struct xe_sched_job *job) 1247 { 1248 int err = 0; 1249 1250 /* 1251 * No need to check for is_cpu_addr_mirror here as vma_add_deps is a 1252 * NOP if VMA is_cpu_addr_mirror 1253 */ 1254 1255 switch (op->base.op) { 1256 case DRM_GPUVA_OP_MAP: 1257 if (!op->map.immediate && xe_vm_in_fault_mode(vm)) 1258 break; 1259 1260 err = vma_add_deps(op->map.vma, job); 1261 break; 1262 case DRM_GPUVA_OP_REMAP: 1263 if (op->remap.prev) 1264 err = vma_add_deps(op->remap.prev, job); 1265 if (!err && op->remap.next) 1266 err = vma_add_deps(op->remap.next, job); 1267 break; 1268 case DRM_GPUVA_OP_UNMAP: 1269 break; 1270 case DRM_GPUVA_OP_PREFETCH: 1271 err = vma_add_deps(gpuva_to_vma(op->base.prefetch.va), job); 1272 break; 1273 case DRM_GPUVA_OP_DRIVER: 1274 break; 1275 default: 1276 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); 1277 } 1278 1279 return err; 1280 } 1281 1282 static int xe_pt_vm_dependencies(struct xe_sched_job *job, 1283 struct xe_tlb_inval_job *ijob, 1284 struct xe_tlb_inval_job *mjob, 1285 struct xe_vm *vm, 1286 struct xe_vma_ops *vops, 1287 struct xe_vm_pgtable_update_ops *pt_update_ops, 1288 struct xe_range_fence_tree *rftree) 1289 { 1290 struct xe_range_fence *rtfence; 1291 struct dma_fence *fence; 1292 struct xe_vma_op *op; 1293 int err = 0, i; 1294 1295 xe_vm_assert_held(vm); 1296 1297 if (!job && !no_in_syncs(vops->syncs, vops->num_syncs)) 1298 return -ETIME; 1299 1300 if (!job && !xe_exec_queue_is_idle(pt_update_ops->q)) 1301 return -ETIME; 1302 1303 if (pt_update_ops->wait_vm_bookkeep || pt_update_ops->wait_vm_kernel) { 1304 err = job_test_add_deps(job, xe_vm_resv(vm), 1305 pt_update_ops->wait_vm_bookkeep ? 
1306 DMA_RESV_USAGE_BOOKKEEP : 1307 DMA_RESV_USAGE_KERNEL); 1308 if (err) 1309 return err; 1310 } 1311 1312 rtfence = xe_range_fence_tree_first(rftree, pt_update_ops->start, 1313 pt_update_ops->last); 1314 while (rtfence) { 1315 fence = rtfence->fence; 1316 1317 if (!dma_fence_is_signaled(fence)) { 1318 /* 1319 * Is this a CPU update? GPU is busy updating, so return 1320 * an error 1321 */ 1322 if (!job) 1323 return -ETIME; 1324 1325 dma_fence_get(fence); 1326 err = drm_sched_job_add_dependency(&job->drm, fence); 1327 if (err) 1328 return err; 1329 } 1330 1331 rtfence = xe_range_fence_tree_next(rtfence, 1332 pt_update_ops->start, 1333 pt_update_ops->last); 1334 } 1335 1336 list_for_each_entry(op, &vops->list, link) { 1337 err = op_add_deps(vm, op, job); 1338 if (err) 1339 return err; 1340 } 1341 1342 for (i = 0; job && !err && i < vops->num_syncs; i++) 1343 err = xe_sync_entry_add_deps(&vops->syncs[i], job); 1344 1345 if (job) { 1346 if (ijob) { 1347 err = xe_tlb_inval_job_alloc_dep(ijob); 1348 if (err) 1349 return err; 1350 } 1351 1352 if (mjob) { 1353 err = xe_tlb_inval_job_alloc_dep(mjob); 1354 if (err) 1355 return err; 1356 } 1357 } 1358 1359 return err; 1360 } 1361 1362 static int xe_pt_pre_commit(struct xe_migrate_pt_update *pt_update) 1363 { 1364 struct xe_vma_ops *vops = pt_update->vops; 1365 struct xe_vm *vm = vops->vm; 1366 struct xe_range_fence_tree *rftree = &vm->rftree[pt_update->tile_id]; 1367 struct xe_vm_pgtable_update_ops *pt_update_ops = 1368 &vops->pt_update_ops[pt_update->tile_id]; 1369 1370 return xe_pt_vm_dependencies(pt_update->job, pt_update->ijob, 1371 pt_update->mjob, vm, pt_update->vops, 1372 pt_update_ops, rftree); 1373 } 1374 1375 #if IS_ENABLED(CONFIG_DRM_GPUSVM) 1376 #ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT 1377 1378 static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma) 1379 { 1380 u32 divisor = uvma->userptr.divisor ? uvma->userptr.divisor : 2; 1381 static u32 count; 1382 1383 if (count++ % divisor == divisor - 1) { 1384 uvma->userptr.divisor = divisor << 1; 1385 return true; 1386 } 1387 1388 return false; 1389 } 1390 1391 #else 1392 1393 static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma) 1394 { 1395 return false; 1396 } 1397 1398 #endif 1399 1400 static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma, 1401 struct xe_vm_pgtable_update_ops *pt_update) 1402 { 1403 struct xe_userptr_vma *uvma; 1404 unsigned long notifier_seq; 1405 1406 xe_svm_assert_held_read(vm); 1407 1408 if (!xe_vma_is_userptr(vma)) 1409 return 0; 1410 1411 uvma = to_userptr_vma(vma); 1412 if (xe_pt_userptr_inject_eagain(uvma)) 1413 xe_vma_userptr_force_invalidate(uvma); 1414 1415 notifier_seq = uvma->userptr.pages.notifier_seq; 1416 1417 if (!mmu_interval_read_retry(&uvma->userptr.notifier, 1418 notifier_seq)) 1419 return 0; 1420 1421 if (xe_vm_in_fault_mode(vm)) 1422 return -EAGAIN; 1423 1424 /* 1425 * Just continue the operation since exec or rebind worker 1426 * will take care of rebinding. 
1427 */ 1428 return 0; 1429 } 1430 1431 static int op_check_svm_userptr(struct xe_vm *vm, struct xe_vma_op *op, 1432 struct xe_vm_pgtable_update_ops *pt_update) 1433 { 1434 int err = 0; 1435 1436 xe_svm_assert_held_read(vm); 1437 1438 switch (op->base.op) { 1439 case DRM_GPUVA_OP_MAP: 1440 if (!op->map.immediate && xe_vm_in_fault_mode(vm)) 1441 break; 1442 1443 err = vma_check_userptr(vm, op->map.vma, pt_update); 1444 break; 1445 case DRM_GPUVA_OP_REMAP: 1446 if (op->remap.prev) 1447 err = vma_check_userptr(vm, op->remap.prev, pt_update); 1448 if (!err && op->remap.next) 1449 err = vma_check_userptr(vm, op->remap.next, pt_update); 1450 break; 1451 case DRM_GPUVA_OP_UNMAP: 1452 break; 1453 case DRM_GPUVA_OP_PREFETCH: 1454 if (xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va))) { 1455 struct xe_svm_range *range = op->map_range.range; 1456 unsigned long i; 1457 1458 xe_assert(vm->xe, 1459 xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va))); 1460 xa_for_each(&op->prefetch_range.range, i, range) { 1461 xe_svm_range_debug(range, "PRE-COMMIT"); 1462 1463 if (!xe_svm_range_pages_valid(range)) { 1464 xe_svm_range_debug(range, "PRE-COMMIT - RETRY"); 1465 return -ENODATA; 1466 } 1467 } 1468 } else { 1469 err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va), pt_update); 1470 } 1471 break; 1472 #if IS_ENABLED(CONFIG_DRM_XE_GPUSVM) 1473 case DRM_GPUVA_OP_DRIVER: 1474 if (op->subop == XE_VMA_SUBOP_MAP_RANGE) { 1475 struct xe_svm_range *range = op->map_range.range; 1476 1477 xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma)); 1478 1479 xe_svm_range_debug(range, "PRE-COMMIT"); 1480 1481 if (!xe_svm_range_pages_valid(range)) { 1482 xe_svm_range_debug(range, "PRE-COMMIT - RETRY"); 1483 return -EAGAIN; 1484 } 1485 } 1486 break; 1487 #endif 1488 default: 1489 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); 1490 } 1491 1492 return err; 1493 } 1494 1495 static int xe_pt_svm_userptr_pre_commit(struct xe_migrate_pt_update *pt_update) 1496 { 1497 struct xe_vm *vm = pt_update->vops->vm; 1498 struct xe_vma_ops *vops = pt_update->vops; 1499 struct xe_vm_pgtable_update_ops *pt_update_ops = 1500 &vops->pt_update_ops[pt_update->tile_id]; 1501 struct xe_vma_op *op; 1502 int err; 1503 1504 err = xe_pt_pre_commit(pt_update); 1505 if (err) 1506 return err; 1507 1508 xe_svm_notifier_lock(vm); 1509 1510 list_for_each_entry(op, &vops->list, link) { 1511 err = op_check_svm_userptr(vm, op, pt_update_ops); 1512 if (err) { 1513 xe_svm_notifier_unlock(vm); 1514 break; 1515 } 1516 } 1517 1518 return err; 1519 } 1520 #endif 1521 1522 struct xe_pt_stage_unbind_walk { 1523 /** @base: The pagewalk base-class. */ 1524 struct xe_pt_walk base; 1525 1526 /* Input parameters for the walk */ 1527 /** @tile: The tile we're unbinding from. */ 1528 struct xe_tile *tile; 1529 1530 /** 1531 * @modified_start: Walk range start, modified to include any 1532 * shared pagetables that we're the only user of and can thus 1533 * treat as private. 1534 */ 1535 u64 modified_start; 1536 /** @modified_end: Walk range start, modified like @modified_start. 
 */
	u64 modified_end;

	/** @prl: Backing pointer to page reclaim list in pt_update_ops */
	struct xe_page_reclaim_list *prl;

	/* Output */
	/* @wupd: Structure to track the page-table updates we're building */
	struct xe_walk_update wupd;
};

/*
 * Check whether this range is the only one populating this pagetable,
 * and in that case, update the walk range checks so that higher levels don't
 * view us as a shared pagetable.
 */
static bool xe_pt_check_kill(u64 addr, u64 next, unsigned int level,
			     const struct xe_pt *child,
			     enum page_walk_action *action,
			     struct xe_pt_walk *walk)
{
	struct xe_pt_stage_unbind_walk *xe_walk =
		container_of(walk, typeof(*xe_walk), base);
	unsigned int shift = walk->shifts[level];
	u64 size = 1ull << shift;

	if (IS_ALIGNED(addr, size) && IS_ALIGNED(next, size) &&
	    ((next - addr) >> shift) == child->num_live) {
		u64 size = 1ull << walk->shifts[level + 1];

		*action = ACTION_CONTINUE;

		if (xe_walk->modified_start >= addr)
			xe_walk->modified_start = round_down(addr, size);
		if (xe_walk->modified_end <= next)
			xe_walk->modified_end = round_up(next, size);

		return true;
	}

	return false;
}

/* page_size = 2^(reclamation_size + XE_PTE_SHIFT) */
#define COMPUTE_RECLAIM_ADDRESS_MASK(page_size) \
({ \
	BUILD_BUG_ON(!__builtin_constant_p(page_size)); \
	ilog2(page_size) - XE_PTE_SHIFT; \
})
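/*
 * Worked example for the macro above (illustrative): with XE_PTE_SHIFT being
 * the 4K page shift (12),
 *
 *	COMPUTE_RECLAIM_ADDRESS_MASK(SZ_4K)  == ilog2(SZ_4K)  - 12 == 0
 *	COMPUTE_RECLAIM_ADDRESS_MASK(SZ_64K) == ilog2(SZ_64K) - 12 == 4
 *	COMPUTE_RECLAIM_ADDRESS_MASK(SZ_2M)  == ilog2(SZ_2M)  - 12 == 9
 *
 * so the hardware recovers the reclaimed page size as
 * 2^(reclamation_size + XE_PTE_SHIFT) bytes, matching the three page sizes
 * handled by generate_reclaim_entry() below.
 */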
static int generate_reclaim_entry(struct xe_tile *tile,
				  struct xe_page_reclaim_list *prl,
				  u64 pte, struct xe_pt *xe_child)
{
	struct xe_gt *gt = tile->primary_gt;
	struct xe_guc_page_reclaim_entry *reclaim_entries = prl->entries;
	u64 phys_addr = pte & XE_PTE_ADDR_MASK;
	u64 phys_page = phys_addr >> XE_PTE_SHIFT;
	int num_entries = prl->num_entries;
	u32 reclamation_size;

	xe_tile_assert(tile, xe_child->level <= MAX_HUGEPTE_LEVEL);
	xe_tile_assert(tile, reclaim_entries);
	xe_tile_assert(tile, num_entries < XE_PAGE_RECLAIM_MAX_ENTRIES - 1);

	if (!xe_page_reclaim_list_valid(prl))
		return -EINVAL;

	/**
	 * reclamation_size indicates the size of the page to be
	 * invalidated and flushed from non-coherent cache.
	 * Page size is computed as 2^(reclamation_size + XE_PTE_SHIFT) bytes.
	 * Only 4K, 64K (level 0), and 2M pages are supported by hardware for page reclaim.
	 */
	if (xe_child->level == 0 && !(pte & XE_PTE_PS64)) {
		xe_gt_stats_incr(gt, XE_GT_STATS_ID_PRL_4K_ENTRY_COUNT, 1);
		reclamation_size = COMPUTE_RECLAIM_ADDRESS_MASK(SZ_4K); /* reclamation_size = 0 */
		xe_tile_assert(tile, phys_addr % SZ_4K == 0);
	} else if (xe_child->level == 0) {
		xe_gt_stats_incr(gt, XE_GT_STATS_ID_PRL_64K_ENTRY_COUNT, 1);
		reclamation_size = COMPUTE_RECLAIM_ADDRESS_MASK(SZ_64K); /* reclamation_size = 4 */
		xe_tile_assert(tile, phys_addr % SZ_64K == 0);
	} else if (xe_child->level == 1 && pte & XE_PDE_PS_2M) {
		xe_gt_stats_incr(gt, XE_GT_STATS_ID_PRL_2M_ENTRY_COUNT, 1);
		reclamation_size = COMPUTE_RECLAIM_ADDRESS_MASK(SZ_2M); /* reclamation_size = 9 */
		xe_tile_assert(tile, phys_addr % SZ_2M == 0);
	} else {
		xe_page_reclaim_list_abort(tile->primary_gt, prl,
					   "unsupported PTE level=%u pte=%#llx",
					   xe_child->level, pte);
		return -EINVAL;
	}

	reclaim_entries[num_entries].qw =
		FIELD_PREP(XE_PAGE_RECLAIM_VALID, 1) |
		FIELD_PREP(XE_PAGE_RECLAIM_SIZE, reclamation_size) |
		FIELD_PREP(XE_PAGE_RECLAIM_ADDR_LO, phys_page) |
		FIELD_PREP(XE_PAGE_RECLAIM_ADDR_HI, phys_page >> 20);
	prl->num_entries++;
	vm_dbg(&tile_to_xe(tile)->drm,
	       "PRL add entry: level=%u pte=%#llx reclamation_size=%u prl_idx=%d\n",
	       xe_child->level, pte, reclamation_size, num_entries);

	return 0;
}
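/*
 * Illustrative sketch of a reclaim descriptor produced above (values
 * hypothetical): for a 64K level-0 pte backed by physical address 0x80000
 * (64K aligned),
 *
 *	phys_page        = 0x80000 >> XE_PTE_SHIFT = 0x80
 *	reclamation_size = COMPUTE_RECLAIM_ADDRESS_MASK(SZ_64K) = 4
 *
 * and the qword packs the valid bit, the size code and the page frame
 * number via FIELD_PREP(), with ADDR_HI taking phys_page >> 20, i.e. the
 * page frame bits above the low 20 carried by ADDR_LO.
 */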
static int xe_pt_stage_unbind_entry(struct xe_ptw *parent, pgoff_t offset,
				    unsigned int level, u64 addr, u64 next,
				    struct xe_ptw **child,
				    enum page_walk_action *action,
				    struct xe_pt_walk *walk)
{
	struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
	struct xe_pt_stage_unbind_walk *xe_walk =
		container_of(walk, typeof(*xe_walk), base);
	struct xe_device *xe = tile_to_xe(xe_walk->tile);
	pgoff_t first = xe_pt_offset(addr, xe_child->level, walk);
	bool killed;

	XE_WARN_ON(!*child);
	XE_WARN_ON(!level);

	/* Check for leaf node */
	if (xe_walk->prl && xe_page_reclaim_list_valid(xe_walk->prl) &&
	    (!xe_child->base.children || !xe_child->base.children[first])) {
		struct iosys_map *leaf_map = &xe_child->bo->vmap;
		pgoff_t count = xe_pt_num_entries(addr, next, xe_child->level, walk);

		for (pgoff_t i = 0; i < count; i++) {
			u64 pte = xe_map_rd(xe, leaf_map, (first + i) * sizeof(u64), u64);
			int ret;

			/*
			 * In rare scenarios, the pte may not have been written yet
			 * due to a race. In such cases, invalidate the PRL and fall
			 * back to full PPC invalidation.
			 */
			if (!pte) {
				xe_page_reclaim_list_abort(xe_walk->tile->primary_gt, xe_walk->prl,
							   "found zero pte at addr=%#llx", addr);
				break;
			}

			/* Ensure it is a defined page */
			xe_tile_assert(xe_walk->tile,
				       xe_child->level == 0 ||
				       (pte & (XE_PTE_PS64 | XE_PDE_PS_2M | XE_PDPE_PS_1G)));

			/* An entry should be added for 64K, but contiguous 4K pages have XE_PTE_PS64 */
			if (pte & XE_PTE_PS64)
				i += 15; /* Skip the other 15 consecutive 4K pages in the 64K page */

			/* Account for NULL terminated entry on end (-1) */
			if (xe_walk->prl->num_entries < XE_PAGE_RECLAIM_MAX_ENTRIES - 1) {
				ret = generate_reclaim_entry(xe_walk->tile, xe_walk->prl,
							     pte, xe_child);
				if (ret)
					break;
			} else {
				/* Overflow, mark as invalid */
				xe_page_reclaim_list_abort(xe_walk->tile->primary_gt, xe_walk->prl,
							   "overflow while adding pte=%#llx",
							   pte);
				break;
			}
		}
	}

	killed = xe_pt_check_kill(addr, next, level - 1, xe_child, action, walk);

	/*
	 * If the PRL is active and this entry is not a leaf pte (see the
	 * base.children checks), there is a potential need to invalidate the
	 * PRL if any PTEs (num_live) are dropped.
	 */
	if (xe_walk->prl && level > 1 && xe_child->num_live &&
	    xe_child->base.children && xe_child->base.children[first]) {
		bool covered = xe_pt_covers(addr, next, xe_child->level, &xe_walk->base);

		/*
		 * If aborting the page walk early (kill) or the page walk
		 * completes the full range, we need to invalidate the PRL.
		 */
		if (killed || covered)
			xe_page_reclaim_list_abort(xe_walk->tile->primary_gt, xe_walk->prl,
						   "kill at level=%u addr=%#llx next=%#llx num_live=%u",
						   level, addr, next, xe_child->num_live);
	}

	return 0;
}

static int
xe_pt_stage_unbind_post_descend(struct xe_ptw *parent, pgoff_t offset,
				unsigned int level, u64 addr, u64 next,
				struct xe_ptw **child,
				enum page_walk_action *action,
				struct xe_pt_walk *walk)
{
	struct xe_pt_stage_unbind_walk *xe_walk =
		container_of(walk, typeof(*xe_walk), base);
	struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
	pgoff_t end_offset;
	u64 size = 1ull << walk->shifts[--level];
	int err;

	if (!IS_ALIGNED(addr, size))
		addr = xe_walk->modified_start;
	if (!IS_ALIGNED(next, size))
		next = xe_walk->modified_end;

	/* Parent == *child is the root pt. Don't kill it. */
	if (parent != *child &&
	    xe_pt_check_kill(addr, next, level, xe_child, action, walk))
		return 0;

	if (!xe_pt_nonshared_offsets(addr, next, level, walk, action, &offset,
				     &end_offset))
		return 0;

	err = xe_pt_new_shared(&xe_walk->wupd, xe_child, offset, true);
	if (err)
		return err;

	xe_walk->wupd.updates[level].update->qwords = end_offset - offset;

	return 0;
}

static const struct xe_pt_walk_ops xe_pt_stage_unbind_ops = {
	.pt_entry = xe_pt_stage_unbind_entry,
	.pt_post_descend = xe_pt_stage_unbind_post_descend,
};

/**
 * xe_pt_stage_unbind() - Build page-table update structures for an unbind
 * operation
 * @tile: The tile we're unbinding for.
 * @vm: The vm
 * @vma: The vma we're unbinding.
 * @range: The range we're unbinding.
 * @entries: Caller-provided storage for the update structures.
 *
 * Builds page-table update structures for an unbind operation.
The function 1777 * will attempt to remove all page-tables that we're the only user 1778 * of, and for that to work, the unbind operation must be committed in the 1779 * same critical section that blocks racing binds to the same page-table tree. 1780 * 1781 * Return: The number of entries used. 1782 */ 1783 static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, 1784 struct xe_vm *vm, 1785 struct xe_vma *vma, 1786 struct xe_svm_range *range, 1787 struct xe_vm_pgtable_update *entries) 1788 { 1789 u64 start = range ? xe_svm_range_start(range) : xe_vma_start(vma); 1790 u64 end = range ? xe_svm_range_end(range) : xe_vma_end(vma); 1791 struct xe_vm_pgtable_update_op *pt_update_op = 1792 container_of(entries, struct xe_vm_pgtable_update_op, entries[0]); 1793 struct xe_pt_stage_unbind_walk xe_walk = { 1794 .base = { 1795 .ops = &xe_pt_stage_unbind_ops, 1796 .shifts = xe_normal_pt_shifts, 1797 .max_level = XE_PT_HIGHEST_LEVEL, 1798 .staging = true, 1799 }, 1800 .tile = tile, 1801 .modified_start = start, 1802 .modified_end = end, 1803 .wupd.entries = entries, 1804 .prl = pt_update_op->prl, 1805 }; 1806 struct xe_pt *pt = vm->pt_root[tile->id]; 1807 1808 (void)xe_pt_walk_shared(&pt->base, pt->level, start, end, 1809 &xe_walk.base); 1810 1811 return xe_walk.wupd.num_used_entries; 1812 } 1813 1814 static void 1815 xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update, 1816 struct xe_tile *tile, struct iosys_map *map, 1817 void *ptr, u32 qword_ofs, u32 num_qwords, 1818 const struct xe_vm_pgtable_update *update) 1819 { 1820 struct xe_vm *vm = pt_update->vops->vm; 1821 u64 empty = __xe_pt_empty_pte(tile, vm, update->pt->level); 1822 int i; 1823 1824 if (map && map->is_iomem) 1825 for (i = 0; i < num_qwords; ++i) 1826 xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) * 1827 sizeof(u64), u64, empty); 1828 else if (map) 1829 memset64(map->vaddr + qword_ofs * sizeof(u64), empty, 1830 num_qwords); 1831 else 1832 memset64(ptr, empty, num_qwords); 1833 } 1834 1835 static void xe_pt_abort_unbind(struct xe_vma *vma, 1836 struct xe_vm_pgtable_update *entries, 1837 u32 num_entries) 1838 { 1839 int i, j; 1840 1841 xe_pt_commit_prepare_locks_assert(vma); 1842 1843 for (i = num_entries - 1; i >= 0; --i) { 1844 struct xe_vm_pgtable_update *entry = &entries[i]; 1845 struct xe_pt *pt = entry->pt; 1846 struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt); 1847 1848 pt->num_live += entry->qwords; 1849 1850 if (!pt->level) 1851 continue; 1852 1853 for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) 1854 pt_dir->staging[j] = 1855 entries[i].pt_entries[j - entry->ofs].pt ? 
1856 &entries[i].pt_entries[j - entry->ofs].pt->base : NULL; 1857 } 1858 } 1859 1860 static void 1861 xe_pt_commit_prepare_unbind(struct xe_vma *vma, 1862 struct xe_vm_pgtable_update *entries, 1863 u32 num_entries) 1864 { 1865 int i, j; 1866 1867 xe_pt_commit_prepare_locks_assert(vma); 1868 1869 for (i = 0; i < num_entries; ++i) { 1870 struct xe_vm_pgtable_update *entry = &entries[i]; 1871 struct xe_pt *pt = entry->pt; 1872 struct xe_pt_dir *pt_dir; 1873 1874 pt->num_live -= entry->qwords; 1875 if (!pt->level) 1876 continue; 1877 1878 pt_dir = as_xe_pt_dir(pt); 1879 for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) { 1880 entry->pt_entries[j - entry->ofs].pt = 1881 xe_pt_entry_staging(pt_dir, j); 1882 pt_dir->staging[j] = NULL; 1883 } 1884 } 1885 } 1886 1887 static void 1888 xe_pt_update_ops_rfence_interval(struct xe_vm_pgtable_update_ops *pt_update_ops, 1889 u64 start, u64 end) 1890 { 1891 u64 last; 1892 u32 current_op = pt_update_ops->current_op; 1893 struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op]; 1894 int i, level = 0; 1895 1896 for (i = 0; i < pt_op->num_entries; i++) { 1897 const struct xe_vm_pgtable_update *entry = &pt_op->entries[i]; 1898 1899 if (entry->pt->level > level) 1900 level = entry->pt->level; 1901 } 1902 1903 /* Greedy (non-optimal) calculation but simple */ 1904 start = ALIGN_DOWN(start, 0x1ull << xe_pt_shift(level)); 1905 last = ALIGN(end, 0x1ull << xe_pt_shift(level)) - 1; 1906 1907 if (start < pt_update_ops->start) 1908 pt_update_ops->start = start; 1909 if (last > pt_update_ops->last) 1910 pt_update_ops->last = last; 1911 } 1912 1913 static int vma_reserve_fences(struct xe_device *xe, struct xe_vma *vma) 1914 { 1915 int shift = xe_device_get_root_tile(xe)->media_gt ? 1 : 0; 1916 1917 if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) 1918 return dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, 1919 xe->info.tile_count << shift); 1920 1921 return 0; 1922 } 1923 1924 static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile, 1925 struct xe_vm_pgtable_update_ops *pt_update_ops, 1926 struct xe_vma *vma, bool invalidate_on_bind) 1927 { 1928 u32 current_op = pt_update_ops->current_op; 1929 struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op]; 1930 int err; 1931 1932 xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma)); 1933 xe_bo_assert_held(xe_vma_bo(vma)); 1934 1935 vm_dbg(&xe_vma_vm(vma)->xe->drm, 1936 "Preparing bind, with range [%llx...%llx)\n", 1937 xe_vma_start(vma), xe_vma_end(vma) - 1); 1938 1939 pt_op->vma = NULL; 1940 pt_op->bind = true; 1941 pt_op->rebind = BIT(tile->id) & vma->tile_present; 1942 1943 err = vma_reserve_fences(tile_to_xe(tile), vma); 1944 if (err) 1945 return err; 1946 1947 err = xe_pt_prepare_bind(tile, vma, NULL, pt_op->entries, 1948 &pt_op->num_entries, invalidate_on_bind); 1949 if (!err) { 1950 xe_tile_assert(tile, pt_op->num_entries <= 1951 ARRAY_SIZE(pt_op->entries)); 1952 xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries, 1953 pt_op->num_entries, true); 1954 1955 xe_pt_update_ops_rfence_interval(pt_update_ops, 1956 xe_vma_start(vma), 1957 xe_vma_end(vma)); 1958 ++pt_update_ops->current_op; 1959 pt_update_ops->needs_svm_lock |= xe_vma_is_userptr(vma); 1960 1961 /* 1962 * If rebind, we have to invalidate TLB on !LR vms to invalidate 1963 * cached PTEs point to freed memory. On LR vms this is done 1964 * automatically when the context is re-enabled by the rebind worker, 1965 * or in fault mode it was invalidated on PTE zapping. 
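		 *
		 * For example (illustrative): a REMAP that re-establishes part
		 * of a previously bound VMA on a !LR VM counts as a rebind
		 * here, so vm->tlb_flush_seqno is bumped below and the flush is
		 * emitted by the ring ops ahead of the next batch.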
1966 * 1967 * If !rebind, and scratch enabled VMs, there is a chance the scratch 1968 * PTE is already cached in the TLB so it needs to be invalidated. 1969 * On !LR VMs this is done in the ring ops preceding a batch, but on 1970 * LR, in particular on user-space batch buffer chaining, it needs to 1971 * be done here. 1972 */ 1973 if ((!pt_op->rebind && xe_vm_has_scratch(vm) && 1974 xe_vm_in_lr_mode(vm))) 1975 pt_update_ops->needs_invalidation = true; 1976 else if (pt_op->rebind && !xe_vm_in_lr_mode(vm)) 1977 /* We bump also if batch_invalidate_tlb is true */ 1978 vm->tlb_flush_seqno++; 1979 1980 vma->tile_staged |= BIT(tile->id); 1981 pt_op->vma = vma; 1982 xe_pt_commit_prepare_bind(vma, pt_op->entries, 1983 pt_op->num_entries, pt_op->rebind); 1984 } else { 1985 xe_pt_cancel_bind(vma, pt_op->entries, pt_op->num_entries); 1986 } 1987 1988 return err; 1989 } 1990 1991 static int bind_range_prepare(struct xe_vm *vm, struct xe_tile *tile, 1992 struct xe_vm_pgtable_update_ops *pt_update_ops, 1993 struct xe_vma *vma, struct xe_svm_range *range) 1994 { 1995 u32 current_op = pt_update_ops->current_op; 1996 struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op]; 1997 int err; 1998 1999 xe_tile_assert(tile, xe_vma_is_cpu_addr_mirror(vma)); 2000 2001 vm_dbg(&xe_vma_vm(vma)->xe->drm, 2002 "Preparing bind, with range [%lx...%lx)\n", 2003 xe_svm_range_start(range), xe_svm_range_end(range) - 1); 2004 2005 pt_op->vma = NULL; 2006 pt_op->bind = true; 2007 pt_op->rebind = BIT(tile->id) & range->tile_present; 2008 2009 err = xe_pt_prepare_bind(tile, vma, range, pt_op->entries, 2010 &pt_op->num_entries, false); 2011 if (!err) { 2012 xe_tile_assert(tile, pt_op->num_entries <= 2013 ARRAY_SIZE(pt_op->entries)); 2014 xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries, 2015 pt_op->num_entries, true); 2016 2017 xe_pt_update_ops_rfence_interval(pt_update_ops, 2018 xe_svm_range_start(range), 2019 xe_svm_range_end(range)); 2020 ++pt_update_ops->current_op; 2021 pt_update_ops->needs_svm_lock = true; 2022 2023 pt_op->vma = vma; 2024 xe_pt_commit_prepare_bind(vma, pt_op->entries, 2025 pt_op->num_entries, pt_op->rebind); 2026 } else { 2027 xe_pt_cancel_bind(vma, pt_op->entries, pt_op->num_entries); 2028 } 2029 2030 return err; 2031 } 2032 2033 static int unbind_op_prepare(struct xe_tile *tile, 2034 struct xe_vm_pgtable_update_ops *pt_update_ops, 2035 struct xe_vma *vma) 2036 { 2037 struct xe_device *xe = tile_to_xe(tile); 2038 u32 current_op = pt_update_ops->current_op; 2039 struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op]; 2040 int err; 2041 2042 if (!((vma->tile_present | vma->tile_staged) & BIT(tile->id))) 2043 return 0; 2044 2045 xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma)); 2046 xe_bo_assert_held(xe_vma_bo(vma)); 2047 2048 vm_dbg(&xe_vma_vm(vma)->xe->drm, 2049 "Preparing unbind, with range [%llx...%llx)\n", 2050 xe_vma_start(vma), xe_vma_end(vma) - 1); 2051 2052 pt_op->vma = vma; 2053 pt_op->bind = false; 2054 pt_op->rebind = false; 2055 /* 2056 * Maintain one PRL located in pt_update_ops that all others in unbind op reference. 2057 * Ensure that PRL is allocated only once, and if invalidated, remains an invalidated PRL. 
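	 *
	 * Rough PRL lifecycle across this file (illustrative sketch; the real
	 * call sites are in unbind_op_prepare(), xe_pt_stage_unbind_entry(),
	 * xe_pt_update_ops_run() and xe_pt_update_ops_fini()):
	 *
	 *	xe_page_reclaim_list_alloc_entries(&pt_update_ops->prl);
	 *	pt_op->prl = &pt_update_ops->prl;
	 *	... entries appended by the xe_pt_stage_unbind() walk ...
	 *	xe_tlb_inval_job_add_page_reclaim(ijob, &pt_update_ops->prl);
	 *	xe_page_reclaim_list_invalidate(&pt_update_ops->prl);
	 *	...
	 *	xe_page_reclaim_entries_put(pt_update_ops->prl.entries);
	 *
	 * The walk itself aborts (invalidates) the list when it finds a zero
	 * PTE, overflows XE_PAGE_RECLAIM_MAX_ENTRIES, or drops entire
	 * directories of live PTEs.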
2058 */ 2059 if (xe->info.has_page_reclaim_hw_assist && 2060 xe_page_reclaim_list_is_new(&pt_update_ops->prl)) 2061 xe_page_reclaim_list_alloc_entries(&pt_update_ops->prl); 2062 2063 /* Page reclaim may not be needed due to other features, so skip the corresponding VMA */ 2064 pt_op->prl = (xe_page_reclaim_list_valid(&pt_update_ops->prl) && 2065 !xe_page_reclaim_skip(tile, vma)) ? &pt_update_ops->prl : NULL; 2066 2067 err = vma_reserve_fences(tile_to_xe(tile), vma); 2068 if (err) 2069 return err; 2070 2071 pt_op->num_entries = xe_pt_stage_unbind(tile, xe_vma_vm(vma), 2072 vma, NULL, pt_op->entries); 2073 2074 xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries, 2075 pt_op->num_entries, false); 2076 xe_pt_update_ops_rfence_interval(pt_update_ops, xe_vma_start(vma), 2077 xe_vma_end(vma)); 2078 ++pt_update_ops->current_op; 2079 pt_update_ops->needs_svm_lock |= xe_vma_is_userptr(vma); 2080 pt_update_ops->needs_invalidation = true; 2081 2082 xe_pt_commit_prepare_unbind(vma, pt_op->entries, pt_op->num_entries); 2083 2084 return 0; 2085 } 2086 2087 static bool 2088 xe_pt_op_check_range_skip_invalidation(struct xe_vm_pgtable_update_op *pt_op, 2089 struct xe_svm_range *range) 2090 { 2091 struct xe_vm_pgtable_update *update = pt_op->entries; 2092 2093 XE_WARN_ON(!pt_op->num_entries); 2094 2095 /* 2096 * We can't skip the invalidation if we are removing PTEs that span more 2097 * than the range, do some checks to ensure we are removing PTEs that 2098 * are invalid. 2099 */ 2100 2101 if (pt_op->num_entries > 1) 2102 return false; 2103 2104 if (update->pt->level == 0) 2105 return true; 2106 2107 if (update->pt->level == 1) 2108 return xe_svm_range_size(range) >= SZ_2M; 2109 2110 return false; 2111 } 2112 2113 static int unbind_range_prepare(struct xe_vm *vm, 2114 struct xe_tile *tile, 2115 struct xe_vm_pgtable_update_ops *pt_update_ops, 2116 struct xe_svm_range *range) 2117 { 2118 u32 current_op = pt_update_ops->current_op; 2119 struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op]; 2120 2121 if (!(range->tile_present & BIT(tile->id))) 2122 return 0; 2123 2124 vm_dbg(&vm->xe->drm, 2125 "Preparing unbind, with range [%lx...%lx)\n", 2126 xe_svm_range_start(range), xe_svm_range_end(range) - 1); 2127 2128 pt_op->vma = XE_INVALID_VMA; 2129 pt_op->bind = false; 2130 pt_op->rebind = false; 2131 pt_op->prl = NULL; 2132 2133 pt_op->num_entries = xe_pt_stage_unbind(tile, vm, NULL, range, 2134 pt_op->entries); 2135 2136 xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries, 2137 pt_op->num_entries, false); 2138 xe_pt_update_ops_rfence_interval(pt_update_ops, xe_svm_range_start(range), 2139 xe_svm_range_end(range)); 2140 ++pt_update_ops->current_op; 2141 pt_update_ops->needs_svm_lock = true; 2142 pt_update_ops->needs_invalidation |= xe_vm_has_scratch(vm) || 2143 xe_vm_has_valid_gpu_mapping(tile, range->tile_present, 2144 range->tile_invalidated) || 2145 !xe_pt_op_check_range_skip_invalidation(pt_op, range); 2146 2147 xe_pt_commit_prepare_unbind(XE_INVALID_VMA, pt_op->entries, 2148 pt_op->num_entries); 2149 2150 return 0; 2151 } 2152 2153 static int op_prepare(struct xe_vm *vm, 2154 struct xe_tile *tile, 2155 struct xe_vm_pgtable_update_ops *pt_update_ops, 2156 struct xe_vma_op *op) 2157 { 2158 int err = 0; 2159 2160 xe_vm_assert_held(vm); 2161 2162 switch (op->base.op) { 2163 case DRM_GPUVA_OP_MAP: 2164 if ((!op->map.immediate && xe_vm_in_fault_mode(vm) && 2165 !op->map.invalidate_on_bind) || 2166 (op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR)) 2167 break; 2168 2169 err = 
bind_op_prepare(vm, tile, pt_update_ops, op->map.vma,
				      op->map.invalidate_on_bind);
		pt_update_ops->wait_vm_kernel = true;
		break;
	case DRM_GPUVA_OP_REMAP:
	{
		struct xe_vma *old = gpuva_to_vma(op->base.remap.unmap->va);

		if (xe_vma_is_cpu_addr_mirror(old))
			break;

		err = unbind_op_prepare(tile, pt_update_ops, old);

		if (!err && op->remap.prev) {
			err = bind_op_prepare(vm, tile, pt_update_ops,
					      op->remap.prev, false);
			pt_update_ops->wait_vm_bookkeep = true;
		}
		if (!err && op->remap.next) {
			err = bind_op_prepare(vm, tile, pt_update_ops,
					      op->remap.next, false);
			pt_update_ops->wait_vm_bookkeep = true;
		}
		break;
	}
	case DRM_GPUVA_OP_UNMAP:
	{
		struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);

		if (xe_vma_is_cpu_addr_mirror(vma))
			break;

		err = unbind_op_prepare(tile, pt_update_ops, vma);
		break;
	}
	case DRM_GPUVA_OP_PREFETCH:
	{
		struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);

		if (xe_vma_is_cpu_addr_mirror(vma)) {
			struct xe_svm_range *range;
			unsigned long i;

			xa_for_each(&op->prefetch_range.range, i, range) {
				err = bind_range_prepare(vm, tile, pt_update_ops,
							 vma, range);
				if (err)
					return err;
			}
		} else {
			err = bind_op_prepare(vm, tile, pt_update_ops, vma, false);
			pt_update_ops->wait_vm_kernel = true;
		}
		break;
	}
	case DRM_GPUVA_OP_DRIVER:
		if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
			xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));

			err = bind_range_prepare(vm, tile, pt_update_ops,
						 op->map_range.vma,
						 op->map_range.range);
		} else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) {
			err = unbind_range_prepare(vm, tile, pt_update_ops,
						   op->unmap_range.range);
		}
		break;
	default:
		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
	}

	return err;
}

static void
xe_pt_update_ops_init(struct xe_vm_pgtable_update_ops *pt_update_ops)
{
	init_llist_head(&pt_update_ops->deferred);
	pt_update_ops->start = ~0x0ull;
	pt_update_ops->last = 0x0ull;
	xe_page_reclaim_list_init(&pt_update_ops->prl);
}

/**
 * xe_pt_update_ops_prepare() - Prepare PT update operations
 * @tile: Tile of PT update operations
 * @vops: VMA operations
 *
 * Prepare PT update operations, which includes updating internal PT state,
 * allocating memory for page tables, populating the page tables being pruned
 * in, and creating PT update operations for leaf insertion / removal.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
{
	struct xe_vm_pgtable_update_ops *pt_update_ops =
		&vops->pt_update_ops[tile->id];
	struct xe_vma_op *op;
	int shift = tile->media_gt ?
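		    /* with a media GT, two fences (primary + media TLB invalidation) may be installed per tile */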
1 : 0; 2269 int err; 2270 2271 lockdep_assert_held(&vops->vm->lock); 2272 xe_vm_assert_held(vops->vm); 2273 2274 xe_pt_update_ops_init(pt_update_ops); 2275 2276 err = dma_resv_reserve_fences(xe_vm_resv(vops->vm), 2277 tile_to_xe(tile)->info.tile_count << shift); 2278 if (err) 2279 return err; 2280 2281 list_for_each_entry(op, &vops->list, link) { 2282 err = op_prepare(vops->vm, tile, pt_update_ops, op); 2283 2284 if (err) 2285 return err; 2286 } 2287 2288 xe_tile_assert(tile, pt_update_ops->current_op <= 2289 pt_update_ops->num_ops); 2290 2291 #ifdef TEST_VM_OPS_ERROR 2292 if (vops->inject_error && 2293 vops->vm->xe->vm_inject_error_position == FORCE_OP_ERROR_PREPARE) 2294 return -ENOSPC; 2295 #endif 2296 2297 return 0; 2298 } 2299 ALLOW_ERROR_INJECTION(xe_pt_update_ops_prepare, ERRNO); 2300 2301 static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile, 2302 struct xe_vm_pgtable_update_ops *pt_update_ops, 2303 struct xe_vma *vma, struct dma_fence *fence, 2304 struct dma_fence *fence2, bool invalidate_on_bind) 2305 { 2306 xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma)); 2307 2308 if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) { 2309 dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence, 2310 pt_update_ops->wait_vm_bookkeep ? 2311 DMA_RESV_USAGE_KERNEL : 2312 DMA_RESV_USAGE_BOOKKEEP); 2313 if (fence2) 2314 dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2, 2315 pt_update_ops->wait_vm_bookkeep ? 2316 DMA_RESV_USAGE_KERNEL : 2317 DMA_RESV_USAGE_BOOKKEEP); 2318 } 2319 /* All WRITE_ONCE pair with READ_ONCE in xe_vm_has_valid_gpu_mapping() */ 2320 WRITE_ONCE(vma->tile_present, vma->tile_present | BIT(tile->id)); 2321 if (invalidate_on_bind) 2322 WRITE_ONCE(vma->tile_invalidated, 2323 vma->tile_invalidated | BIT(tile->id)); 2324 else 2325 WRITE_ONCE(vma->tile_invalidated, 2326 vma->tile_invalidated & ~BIT(tile->id)); 2327 vma->tile_staged &= ~BIT(tile->id); 2328 if (xe_vma_is_userptr(vma)) { 2329 xe_svm_assert_held_read(vm); 2330 to_userptr_vma(vma)->userptr.initial_bind = true; 2331 } 2332 2333 /* 2334 * Kick rebind worker if this bind triggers preempt fences and not in 2335 * the rebind worker 2336 */ 2337 if (pt_update_ops->wait_vm_bookkeep && 2338 xe_vm_in_preempt_fence_mode(vm) && 2339 !current->mm) 2340 xe_vm_queue_rebind_worker(vm); 2341 } 2342 2343 static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile, 2344 struct xe_vm_pgtable_update_ops *pt_update_ops, 2345 struct xe_vma *vma, struct dma_fence *fence, 2346 struct dma_fence *fence2) 2347 { 2348 xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma)); 2349 2350 if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) { 2351 dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence, 2352 pt_update_ops->wait_vm_bookkeep ? 2353 DMA_RESV_USAGE_KERNEL : 2354 DMA_RESV_USAGE_BOOKKEEP); 2355 if (fence2) 2356 dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2, 2357 pt_update_ops->wait_vm_bookkeep ? 
2358 DMA_RESV_USAGE_KERNEL : 2359 DMA_RESV_USAGE_BOOKKEEP); 2360 } 2361 vma->tile_present &= ~BIT(tile->id); 2362 if (!vma->tile_present) { 2363 list_del_init(&vma->combined_links.rebind); 2364 if (xe_vma_is_userptr(vma)) { 2365 xe_svm_assert_held_read(vm); 2366 2367 spin_lock(&vm->userptr.invalidated_lock); 2368 list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link); 2369 spin_unlock(&vm->userptr.invalidated_lock); 2370 } 2371 } 2372 } 2373 2374 static void range_present_and_invalidated_tile(struct xe_vm *vm, 2375 struct xe_svm_range *range, 2376 u8 tile_id) 2377 { 2378 /* All WRITE_ONCE pair with READ_ONCE in xe_vm_has_valid_gpu_mapping() */ 2379 2380 lockdep_assert_held(&vm->svm.gpusvm.notifier_lock); 2381 2382 WRITE_ONCE(range->tile_present, range->tile_present | BIT(tile_id)); 2383 WRITE_ONCE(range->tile_invalidated, range->tile_invalidated & ~BIT(tile_id)); 2384 } 2385 2386 static void op_commit(struct xe_vm *vm, 2387 struct xe_tile *tile, 2388 struct xe_vm_pgtable_update_ops *pt_update_ops, 2389 struct xe_vma_op *op, struct dma_fence *fence, 2390 struct dma_fence *fence2) 2391 { 2392 xe_vm_assert_held(vm); 2393 2394 switch (op->base.op) { 2395 case DRM_GPUVA_OP_MAP: 2396 if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) || 2397 (op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR)) 2398 break; 2399 2400 bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence, 2401 fence2, op->map.invalidate_on_bind); 2402 break; 2403 case DRM_GPUVA_OP_REMAP: 2404 { 2405 struct xe_vma *old = gpuva_to_vma(op->base.remap.unmap->va); 2406 2407 if (xe_vma_is_cpu_addr_mirror(old)) 2408 break; 2409 2410 unbind_op_commit(vm, tile, pt_update_ops, old, fence, fence2); 2411 2412 if (op->remap.prev) 2413 bind_op_commit(vm, tile, pt_update_ops, op->remap.prev, 2414 fence, fence2, false); 2415 if (op->remap.next) 2416 bind_op_commit(vm, tile, pt_update_ops, op->remap.next, 2417 fence, fence2, false); 2418 break; 2419 } 2420 case DRM_GPUVA_OP_UNMAP: 2421 { 2422 struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va); 2423 2424 if (!xe_vma_is_cpu_addr_mirror(vma)) 2425 unbind_op_commit(vm, tile, pt_update_ops, vma, fence, 2426 fence2); 2427 break; 2428 } 2429 case DRM_GPUVA_OP_PREFETCH: 2430 { 2431 struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va); 2432 2433 if (xe_vma_is_cpu_addr_mirror(vma)) { 2434 struct xe_svm_range *range = NULL; 2435 unsigned long i; 2436 2437 xa_for_each(&op->prefetch_range.range, i, range) 2438 range_present_and_invalidated_tile(vm, range, tile->id); 2439 } else { 2440 bind_op_commit(vm, tile, pt_update_ops, vma, fence, 2441 fence2, false); 2442 } 2443 break; 2444 } 2445 case DRM_GPUVA_OP_DRIVER: 2446 { 2447 /* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */ 2448 if (op->subop == XE_VMA_SUBOP_MAP_RANGE) 2449 range_present_and_invalidated_tile(vm, op->map_range.range, tile->id); 2450 else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) 2451 WRITE_ONCE(op->unmap_range.range->tile_present, 2452 op->unmap_range.range->tile_present & 2453 ~BIT(tile->id)); 2454 2455 break; 2456 } 2457 default: 2458 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); 2459 } 2460 } 2461 2462 static const struct xe_migrate_pt_update_ops migrate_ops = { 2463 .populate = xe_vm_populate_pgtable, 2464 .clear = xe_migrate_clear_pgtable_callback, 2465 .pre_commit = xe_pt_pre_commit, 2466 }; 2467 2468 #if IS_ENABLED(CONFIG_DRM_GPUSVM) 2469 static const struct xe_migrate_pt_update_ops svm_userptr_migrate_ops = { 2470 .populate = xe_vm_populate_pgtable, 2471 .clear = xe_migrate_clear_pgtable_callback, 2472 
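	/*
	 * Same populate/clear hooks as migrate_ops; pre_commit additionally
	 * takes the SVM / userptr notifier lock, which is why these ops are
	 * selected when pt_update_ops->needs_svm_lock is set (the lock is
	 * dropped again at the end of xe_pt_update_ops_run()).
	 */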
.pre_commit = xe_pt_svm_userptr_pre_commit,
};
#else
static const struct xe_migrate_pt_update_ops svm_userptr_migrate_ops;
#endif

static struct xe_dep_scheduler *to_dep_scheduler(struct xe_exec_queue *q,
						 struct xe_gt *gt)
{
	if (xe_gt_is_media_type(gt))
		return q->tlb_inval[XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT].dep_scheduler;

	return q->tlb_inval[XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT].dep_scheduler;
}

/**
 * xe_pt_update_ops_run() - Run PT update operations
 * @tile: Tile of PT update operations
 * @vops: VMA operations
 *
 * Run PT update operations, which includes committing internal PT state
 * changes, creating the job for PT update operations for leaf insertion /
 * removal, and installing the job fence in various places.
 *
 * Return: fence on success, negative ERR_PTR on error.
 */
struct dma_fence *
xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
{
	struct xe_vm *vm = vops->vm;
	struct xe_vm_pgtable_update_ops *pt_update_ops =
		&vops->pt_update_ops[tile->id];
	struct xe_exec_queue *q = pt_update_ops->q;
	struct dma_fence *fence, *ifence = NULL, *mfence = NULL;
	struct xe_tlb_inval_job *ijob = NULL, *mjob = NULL;
	struct xe_range_fence *rfence;
	struct xe_vma_op *op;
	int err = 0, i;
	struct xe_migrate_pt_update update = {
		.ops = pt_update_ops->needs_svm_lock ?
			&svm_userptr_migrate_ops :
			&migrate_ops,
		.vops = vops,
		.tile_id = tile->id,
	};

	lockdep_assert_held(&vm->lock);
	xe_vm_assert_held(vm);

	if (!pt_update_ops->current_op) {
		xe_tile_assert(tile, xe_vm_in_fault_mode(vm));

		return dma_fence_get_stub();
	}

#ifdef TEST_VM_OPS_ERROR
	if (vops->inject_error &&
	    vm->xe->vm_inject_error_position == FORCE_OP_ERROR_RUN)
		return ERR_PTR(-ENOSPC);
#endif

	if (pt_update_ops->needs_invalidation) {
		struct xe_dep_scheduler *dep_scheduler =
			to_dep_scheduler(q, tile->primary_gt);

		ijob = xe_tlb_inval_job_create(q, &tile->primary_gt->tlb_inval,
					       dep_scheduler, vm,
					       pt_update_ops->start,
					       pt_update_ops->last,
					       XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
		if (IS_ERR(ijob)) {
			err = PTR_ERR(ijob);
			goto kill_vm_tile1;
		}
		update.ijob = ijob;
		/*
		 * Only add page reclaim for the primary GT. The media GT does
		 * not have any PPC to flush, so enabling the PPC flush bit for
		 * media is effectively a NOP and neither provides a
		 * performance benefit nor interferes with the primary GT.
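		 *
		 * Attaching the list to the job below releases the reference
		 * taken when the entries were allocated (the job takes over
		 * ownership); the list is then marked invalidated so nothing
		 * else treats it as live.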
2552 */ 2553 if (xe_page_reclaim_list_valid(&pt_update_ops->prl)) { 2554 xe_tlb_inval_job_add_page_reclaim(ijob, &pt_update_ops->prl); 2555 /* Release ref from alloc, job will now handle it */ 2556 xe_page_reclaim_list_invalidate(&pt_update_ops->prl); 2557 } 2558 2559 if (tile->media_gt) { 2560 dep_scheduler = to_dep_scheduler(q, tile->media_gt); 2561 2562 mjob = xe_tlb_inval_job_create(q, 2563 &tile->media_gt->tlb_inval, 2564 dep_scheduler, vm, 2565 pt_update_ops->start, 2566 pt_update_ops->last, 2567 XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT); 2568 if (IS_ERR(mjob)) { 2569 err = PTR_ERR(mjob); 2570 goto free_ijob; 2571 } 2572 update.mjob = mjob; 2573 } 2574 } 2575 2576 rfence = kzalloc_obj(*rfence, GFP_KERNEL); 2577 if (!rfence) { 2578 err = -ENOMEM; 2579 goto free_ijob; 2580 } 2581 2582 fence = xe_migrate_update_pgtables(tile->migrate, &update); 2583 if (IS_ERR(fence)) { 2584 err = PTR_ERR(fence); 2585 goto free_rfence; 2586 } 2587 2588 /* Point of no return - VM killed if failure after this */ 2589 for (i = 0; i < pt_update_ops->current_op; ++i) { 2590 struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i]; 2591 2592 xe_pt_commit(pt_op->vma, pt_op->entries, 2593 pt_op->num_entries, &pt_update_ops->deferred); 2594 pt_op->vma = NULL; /* skip in xe_pt_update_ops_abort */ 2595 } 2596 2597 if (xe_range_fence_insert(&vm->rftree[tile->id], rfence, 2598 &xe_range_fence_kfree_ops, 2599 pt_update_ops->start, 2600 pt_update_ops->last, fence)) 2601 dma_fence_wait(fence, false); 2602 2603 if (ijob) 2604 ifence = xe_tlb_inval_job_push(ijob, tile->migrate, fence); 2605 if (mjob) 2606 mfence = xe_tlb_inval_job_push(mjob, tile->migrate, fence); 2607 2608 if (!mjob && !ijob) { 2609 dma_resv_add_fence(xe_vm_resv(vm), fence, 2610 pt_update_ops->wait_vm_bookkeep ? 2611 DMA_RESV_USAGE_KERNEL : 2612 DMA_RESV_USAGE_BOOKKEEP); 2613 2614 list_for_each_entry(op, &vops->list, link) 2615 op_commit(vops->vm, tile, pt_update_ops, op, fence, NULL); 2616 } else if (ijob && !mjob) { 2617 dma_resv_add_fence(xe_vm_resv(vm), ifence, 2618 pt_update_ops->wait_vm_bookkeep ? 2619 DMA_RESV_USAGE_KERNEL : 2620 DMA_RESV_USAGE_BOOKKEEP); 2621 2622 list_for_each_entry(op, &vops->list, link) 2623 op_commit(vops->vm, tile, pt_update_ops, op, ifence, NULL); 2624 } else { 2625 dma_resv_add_fence(xe_vm_resv(vm), ifence, 2626 pt_update_ops->wait_vm_bookkeep ? 2627 DMA_RESV_USAGE_KERNEL : 2628 DMA_RESV_USAGE_BOOKKEEP); 2629 2630 dma_resv_add_fence(xe_vm_resv(vm), mfence, 2631 pt_update_ops->wait_vm_bookkeep ? 2632 DMA_RESV_USAGE_KERNEL : 2633 DMA_RESV_USAGE_BOOKKEEP); 2634 2635 list_for_each_entry(op, &vops->list, link) 2636 op_commit(vops->vm, tile, pt_update_ops, op, ifence, 2637 mfence); 2638 } 2639 2640 if (pt_update_ops->needs_svm_lock) 2641 xe_svm_notifier_unlock(vm); 2642 2643 /* 2644 * The last fence is only used for zero bind queue idling; migrate 2645 * queues are not exposed to user space. 
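	 * Skipping the update here is safe precisely because nothing ever
	 * consumes a migrate queue's last fence.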
	 */
	if (!(q->flags & EXEC_QUEUE_FLAG_MIGRATE))
		xe_exec_queue_last_fence_set(q, vm, fence);

	xe_tlb_inval_job_put(mjob);
	xe_tlb_inval_job_put(ijob);
	dma_fence_put(ifence);
	dma_fence_put(mfence);

	return fence;

free_rfence:
	kfree(rfence);
free_ijob:
	xe_tlb_inval_job_put(mjob);
	xe_tlb_inval_job_put(ijob);
kill_vm_tile1:
	if (err != -EAGAIN && err != -ENODATA && tile->id)
		xe_vm_kill(vops->vm, false);

	return ERR_PTR(err);
}
ALLOW_ERROR_INJECTION(xe_pt_update_ops_run, ERRNO);

/**
 * xe_pt_update_ops_fini() - Finish PT update operations
 * @tile: Tile of PT update operations
 * @vops: VMA operations
 *
 * Finish PT update operations by committing the destruction of page-table
 * memory.
 */
void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops)
{
	struct xe_vm_pgtable_update_ops *pt_update_ops =
		&vops->pt_update_ops[tile->id];
	int i;

	xe_page_reclaim_entries_put(pt_update_ops->prl.entries);

	lockdep_assert_held(&vops->vm->lock);
	xe_vm_assert_held(vops->vm);

	for (i = 0; i < pt_update_ops->current_op; ++i) {
		struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];

		xe_pt_free_bind(pt_op->entries, pt_op->num_entries);
	}
	xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred);
}

/**
 * xe_pt_update_ops_abort() - Abort PT update operations
 * @tile: Tile of PT update operations
 * @vops: VMA operations
 *
 * Abort PT update operations by unwinding internal PT state.
 */
void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops)
{
	struct xe_vm_pgtable_update_ops *pt_update_ops =
		&vops->pt_update_ops[tile->id];
	int i;

	lockdep_assert_held(&vops->vm->lock);
	xe_vm_assert_held(vops->vm);

	for (i = pt_update_ops->num_ops - 1; i >= 0; --i) {
		struct xe_vm_pgtable_update_op *pt_op =
			&pt_update_ops->ops[i];

		if (!pt_op->vma || i >= pt_update_ops->current_op)
			continue;

		if (pt_op->bind)
			xe_pt_abort_bind(pt_op->vma, pt_op->entries,
					 pt_op->num_entries,
					 pt_op->rebind);
		else
			xe_pt_abort_unbind(pt_op->vma, pt_op->entries,
					   pt_op->num_entries);
	}

	xe_pt_update_ops_fini(tile, vops);
}
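/*
 * Example: expected sequencing of the PT update API above, as a minimal
 * single-tile sketch. This is illustrative only -- the real caller (the VM
 * bind path in xe_vm.c) iterates over all tiles and does additional fence
 * bookkeeping, and the wrapper name below is hypothetical -- but it shows the
 * prepare / run / fini contract and the abort path described in the kerneldoc
 * of those functions.
 *
 *	static struct dma_fence *pt_update_example(struct xe_tile *tile,
 *						   struct xe_vma_ops *vops)
 *	{
 *		struct dma_fence *fence;
 *		int err;
 *
 *		err = xe_pt_update_ops_prepare(tile, vops);
 *		if (err) {
 *			// unwind anything already staged
 *			xe_pt_update_ops_abort(tile, vops);
 *			return ERR_PTR(err);
 *		}
 *
 *		fence = xe_pt_update_ops_run(tile, vops);
 *		if (IS_ERR(fence)) {
 *			xe_pt_update_ops_abort(tile, vops);
 *			return fence;
 *		}
 *
 *		// commit the deferred page-table frees
 *		xe_pt_update_ops_fini(tile, vops);
 *		return fence;
 *	}
 */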