// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_pt.h"

#include "regs/xe_gtt_defs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_drm_client.h"
#include "xe_exec_queue.h"
#include "xe_gt.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_pt_types.h"
#include "xe_pt_walk.h"
#include "xe_res_cursor.h"
#include "xe_sched_job.h"
#include "xe_sync.h"
#include "xe_trace.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"

struct xe_pt_dir {
	struct xe_pt pt;
	/** @children: Array of page-table child nodes */
	struct xe_ptw *children[XE_PDES];
};

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define xe_pt_set_addr(__xe_pt, __addr) ((__xe_pt)->addr = (__addr))
#define xe_pt_addr(__xe_pt) ((__xe_pt)->addr)
#else
#define xe_pt_set_addr(__xe_pt, __addr)
#define xe_pt_addr(__xe_pt) 0ull
#endif

static const u64 xe_normal_pt_shifts[] = {12, 21, 30, 39, 48};
static const u64 xe_compact_pt_shifts[] = {16, 21, 30, 39, 48};

#define XE_PT_HIGHEST_LEVEL	(ARRAY_SIZE(xe_normal_pt_shifts) - 1)

static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt)
{
	return container_of(pt, struct xe_pt_dir, pt);
}

static struct xe_pt *xe_pt_entry(struct xe_pt_dir *pt_dir, unsigned int index)
{
	return container_of(pt_dir->children[index], struct xe_pt, base);
}

static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
			     unsigned int level)
{
	struct xe_device *xe = tile_to_xe(tile);
	u16 pat_index = xe->pat.idx[XE_CACHE_WB];
	u8 id = tile->id;

	if (!xe_vm_has_scratch(vm))
		return 0;

	if (level > MAX_HUGEPTE_LEVEL)
		return vm->pt_ops->pde_encode_bo(vm->scratch_pt[id][level - 1]->bo,
						 0, pat_index);

	return vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0) |
		XE_PTE_NULL;
}

static void xe_pt_free(struct xe_pt *pt)
{
	if (pt->level)
		kfree(as_xe_pt_dir(pt));
	else
		kfree(pt);
}

/**
 * xe_pt_create() - Create a page-table.
 * @vm: The vm to create for.
 * @tile: The tile to create for.
 * @level: The page-table level.
 *
 * Allocate and initialize a single struct xe_pt metadata structure. Also
 * create the corresponding page-table bo, but don't initialize it. If the
 * level is greater than zero, then it's assumed to be a directory page-
 * table and the directory structure is also allocated and initialized to
 * NULL pointers.
 *
 * Return: A valid struct xe_pt pointer on success, an error pointer on
 * error.
 */
struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
			   unsigned int level)
{
	struct xe_pt *pt;
	struct xe_bo *bo;
	int err;

	if (level) {
		struct xe_pt_dir *dir = kzalloc(sizeof(*dir), GFP_KERNEL);

		pt = (dir) ? &dir->pt : NULL;
	} else {
		pt = kzalloc(sizeof(*pt), GFP_KERNEL);
	}
	if (!pt)
		return ERR_PTR(-ENOMEM);

	pt->level = level;
	bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				  XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
				  XE_BO_FLAG_PINNED |
				  XE_BO_FLAG_NO_RESV_EVICT |
				  XE_BO_FLAG_PAGETABLE);
	if (IS_ERR(bo)) {
		err = PTR_ERR(bo);
		goto err_kfree;
	}
	pt->bo = bo;
	pt->base.children = level ? as_xe_pt_dir(pt)->children : NULL;

	if (vm->xef)
		xe_drm_client_add_bo(vm->xef->client, pt->bo);
	xe_tile_assert(tile, level <= XE_VM_MAX_LEVEL);

	return pt;

err_kfree:
	xe_pt_free(pt);
	return ERR_PTR(err);
}

/**
 * xe_pt_populate_empty() - Populate a page-table bo with scratch- or zero
 * entries.
 * @tile: The tile the scratch pagetable of which to use.
 * @vm: The vm we populate for.
 * @pt: The pagetable the bo of which to initialize.
 *
 * Populate the page-table bo of @pt with entries pointing into the tile's
 * scratch page-table tree if any. Otherwise populate with zeros.
 */
void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
			  struct xe_pt *pt)
{
	struct iosys_map *map = &pt->bo->vmap;
	u64 empty;
	int i;

	if (!xe_vm_has_scratch(vm)) {
		/*
		 * FIXME: Some memory is already allocated to zero?
		 * Find out which memory that is and avoid this memset...
		 */
		xe_map_memset(vm->xe, map, 0, 0, SZ_4K);
	} else {
		empty = __xe_pt_empty_pte(tile, vm, pt->level);
		for (i = 0; i < XE_PDES; i++)
			xe_pt_write(vm->xe, map, i, empty);
	}
}

/**
 * xe_pt_shift() - Return the ilog2 value of the size of the address range of
 * a page-table at a certain level.
 * @level: The level.
 *
 * Return: The ilog2 value of the size of the address range of a page-table
 * at level @level.
 */
unsigned int xe_pt_shift(unsigned int level)
{
	return XE_PTE_SHIFT + XE_PDE_SHIFT * level;
}
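
/*
 * Illustrative values (assuming the conventional XE_PTE_SHIFT == 12 and
 * XE_PDE_SHIFT == 9, i.e. 4 KiB base pages and 512-entry directories):
 * xe_pt_shift(0) == 12 (4 KiB), xe_pt_shift(1) == 21 (2 MiB) and
 * xe_pt_shift(2) == 30 (1 GiB), matching xe_normal_pt_shifts above.
 */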

/**
 * xe_pt_destroy() - Destroy a page-table tree.
 * @pt: The root of the page-table tree to destroy.
 * @flags: vm flags. Currently unused.
 * @deferred: List head of lockless list for deferred putting. NULL for
 * immediate putting.
 *
 * Puts the page-table bo, recursively calls xe_pt_destroy on all children
 * and finally frees @pt. TODO: Can we remove the @flags argument?
 */
void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
{
	int i;

	if (!pt)
		return;

	XE_WARN_ON(!list_empty(&pt->bo->ttm.base.gpuva.list));
	xe_bo_unpin(pt->bo);
	xe_bo_put_deferred(pt->bo, deferred);

	if (pt->level > 0 && pt->num_live) {
		struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);

		for (i = 0; i < XE_PDES; i++) {
			if (xe_pt_entry(pt_dir, i))
				xe_pt_destroy(xe_pt_entry(pt_dir, i), flags,
					      deferred);
		}
	}
	xe_pt_free(pt);
}

/**
 * DOC: Pagetable building
 *
 * Below we use the term "page-table" for both page-directories, containing
 * pointers to lower level page-directories or page-tables, and level 0
 * page-tables that contain only page-table-entries pointing to memory pages.
 *
 * When inserting an address range in an already existing page-table tree
 * there will typically be a set of page-tables that are shared with other
 * address ranges, and a set that are private to this address range.
 * The set of shared page-tables can be at most two per level,
 * and those can't be updated immediately because the entries of those
 * page-tables may still be in use by the gpu for other mappings. Therefore
 * when inserting entries into those, we instead stage those insertions by
 * adding insertion data into struct xe_vm_pgtable_update structures. This
 * data (subtrees for the cpu and page-table-entries for the gpu) is then
 * added in a separate commit step. CPU data is committed while still under the
 * vm lock, the object lock and for userptr, the notifier lock in read mode.
 * The GPU async data is committed either by the GPU or CPU after fulfilling
 * relevant dependencies.
 * For non-shared page-tables (and, in fact, for shared ones that aren't
 * existing at the time of staging), we add the data in-place without the
 * special update structures. This private part of the page-table tree will
 * remain disconnected from the vm page-table tree until data is committed to
 * the shared page tables of the vm tree in the commit phase.
 */
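
/*
 * A rough roadmap of the helpers below, as one reading of how a bind flows
 * through this file:
 *
 *	xe_pt_stage_bind()		- build the disconnected subtree and
 *					  stage updates to shared page-tables
 *	xe_pt_commit_prepare_bind()	- connect the subtree into the vm
 *					  tree on the CPU side
 *	xe_migrate_update_pgtables()	- write the staged GPU entries
 *	xe_pt_commit()			- free replaced subtrees once the
 *					  GPU update has been scheduled
 */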

struct xe_pt_update {
	/** @update: The update structure we're building for this parent. */
	struct xe_vm_pgtable_update *update;
	/** @parent: The parent. Used to detect a parent change. */
	struct xe_pt *parent;
	/** @preexisting: Whether the parent was pre-existing or allocated */
	bool preexisting;
};

struct xe_pt_stage_bind_walk {
	/** @base: The base class. */
	struct xe_pt_walk base;

	/* Input parameters for the walk */
	/** @vm: The vm we're building for. */
	struct xe_vm *vm;
	/** @tile: The tile we're building for. */
	struct xe_tile *tile;
	/** @default_pte: PTE flag only template. No address is associated */
	u64 default_pte;
	/** @dma_offset: DMA offset to add to the PTE. */
	u64 dma_offset;
	/**
	 * @needs_64K: This address range enforces 64K alignment and
	 * granularity.
	 */
	bool needs_64K;
	/**
	 * @vma: VMA being mapped
	 */
	struct xe_vma *vma;

	/* Also input, but is updated during the walk */
	/** @curs: The DMA address cursor. */
	struct xe_res_cursor *curs;
	/** @va_curs_start: The virtual address corresponding to @curs->start */
	u64 va_curs_start;

	/* Output */
	struct xe_walk_update {
		/** @wupd.entries: Caller provided storage. */
		struct xe_vm_pgtable_update *entries;
		/** @wupd.num_used_entries: Number of update @entries used. */
		unsigned int num_used_entries;
		/** @wupd.updates: Tracks the update entry at a given level */
		struct xe_pt_update updates[XE_VM_MAX_LEVEL + 1];
	} wupd;

	/* Walk state */
	/**
	 * @l0_end_addr: The end address of the current l0 leaf. Used for
	 * 64K granularity detection.
	 */
	u64 l0_end_addr;
	/** @addr_64K: The start address of the current 64K chunk. */
	u64 addr_64K;
	/** @found_64K: Whether @addr_64K actually points to a 64K chunk. */
	bool found_64K;
};

static int
xe_pt_new_shared(struct xe_walk_update *wupd, struct xe_pt *parent,
		 pgoff_t offset, bool alloc_entries)
{
	struct xe_pt_update *upd = &wupd->updates[parent->level];
	struct xe_vm_pgtable_update *entry;

	/*
	 * For *each level*, we can only have one active
	 * struct xe_pt_update at any one time. Once we move on to a
	 * new parent and page-directory, the old one is complete, and
	 * updates are either already stored in the build tree or in
	 * @wupd->entries
	 */
	if (likely(upd->parent == parent))
		return 0;

	upd->parent = parent;
	upd->preexisting = true;

	if (wupd->num_used_entries == XE_VM_MAX_LEVEL * 2 + 1)
		return -EINVAL;

	entry = wupd->entries + wupd->num_used_entries++;
	upd->update = entry;
	entry->ofs = offset;
	entry->pt_bo = parent->bo;
	entry->pt = parent;
	entry->flags = 0;
	entry->qwords = 0;
	entry->pt_bo->update_index = -1;

	if (alloc_entries) {
		entry->pt_entries = kmalloc_array(XE_PDES,
						  sizeof(*entry->pt_entries),
						  GFP_KERNEL);
		if (!entry->pt_entries)
			return -ENOMEM;
	}

	return 0;
}

/*
 * NOTE: This is a very frequently called function so we allow ourselves
 * to annotate (using branch prediction hints) the fastpath of updating a
 * non-pre-existing pagetable with leaf ptes.
 */
static int
xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent,
		   pgoff_t offset, struct xe_pt *xe_child, u64 pte)
{
	struct xe_pt_update *upd = &xe_walk->wupd.updates[parent->level];
	struct xe_pt_update *child_upd = xe_child ?
		&xe_walk->wupd.updates[xe_child->level] : NULL;
	int ret;

	ret = xe_pt_new_shared(&xe_walk->wupd, parent, offset, true);
	if (unlikely(ret))
		return ret;

	/*
	 * Register this new pagetable so that it won't be recognized as
	 * a shared pagetable by a subsequent insertion.
	 */
	if (unlikely(child_upd)) {
		child_upd->update = NULL;
		child_upd->parent = xe_child;
		child_upd->preexisting = false;
	}

	if (likely(!upd->preexisting)) {
		/* Continue building a non-connected subtree. */
		struct iosys_map *map = &parent->bo->vmap;

		if (unlikely(xe_child))
			parent->base.children[offset] = &xe_child->base;

		xe_pt_write(xe_walk->vm->xe, map, offset, pte);
		parent->num_live++;
	} else {
		/* Shared pt. Stage update. */
		unsigned int idx;
		struct xe_vm_pgtable_update *entry = upd->update;

		idx = offset - entry->ofs;
		entry->pt_entries[idx].pt = xe_child;
		entry->pt_entries[idx].pte = pte;
		entry->qwords++;
	}

	return 0;
}

static bool xe_pt_hugepte_possible(u64 addr, u64 next, unsigned int level,
				   struct xe_pt_stage_bind_walk *xe_walk)
{
	u64 size, dma;

	if (level > MAX_HUGEPTE_LEVEL)
		return false;

	/* Does the virtual range requested cover a huge pte? */
	if (!xe_pt_covers(addr, next, level, &xe_walk->base))
		return false;

	/* Does the DMA segment cover the whole pte? */
	if (next - xe_walk->va_curs_start > xe_walk->curs->size)
		return false;

	/* Null VMAs do not have dma addresses */
	if (xe_vma_is_null(xe_walk->vma))
		return true;

	/* Is the DMA address huge PTE size aligned? */
	size = next - addr;
	dma = addr - xe_walk->va_curs_start + xe_res_dma(xe_walk->curs);

	return IS_ALIGNED(dma, size);
}
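
/*
 * Worked example for the checks above (illustrative numbers only): a
 * level-1 huge PTE covers 2 MiB, so a virtual range [0x200000, 0x400000)
 * can be mapped with a single PTE only if the backing DMA segment is
 * contiguous for the whole 2 MiB *and* the DMA address itself is 2 MiB
 * aligned.
 */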

/*
 * Scan the requested mapping to check whether it can be done entirely
 * with 64K PTEs.
 */
static bool
xe_pt_scan_64K(u64 addr, u64 next, struct xe_pt_stage_bind_walk *xe_walk)
{
	struct xe_res_cursor curs = *xe_walk->curs;

	if (!IS_ALIGNED(addr, SZ_64K))
		return false;

	if (next > xe_walk->l0_end_addr)
		return false;

	/* Null VMAs do not have dma addresses */
	if (xe_vma_is_null(xe_walk->vma))
		return true;

	xe_res_next(&curs, addr - xe_walk->va_curs_start);
	for (; addr < next; addr += SZ_64K) {
		if (!IS_ALIGNED(xe_res_dma(&curs), SZ_64K) || curs.size < SZ_64K)
			return false;

		xe_res_next(&curs, SZ_64K);
	}

	return addr == next;
}

/*
 * For non-compact "normal" 4K level-0 pagetables, we want to try to group
 * addresses together in 64K-contiguous regions to add a 64K TLB hint for the
 * device to the PTE.
 * This function determines whether the address is part of such a
 * segment. For VRAM in normal pagetables, this is strictly necessary on
 * some devices.
 */
static bool
xe_pt_is_pte_ps64K(u64 addr, u64 next, struct xe_pt_stage_bind_walk *xe_walk)
{
	/* Address is within an already found 64k region */
	if (xe_walk->found_64K && addr - xe_walk->addr_64K < SZ_64K)
		return true;

	xe_walk->found_64K = xe_pt_scan_64K(addr, addr + SZ_64K, xe_walk);
	xe_walk->addr_64K = addr;

	return xe_walk->found_64K;
}
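
/*
 * Illustration: once a 64K-aligned chunk passes xe_pt_scan_64K(), all
 * sixteen 4 KiB PTEs falling inside that chunk get the XE_PTE_PS64 hint
 * in the bind walk below, and the scan result is cached in @found_64K /
 * @addr_64K so each chunk is only scanned once.
 */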

static int
xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
		       unsigned int level, u64 addr, u64 next,
		       struct xe_ptw **child,
		       enum page_walk_action *action,
		       struct xe_pt_walk *walk)
{
	struct xe_pt_stage_bind_walk *xe_walk =
		container_of(walk, typeof(*xe_walk), base);
	u16 pat_index = xe_walk->vma->pat_index;
	struct xe_pt *xe_parent = container_of(parent, typeof(*xe_parent), base);
	struct xe_vm *vm = xe_walk->vm;
	struct xe_pt *xe_child;
	bool covers;
	int ret = 0;
	u64 pte;

	/* Is this a leaf entry? */
	if (level == 0 || xe_pt_hugepte_possible(addr, next, level, xe_walk)) {
		struct xe_res_cursor *curs = xe_walk->curs;
		bool is_null = xe_vma_is_null(xe_walk->vma);

		XE_WARN_ON(xe_walk->va_curs_start != addr);

		pte = vm->pt_ops->pte_encode_vma(is_null ? 0 :
						 xe_res_dma(curs) + xe_walk->dma_offset,
						 xe_walk->vma, pat_index, level);
		pte |= xe_walk->default_pte;

		/*
		 * Set the XE_PTE_PS64 hint if possible, otherwise if
		 * this device *requires* 64K PTE size for VRAM, fail.
		 */
		if (level == 0 && !xe_parent->is_compact) {
			if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) {
				xe_walk->vma->gpuva.flags |= XE_VMA_PTE_64K;
				pte |= XE_PTE_PS64;
			} else if (XE_WARN_ON(xe_walk->needs_64K)) {
				return -EINVAL;
			}
		}

		ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, NULL, pte);
		if (unlikely(ret))
			return ret;

		if (!is_null)
			xe_res_next(curs, next - addr);
		xe_walk->va_curs_start = next;
		xe_walk->vma->gpuva.flags |= (XE_VMA_PTE_4K << level);
		*action = ACTION_CONTINUE;

		return ret;
	}

	/*
	 * Descending to lower level. Determine if we need to allocate a
	 * new page table or -directory, which we do if there is no
	 * previous one or there is one we can completely replace.
	 */
	if (level == 1) {
		walk->shifts = xe_normal_pt_shifts;
		xe_walk->l0_end_addr = next;
	}

	covers = xe_pt_covers(addr, next, level, &xe_walk->base);
	if (covers || !*child) {
		u64 flags = 0;

		xe_child = xe_pt_create(xe_walk->vm, xe_walk->tile, level - 1);
		if (IS_ERR(xe_child))
			return PTR_ERR(xe_child);

		xe_pt_set_addr(xe_child,
			       round_down(addr, 1ull << walk->shifts[level]));

		if (!covers)
			xe_pt_populate_empty(xe_walk->tile, xe_walk->vm, xe_child);

		*child = &xe_child->base;

		/*
		 * Prefer the compact pagetable layout for L0 if possible. Only
		 * possible if VMA covers entire 2MB region as compact 64k and
		 * 4k pages cannot be mixed within a 2MB region.
		 * TODO: Suballocate the pt bo to avoid wasting a lot of
		 * memory.
		 */
		if (GRAPHICS_VERx100(tile_to_xe(xe_walk->tile)) >= 1250 && level == 1 &&
		    covers && xe_pt_scan_64K(addr, next, xe_walk)) {
			walk->shifts = xe_compact_pt_shifts;
			xe_walk->vma->gpuva.flags |= XE_VMA_PTE_COMPACT;
			flags |= XE_PDE_64K;
			xe_child->is_compact = true;
		}

		pte = vm->pt_ops->pde_encode_bo(xe_child->bo, 0, pat_index) | flags;
		ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, xe_child,
					 pte);
	}

	*action = ACTION_SUBTREE;
	return ret;
}

static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
	.pt_entry = xe_pt_stage_bind_entry,
};

/**
 * xe_pt_stage_bind() - Build a disconnected page-table tree for a given address
 * range.
 * @tile: The tile we're building for.
 * @vma: The vma indicating the address range.
 * @entries: Storage for the update entries used for connecting the tree to
 * the main tree at commit time.
 * @num_entries: On output contains the number of @entries used.
 *
 * This function builds a disconnected page-table tree for a given address
 * range. The tree is connected to the main vm tree for the gpu using
 * xe_migrate_update_pgtables() and for the cpu using xe_pt_commit_prepare_bind().
 * The function builds xe_vm_pgtable_update structures for already existing
 * shared page-tables, and non-existing shared and non-shared page-tables
 * are built and populated directly.
 *
 * Return: 0 on success, negative error code on error.
 */
static int
xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
		 struct xe_vm_pgtable_update *entries, u32 *num_entries)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_bo *bo = xe_vma_bo(vma);
	bool is_devmem = !xe_vma_is_userptr(vma) && bo &&
		(xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo));
	struct xe_res_cursor curs;
	struct xe_pt_stage_bind_walk xe_walk = {
		.base = {
			.ops = &xe_pt_stage_bind_ops,
			.shifts = xe_normal_pt_shifts,
			.max_level = XE_PT_HIGHEST_LEVEL,
		},
		.vm = xe_vma_vm(vma),
		.tile = tile,
		.curs = &curs,
		.va_curs_start = xe_vma_start(vma),
		.vma = vma,
		.wupd.entries = entries,
		.needs_64K = (xe_vma_vm(vma)->flags & XE_VM_FLAG_64K) && is_devmem,
	};
	struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
	int ret;

	/*
	 * Default atomic expectations for different allocation scenarios are as follows:
	 *
	 * 1. Traditional API: When the VM is not in LR mode:
	 *    - Device atomics are expected to function with all allocations.
	 *
	 * 2. Compute/SVM API: When the VM is in LR mode:
	 *    - Device atomics are the default behavior when the bo is placed in a single region.
	 *    - In all other cases device atomics will be disabled with AE=0 until an application
	 *      requests differently using an ioctl like madvise.
	 */
	if (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) {
		if (xe_vm_in_lr_mode(xe_vma_vm(vma))) {
			if (bo && xe_bo_has_single_placement(bo))
				xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
			/*
			 * If a SMEM+LMEM allocation is backed by SMEM, a device
			 * atomic will cause a gpu page fault and the allocation
			 * then gets migrated to LMEM, so bind such allocations
			 * with device atomics enabled.
			 */
			else if (is_devmem && !xe_bo_has_single_placement(bo))
				xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
		} else {
			xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
		}

		/*
		 * Unset AE if the platform (PVC) doesn't support it on an
		 * allocation
		 */
		if (!xe->info.has_device_atomics_on_smem && !is_devmem)
			xe_walk.default_pte &= ~XE_USM_PPGTT_PTE_AE;
	}

	if (is_devmem) {
		xe_walk.default_pte |= XE_PPGTT_PTE_DM;
		xe_walk.dma_offset = vram_region_gpu_offset(bo->ttm.resource);
	}

	if (!xe_vma_has_no_bo(vma) && xe_bo_is_stolen(bo))
		xe_walk.dma_offset = xe_ttm_stolen_gpu_offset(xe_bo_device(bo));

	xe_bo_assert_held(bo);

	if (!xe_vma_is_null(vma)) {
		if (xe_vma_is_userptr(vma))
			xe_res_first_sg(to_userptr_vma(vma)->userptr.sg, 0,
					xe_vma_size(vma), &curs);
		else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo))
			xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma),
				     xe_vma_size(vma), &curs);
		else
			xe_res_first_sg(xe_bo_sg(bo), xe_vma_bo_offset(vma),
					xe_vma_size(vma), &curs);
	} else {
		curs.size = xe_vma_size(vma);
	}

	ret = xe_pt_walk_range(&pt->base, pt->level, xe_vma_start(vma),
			       xe_vma_end(vma), &xe_walk.base);

	*num_entries = xe_walk.wupd.num_used_entries;
	return ret;
}

/**
 * xe_pt_nonshared_offsets() - Determine the non-shared entry offsets of a
 * shared pagetable.
 * @addr: The start address within the non-shared pagetable.
 * @end: The end address within the non-shared pagetable.
 * @level: The level of the non-shared pagetable.
 * @walk: Walk info. The function adjusts the walk action.
 * @action: next action to perform (see enum page_walk_action)
 * @offset: Ignored on input, first non-shared entry on output.
 * @end_offset: Ignored on input, last non-shared entry + 1 on output.
 *
 * A non-shared page-table has some entries that belong to the address range
 * and others that don't. This function determines the entries that belong
 * fully to the address range. Depending on level, some entries may
 * partially belong to the address range (that can't happen at level 0).
 * The function detects that and adjusts those offsets to not include those
 * partial entries. Iff it does detect partial entries, we know that there must
 * be shared page tables also at lower levels, so it adjusts the walk action
 * accordingly.
 *
 * Return: true if there were non-shared entries, false otherwise.
 */
static bool xe_pt_nonshared_offsets(u64 addr, u64 end, unsigned int level,
				    struct xe_pt_walk *walk,
				    enum page_walk_action *action,
				    pgoff_t *offset, pgoff_t *end_offset)
{
	u64 size = 1ull << walk->shifts[level];

	*offset = xe_pt_offset(addr, level, walk);
	*end_offset = xe_pt_num_entries(addr, end, level, walk) + *offset;

	if (!level)
		return true;

	/*
	 * If addr or next are not size aligned, there are shared pts at lower
	 * level, so in that case traverse down the subtree
	 */
	*action = ACTION_CONTINUE;
	if (!IS_ALIGNED(addr, size)) {
		*action = ACTION_SUBTREE;
		(*offset)++;
	}

	if (!IS_ALIGNED(end, size)) {
		*action = ACTION_SUBTREE;
		(*end_offset)--;
	}

	return *end_offset > *offset;
}
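
/*
 * Worked example (illustrative): for a range [1M, 7M) at level 1, where
 * an entry covers 2M, neither end is 2M aligned, so the first and last
 * entries are only partially covered and are skipped here (the walk
 * descends into them instead), leaving the fully covered entries for
 * [2M, 6M) as the non-shared range.
 */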

struct xe_pt_zap_ptes_walk {
	/** @base: The walk base-class */
	struct xe_pt_walk base;

	/* Input parameters for the walk */
	/** @tile: The tile we're building for */
	struct xe_tile *tile;

	/* Output */
	/** @needs_invalidate: Whether we need to invalidate TLB */
	bool needs_invalidate;
};

static int xe_pt_zap_ptes_entry(struct xe_ptw *parent, pgoff_t offset,
				unsigned int level, u64 addr, u64 next,
				struct xe_ptw **child,
				enum page_walk_action *action,
				struct xe_pt_walk *walk)
{
	struct xe_pt_zap_ptes_walk *xe_walk =
		container_of(walk, typeof(*xe_walk), base);
	struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
	pgoff_t end_offset;

	XE_WARN_ON(!*child);
	XE_WARN_ON(!level);

	/*
	 * Note that we're called from an entry callback, and we're dealing
	 * with the child of that entry rather than the parent, so need to
	 * adjust level down.
	 */
	if (xe_pt_nonshared_offsets(addr, next, --level, walk, action, &offset,
				    &end_offset)) {
		xe_map_memset(tile_to_xe(xe_walk->tile), &xe_child->bo->vmap,
			      offset * sizeof(u64), 0,
			      (end_offset - offset) * sizeof(u64));
		xe_walk->needs_invalidate = true;
	}

	return 0;
}

static const struct xe_pt_walk_ops xe_pt_zap_ptes_ops = {
	.pt_entry = xe_pt_zap_ptes_entry,
};

/**
 * xe_pt_zap_ptes() - Zap (zero) gpu ptes of an address range
 * @tile: The tile we're zapping for.
 * @vma: GPU VMA detailing address range.
 *
 * Eviction and Userptr invalidation need to be able to zap the
 * gpu ptes of a given address range in pagefaulting mode.
 * In order to be able to do that, that function needs access to the shared
 * page-table entries so it can either clear the leaf PTEs or
 * clear the pointers to lower-level page-tables. The caller is required
 * to hold the necessary locks to ensure neither the page-table connectivity
 * nor the page-table entries of the range are updated from under us.
 *
 * Return: Whether ptes were actually updated and a TLB invalidation is
 * required.
 */
bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma)
{
	struct xe_pt_zap_ptes_walk xe_walk = {
		.base = {
			.ops = &xe_pt_zap_ptes_ops,
			.shifts = xe_normal_pt_shifts,
			.max_level = XE_PT_HIGHEST_LEVEL,
		},
		.tile = tile,
	};
	struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
	u8 pt_mask = (vma->tile_present & ~vma->tile_invalidated);

	if (!(pt_mask & BIT(tile->id)))
		return false;

	(void)xe_pt_walk_shared(&pt->base, pt->level, xe_vma_start(vma),
				xe_vma_end(vma), &xe_walk.base);

	return xe_walk.needs_invalidate;
}
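
/*
 * A sketch of the expected caller pattern (hedged; the real callers live
 * in the eviction and userptr invalidation paths outside this file):
 *
 *	if (xe_pt_zap_ptes(tile, vma))
 *		// issue a TLB invalidation for the range, e.g. via
 *		// xe_gt_tlb_invalidation_range(), before anything can
 *		// reuse the backing memory.
 */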

static void
xe_vm_populate_pgtable(struct xe_migrate_pt_update *pt_update, struct xe_tile *tile,
		       struct iosys_map *map, void *data,
		       u32 qword_ofs, u32 num_qwords,
		       const struct xe_vm_pgtable_update *update)
{
	struct xe_pt_entry *ptes = update->pt_entries;
	u64 *ptr = data;
	u32 i;

	for (i = 0; i < num_qwords; i++) {
		if (map)
			xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) *
				  sizeof(u64), u64, ptes[i].pte);
		else
			ptr[i] = ptes[i].pte;
	}
}

static void xe_pt_cancel_bind(struct xe_vma *vma,
			      struct xe_vm_pgtable_update *entries,
			      u32 num_entries)
{
	u32 i, j;

	for (i = 0; i < num_entries; i++) {
		struct xe_pt *pt = entries[i].pt;

		if (!pt)
			continue;

		if (pt->level) {
			for (j = 0; j < entries[i].qwords; j++)
				xe_pt_destroy(entries[i].pt_entries[j].pt,
					      xe_vma_vm(vma)->flags, NULL);
		}

		kfree(entries[i].pt_entries);
		entries[i].pt_entries = NULL;
		entries[i].qwords = 0;
	}
}

static void xe_pt_commit_locks_assert(struct xe_vma *vma)
{
	struct xe_vm *vm = xe_vma_vm(vma);

	lockdep_assert_held(&vm->lock);

	if (!xe_vma_is_userptr(vma) && !xe_vma_is_null(vma))
		dma_resv_assert_held(xe_vma_bo(vma)->ttm.base.resv);

	xe_vm_assert_held(vm);
}

static void xe_pt_commit(struct xe_vma *vma,
			 struct xe_vm_pgtable_update *entries,
			 u32 num_entries, struct llist_head *deferred)
{
	u32 i, j;

	xe_pt_commit_locks_assert(vma);

	for (i = 0; i < num_entries; i++) {
		struct xe_pt *pt = entries[i].pt;

		if (!pt->level)
			continue;

		for (j = 0; j < entries[i].qwords; j++) {
			struct xe_pt *oldpte = entries[i].pt_entries[j].pt;

			xe_pt_destroy(oldpte, xe_vma_vm(vma)->flags, deferred);
		}
	}
}

static void xe_pt_abort_bind(struct xe_vma *vma,
			     struct xe_vm_pgtable_update *entries,
			     u32 num_entries, bool rebind)
{
	int i, j;

	xe_pt_commit_locks_assert(vma);

	for (i = num_entries - 1; i >= 0; --i) {
		struct xe_pt *pt = entries[i].pt;
		struct xe_pt_dir *pt_dir;

		if (!rebind)
			pt->num_live -= entries[i].qwords;

		if (!pt->level)
			continue;

		pt_dir = as_xe_pt_dir(pt);
		for (j = 0; j < entries[i].qwords; j++) {
			u32 j_ = j + entries[i].ofs;
			struct xe_pt *newpte = xe_pt_entry(pt_dir, j_);
			struct xe_pt *oldpte = entries[i].pt_entries[j].pt;

			pt_dir->children[j_] = oldpte ? &oldpte->base : NULL;
			xe_pt_destroy(newpte, xe_vma_vm(vma)->flags, NULL);
		}
	}
}

static void xe_pt_commit_prepare_bind(struct xe_vma *vma,
				      struct xe_vm_pgtable_update *entries,
				      u32 num_entries, bool rebind)
{
	u32 i, j;

	xe_pt_commit_locks_assert(vma);

	for (i = 0; i < num_entries; i++) {
		struct xe_pt *pt = entries[i].pt;
		struct xe_pt_dir *pt_dir;

		if (!rebind)
			pt->num_live += entries[i].qwords;

		if (!pt->level)
			continue;

		pt_dir = as_xe_pt_dir(pt);
		for (j = 0; j < entries[i].qwords; j++) {
			u32 j_ = j + entries[i].ofs;
			struct xe_pt *newpte = entries[i].pt_entries[j].pt;
			struct xe_pt *oldpte = NULL;

			if (xe_pt_entry(pt_dir, j_))
				oldpte = xe_pt_entry(pt_dir, j_);

			pt_dir->children[j_] = &newpte->base;
			entries[i].pt_entries[j].pt = oldpte;
		}
	}
}

static void xe_pt_free_bind(struct xe_vm_pgtable_update *entries,
			    u32 num_entries)
{
	u32 i;

	for (i = 0; i < num_entries; i++)
		kfree(entries[i].pt_entries);
}

static int
xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
		   struct xe_vm_pgtable_update *entries, u32 *num_entries)
{
	int err;

	*num_entries = 0;
	err = xe_pt_stage_bind(tile, vma, entries, num_entries);
	if (!err)
		xe_tile_assert(tile, *num_entries);

	return err;
}

static void xe_vm_dbg_print_entries(struct xe_device *xe,
				    const struct xe_vm_pgtable_update *entries,
				    unsigned int num_entries, bool bind)
#if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM))
{
	unsigned int i;

	vm_dbg(&xe->drm, "%s: %u entries to update\n", bind ? "bind" : "unbind",
	       num_entries);
	for (i = 0; i < num_entries; i++) {
		const struct xe_vm_pgtable_update *entry = &entries[i];
		struct xe_pt *xe_pt = entry->pt;
		u64 page_size = 1ull << xe_pt_shift(xe_pt->level);
		u64 end;
		u64 start;

		xe_assert(xe, !entry->pt->is_compact);
		start = entry->ofs * page_size;
		end = start + page_size * entry->qwords;
		vm_dbg(&xe->drm,
		       "\t%u: Update level %u at (%u + %u) [%llx...%llx) f:%x\n",
		       i, xe_pt->level, entry->ofs, entry->qwords,
		       xe_pt_addr(xe_pt) + start, xe_pt_addr(xe_pt) + end, 0);
	}
}
#else
{}
#endif

static bool no_in_syncs(struct xe_sync_entry *syncs, u32 num_syncs)
{
	int i;

	for (i = 0; i < num_syncs; i++) {
		struct dma_fence *fence = syncs[i].fence;

		if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				       &fence->flags))
			return false;
	}

	return true;
}

static int job_test_add_deps(struct xe_sched_job *job,
			     struct dma_resv *resv,
			     enum dma_resv_usage usage)
{
	if (!job) {
		if (!dma_resv_test_signaled(resv, usage))
			return -ETIME;

		return 0;
	}

	return xe_sched_job_add_deps(job, resv, usage);
}

static int vma_add_deps(struct xe_vma *vma, struct xe_sched_job *job)
{
	struct xe_bo *bo = xe_vma_bo(vma);

	xe_bo_assert_held(bo);

	if (bo && !bo->vm)
		return job_test_add_deps(job, bo->ttm.base.resv,
					 DMA_RESV_USAGE_KERNEL);

	return 0;
}

static int op_add_deps(struct xe_vm *vm, struct xe_vma_op *op,
		       struct xe_sched_job *job)
{
	int err = 0;

	switch (op->base.op) {
	case DRM_GPUVA_OP_MAP:
		if (!op->map.immediate && xe_vm_in_fault_mode(vm))
			break;

		err = vma_add_deps(op->map.vma, job);
		break;
	case DRM_GPUVA_OP_REMAP:
		if (op->remap.prev)
			err = vma_add_deps(op->remap.prev, job);
		if (!err && op->remap.next)
			err = vma_add_deps(op->remap.next, job);
		break;
	case DRM_GPUVA_OP_UNMAP:
		break;
	case DRM_GPUVA_OP_PREFETCH:
		err = vma_add_deps(gpuva_to_vma(op->base.prefetch.va), job);
		break;
	default:
		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
	}

	return err;
}

static int xe_pt_vm_dependencies(struct xe_sched_job *job,
				 struct xe_vm *vm,
				 struct xe_vma_ops *vops,
				 struct xe_vm_pgtable_update_ops *pt_update_ops,
				 struct xe_range_fence_tree *rftree)
{
	struct xe_range_fence *rtfence;
	struct dma_fence *fence;
	struct xe_vma_op *op;
	int err = 0, i;

	xe_vm_assert_held(vm);

	if (!job && !no_in_syncs(vops->syncs, vops->num_syncs))
		return -ETIME;

	if (!job && !xe_exec_queue_is_idle(pt_update_ops->q))
		return -ETIME;

	if (pt_update_ops->wait_vm_bookkeep || pt_update_ops->wait_vm_kernel) {
		err = job_test_add_deps(job, xe_vm_resv(vm),
					pt_update_ops->wait_vm_bookkeep ?
					DMA_RESV_USAGE_BOOKKEEP :
					DMA_RESV_USAGE_KERNEL);
		if (err)
			return err;
	}

	rtfence = xe_range_fence_tree_first(rftree, pt_update_ops->start,
					    pt_update_ops->last);
	while (rtfence) {
		fence = rtfence->fence;

		if (!dma_fence_is_signaled(fence)) {
			/*
			 * Is this a CPU update? GPU is busy updating, so return
			 * an error
			 */
			if (!job)
				return -ETIME;

			dma_fence_get(fence);
			err = drm_sched_job_add_dependency(&job->drm, fence);
			if (err)
				return err;
		}

		rtfence = xe_range_fence_tree_next(rtfence,
						   pt_update_ops->start,
						   pt_update_ops->last);
	}

	list_for_each_entry(op, &vops->list, link) {
		err = op_add_deps(vm, op, job);
		if (err)
			return err;
	}

	if (!(pt_update_ops->q->flags & EXEC_QUEUE_FLAG_KERNEL)) {
		if (job)
			err = xe_sched_job_last_fence_add_dep(job, vm);
		else
			err = xe_exec_queue_last_fence_test_dep(pt_update_ops->q, vm);
	}

	for (i = 0; job && !err && i < vops->num_syncs; i++)
		err = xe_sync_entry_add_deps(&vops->syncs[i], job);

	return err;
}

static int xe_pt_pre_commit(struct xe_migrate_pt_update *pt_update)
{
	struct xe_vma_ops *vops = pt_update->vops;
	struct xe_vm *vm = vops->vm;
	struct xe_range_fence_tree *rftree = &vm->rftree[pt_update->tile_id];
	struct xe_vm_pgtable_update_ops *pt_update_ops =
		&vops->pt_update_ops[pt_update->tile_id];

	return xe_pt_vm_dependencies(pt_update->job, vm, pt_update->vops,
				     pt_update_ops, rftree);
}

#ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT

static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
{
	u32 divisor = uvma->userptr.divisor ? uvma->userptr.divisor : 2;
	static u32 count;

	if (count++ % divisor == divisor - 1) {
		uvma->userptr.divisor = divisor << 1;
		return true;
	}

	return false;
}

#else

static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
{
	return false;
}

#endif

static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
			     struct xe_vm_pgtable_update_ops *pt_update)
{
	struct xe_userptr_vma *uvma;
	unsigned long notifier_seq;

	lockdep_assert_held_read(&vm->userptr.notifier_lock);

	if (!xe_vma_is_userptr(vma))
		return 0;

	uvma = to_userptr_vma(vma);
	notifier_seq = uvma->userptr.notifier_seq;

	if (uvma->userptr.initial_bind && !xe_vm_in_fault_mode(vm))
		return 0;

	if (!mmu_interval_read_retry(&uvma->userptr.notifier,
				     notifier_seq) &&
	    !xe_pt_userptr_inject_eagain(uvma))
		return 0;

	if (xe_vm_in_fault_mode(vm)) {
		return -EAGAIN;
	} else {
		spin_lock(&vm->userptr.invalidated_lock);
		list_move_tail(&uvma->userptr.invalidate_link,
			       &vm->userptr.invalidated);
		spin_unlock(&vm->userptr.invalidated_lock);

		if (xe_vm_in_preempt_fence_mode(vm)) {
			struct dma_resv_iter cursor;
			struct dma_fence *fence;
			long err;

			dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
					    DMA_RESV_USAGE_BOOKKEEP);
			dma_resv_for_each_fence_unlocked(&cursor, fence)
				dma_fence_enable_sw_signaling(fence);
			dma_resv_iter_end(&cursor);

			err = dma_resv_wait_timeout(xe_vm_resv(vm),
						    DMA_RESV_USAGE_BOOKKEEP,
						    false, MAX_SCHEDULE_TIMEOUT);
			XE_WARN_ON(err <= 0);
		}
	}

	return 0;
}

static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
			    struct xe_vm_pgtable_update_ops *pt_update)
{
	int err = 0;

	lockdep_assert_held_read(&vm->userptr.notifier_lock);

	switch (op->base.op) {
	case DRM_GPUVA_OP_MAP:
		if (!op->map.immediate && xe_vm_in_fault_mode(vm))
			break;

		err = vma_check_userptr(vm, op->map.vma, pt_update);
		break;
	case DRM_GPUVA_OP_REMAP:
		if (op->remap.prev)
			err = vma_check_userptr(vm, op->remap.prev, pt_update);
		if (!err && op->remap.next)
			err = vma_check_userptr(vm, op->remap.next, pt_update);
		break;
	case DRM_GPUVA_OP_UNMAP:
		break;
	case DRM_GPUVA_OP_PREFETCH:
		err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va),
					pt_update);
		break;
	default:
		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
	}

	return err;
}

static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
{
	struct xe_vm *vm = pt_update->vops->vm;
	struct xe_vma_ops *vops = pt_update->vops;
	struct xe_vm_pgtable_update_ops *pt_update_ops =
		&vops->pt_update_ops[pt_update->tile_id];
	struct xe_vma_op *op;
	int err;

	err = xe_pt_pre_commit(pt_update);
	if (err)
		return err;

	down_read(&vm->userptr.notifier_lock);

	list_for_each_entry(op, &vops->list, link) {
		err = op_check_userptr(vm, op, pt_update_ops);
		if (err) {
			up_read(&vm->userptr.notifier_lock);
			break;
		}
	}

	return err;
}

struct invalidation_fence {
	struct xe_gt_tlb_invalidation_fence base;
	struct xe_gt *gt;
	struct dma_fence *fence;
	struct dma_fence_cb cb;
	struct work_struct work;
	u64 start;
	u64 end;
	u32 asid;
};

static void invalidation_fence_cb(struct dma_fence *fence,
				  struct dma_fence_cb *cb)
{
	struct invalidation_fence *ifence =
		container_of(cb, struct invalidation_fence, cb);
	struct xe_device *xe = gt_to_xe(ifence->gt);

	trace_xe_gt_tlb_invalidation_fence_cb(xe, &ifence->base);
	if (!ifence->fence->error) {
		queue_work(system_wq, &ifence->work);
	} else {
		ifence->base.base.error = ifence->fence->error;
		dma_fence_signal(&ifence->base.base);
		dma_fence_put(&ifence->base.base);
	}
	dma_fence_put(ifence->fence);
}

static void invalidation_fence_work_func(struct work_struct *w)
{
	struct invalidation_fence *ifence =
		container_of(w, struct invalidation_fence, work);
	struct xe_device *xe = gt_to_xe(ifence->gt);

	trace_xe_gt_tlb_invalidation_fence_work_func(xe, &ifence->base);
	xe_gt_tlb_invalidation_range(ifence->gt, &ifence->base, ifence->start,
				     ifence->end, ifence->asid);
}

static void invalidation_fence_init(struct xe_gt *gt,
				    struct invalidation_fence *ifence,
				    struct dma_fence *fence,
				    u64 start, u64 end, u32 asid)
{
	int ret;

	trace_xe_gt_tlb_invalidation_fence_create(gt_to_xe(gt), &ifence->base);

	xe_gt_tlb_invalidation_fence_init(gt, &ifence->base, false);

	ifence->fence = fence;
	ifence->gt = gt;
	ifence->start = start;
	ifence->end = end;
	ifence->asid = asid;

	INIT_WORK(&ifence->work, invalidation_fence_work_func);
	ret = dma_fence_add_callback(fence, &ifence->cb, invalidation_fence_cb);
	if (ret == -ENOENT) {
		dma_fence_put(ifence->fence);	/* Usually dropped in CB */
		invalidation_fence_work_func(&ifence->work);
	} else if (ret) {
		dma_fence_put(&ifence->base.base);	/* Caller ref */
		dma_fence_put(&ifence->base.base);	/* Creation ref */
	}

	xe_gt_assert(gt, !ret || ret == -ENOENT);
}
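
/*
 * Reference handling above, spelled out (a descriptive summary of the
 * existing code, not new behavior): dma_fence_add_callback() returning
 * -ENOENT means @fence has already signaled, so invalidation_fence_cb()
 * will never run; in that case the reference the callback would have
 * dropped is put here and the invalidation work runs synchronously. Any
 * other error drops both outstanding references to the invalidation
 * fence.
 */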

struct xe_pt_stage_unbind_walk {
	/** @base: The pagewalk base-class. */
	struct xe_pt_walk base;

	/* Input parameters for the walk */
	/** @tile: The tile we're unbinding from. */
	struct xe_tile *tile;

	/**
	 * @modified_start: Walk range start, modified to include any
	 * shared pagetables that we're the only user of and can thus
	 * treat as private.
	 */
	u64 modified_start;
	/** @modified_end: Walk range end, modified like @modified_start. */
	u64 modified_end;

	/* Output */
	/** @wupd: Structure to track the page-table updates we're building */
	struct xe_walk_update wupd;
};

/*
 * Check whether this range is the only one populating this pagetable,
 * and in that case, update the walk range checks so that higher levels don't
 * view us as a shared pagetable.
 */
static bool xe_pt_check_kill(u64 addr, u64 next, unsigned int level,
			     const struct xe_pt *child,
			     enum page_walk_action *action,
			     struct xe_pt_walk *walk)
{
	struct xe_pt_stage_unbind_walk *xe_walk =
		container_of(walk, typeof(*xe_walk), base);
	unsigned int shift = walk->shifts[level];
	u64 size = 1ull << shift;

	if (IS_ALIGNED(addr, size) && IS_ALIGNED(next, size) &&
	    ((next - addr) >> shift) == child->num_live) {
		u64 size = 1ull << walk->shifts[level + 1];

		*action = ACTION_CONTINUE;

		if (xe_walk->modified_start >= addr)
			xe_walk->modified_start = round_down(addr, size);
		if (xe_walk->modified_end <= next)
			xe_walk->modified_end = round_up(next, size);

		return true;
	}

	return false;
}
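
/*
 * Example (illustrative): a level-0 table whose num_live == 512 and whose
 * whole 2M range falls inside the unbind range can be removed outright;
 * the check above then widens @modified_start / @modified_end to the
 * containing level-1 entry so the parent no longer treats that entry as
 * shared.
 */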

static int xe_pt_stage_unbind_entry(struct xe_ptw *parent, pgoff_t offset,
				    unsigned int level, u64 addr, u64 next,
				    struct xe_ptw **child,
				    enum page_walk_action *action,
				    struct xe_pt_walk *walk)
{
	struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);

	XE_WARN_ON(!*child);
	XE_WARN_ON(!level);

	xe_pt_check_kill(addr, next, level - 1, xe_child, action, walk);

	return 0;
}

static int
xe_pt_stage_unbind_post_descend(struct xe_ptw *parent, pgoff_t offset,
				unsigned int level, u64 addr, u64 next,
				struct xe_ptw **child,
				enum page_walk_action *action,
				struct xe_pt_walk *walk)
{
	struct xe_pt_stage_unbind_walk *xe_walk =
		container_of(walk, typeof(*xe_walk), base);
	struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
	pgoff_t end_offset;
	u64 size = 1ull << walk->shifts[--level];
	int err;

	if (!IS_ALIGNED(addr, size))
		addr = xe_walk->modified_start;
	if (!IS_ALIGNED(next, size))
		next = xe_walk->modified_end;

	/* Parent == *child is the root pt. Don't kill it. */
	if (parent != *child &&
	    xe_pt_check_kill(addr, next, level, xe_child, action, walk))
		return 0;

	if (!xe_pt_nonshared_offsets(addr, next, level, walk, action, &offset,
				     &end_offset))
		return 0;

	err = xe_pt_new_shared(&xe_walk->wupd, xe_child, offset, true);
	if (err)
		return err;

	xe_walk->wupd.updates[level].update->qwords = end_offset - offset;

	return 0;
}

static const struct xe_pt_walk_ops xe_pt_stage_unbind_ops = {
	.pt_entry = xe_pt_stage_unbind_entry,
	.pt_post_descend = xe_pt_stage_unbind_post_descend,
};

/**
 * xe_pt_stage_unbind() - Build page-table update structures for an unbind
 * operation
 * @tile: The tile we're unbinding for.
 * @vma: The vma we're unbinding.
 * @entries: Caller-provided storage for the update structures.
 *
 * Builds page-table update structures for an unbind operation. The function
 * will attempt to remove all page-tables that we're the only user
 * of, and for that to work, the unbind operation must be committed in the
 * same critical section that blocks racing binds to the same page-table tree.
 *
 * Return: The number of entries used.
 */
static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, struct xe_vma *vma,
				       struct xe_vm_pgtable_update *entries)
{
	struct xe_pt_stage_unbind_walk xe_walk = {
		.base = {
			.ops = &xe_pt_stage_unbind_ops,
			.shifts = xe_normal_pt_shifts,
			.max_level = XE_PT_HIGHEST_LEVEL,
		},
		.tile = tile,
		.modified_start = xe_vma_start(vma),
		.modified_end = xe_vma_end(vma),
		.wupd.entries = entries,
	};
	struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];

	(void)xe_pt_walk_shared(&pt->base, pt->level, xe_vma_start(vma),
				xe_vma_end(vma), &xe_walk.base);

	return xe_walk.wupd.num_used_entries;
}

static void
xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update,
				  struct xe_tile *tile, struct iosys_map *map,
				  void *ptr, u32 qword_ofs, u32 num_qwords,
				  const struct xe_vm_pgtable_update *update)
{
	struct xe_vm *vm = pt_update->vops->vm;
	u64 empty = __xe_pt_empty_pte(tile, vm, update->pt->level);
	int i;

	if (map && map->is_iomem)
		for (i = 0; i < num_qwords; ++i)
			xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) *
				  sizeof(u64), u64, empty);
	else if (map)
		memset64(map->vaddr + qword_ofs * sizeof(u64), empty,
			 num_qwords);
	else
		memset64(ptr, empty, num_qwords);
}

static void xe_pt_abort_unbind(struct xe_vma *vma,
			       struct xe_vm_pgtable_update *entries,
			       u32 num_entries)
{
	int i, j;

	xe_pt_commit_locks_assert(vma);

	for (i = num_entries - 1; i >= 0; --i) {
		struct xe_vm_pgtable_update *entry = &entries[i];
		struct xe_pt *pt = entry->pt;
		struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);

		pt->num_live += entry->qwords;

		if (!pt->level)
			continue;

		for (j = entry->ofs; j < entry->ofs + entry->qwords; j++)
			pt_dir->children[j] =
				entries[i].pt_entries[j - entry->ofs].pt ?
				&entries[i].pt_entries[j - entry->ofs].pt->base : NULL;
	}
}

static void
xe_pt_commit_prepare_unbind(struct xe_vma *vma,
			    struct xe_vm_pgtable_update *entries,
			    u32 num_entries)
{
	int i, j;

	xe_pt_commit_locks_assert(vma);

	for (i = 0; i < num_entries; ++i) {
		struct xe_vm_pgtable_update *entry = &entries[i];
		struct xe_pt *pt = entry->pt;
		struct xe_pt_dir *pt_dir;

		pt->num_live -= entry->qwords;
		if (!pt->level)
			continue;

		pt_dir = as_xe_pt_dir(pt);
		for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) {
			entry->pt_entries[j - entry->ofs].pt =
				xe_pt_entry(pt_dir, j);
			pt_dir->children[j] = NULL;
		}
	}
}

static void
xe_pt_update_ops_rfence_interval(struct xe_vm_pgtable_update_ops *pt_update_ops,
				 struct xe_vma *vma)
{
	u32 current_op = pt_update_ops->current_op;
	struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
	int i, level = 0;
	u64 start, last;

	for (i = 0; i < pt_op->num_entries; i++) {
		const struct xe_vm_pgtable_update *entry = &pt_op->entries[i];

		if (entry->pt->level > level)
			level = entry->pt->level;
	}

	/* Greedy (non-optimal) calculation but simple */
	start = ALIGN_DOWN(xe_vma_start(vma), 0x1ull << xe_pt_shift(level));
	last = ALIGN(xe_vma_end(vma), 0x1ull << xe_pt_shift(level)) - 1;

	if (start < pt_update_ops->start)
		pt_update_ops->start = start;
	if (last > pt_update_ops->last)
		pt_update_ops->last = last;
}
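
/*
 * Example of the greedy rounding above (illustrative): if the highest
 * touched page-table level is 1 (2M per entry), a VMA spanning
 * [0x2100000, 0x2300000) expands to the range-fence interval
 * [0x2000000, 0x23fffff], i.e. the full set of 2M-aligned blocks the
 * update may touch.
 */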

static int vma_reserve_fences(struct xe_device *xe, struct xe_vma *vma)
{
	if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
		return dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv,
					       xe->info.tile_count);

	return 0;
}

static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
			   struct xe_vm_pgtable_update_ops *pt_update_ops,
			   struct xe_vma *vma)
{
	u32 current_op = pt_update_ops->current_op;
	struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
	int err;

	xe_bo_assert_held(xe_vma_bo(vma));

	vm_dbg(&xe_vma_vm(vma)->xe->drm,
	       "Preparing bind, with range [%llx...%llx)\n",
	       xe_vma_start(vma), xe_vma_end(vma) - 1);

	pt_op->vma = NULL;
	pt_op->bind = true;
	pt_op->rebind = BIT(tile->id) & vma->tile_present;

	err = vma_reserve_fences(tile_to_xe(tile), vma);
	if (err)
		return err;

	err = xe_pt_prepare_bind(tile, vma, pt_op->entries,
				 &pt_op->num_entries);
	if (!err) {
		xe_tile_assert(tile, pt_op->num_entries <=
			       ARRAY_SIZE(pt_op->entries));
		xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
					pt_op->num_entries, true);

		xe_pt_update_ops_rfence_interval(pt_update_ops, vma);
		++pt_update_ops->current_op;
		pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);

		/*
		 * If rebind, we have to invalidate TLB on !LR vms to invalidate
		 * cached PTEs that point to freed memory. On LR vms this is done
		 * automatically when the context is re-enabled by the rebind worker,
		 * or in fault mode it was invalidated on PTE zapping.
		 *
		 * If !rebind, and the VM has scratch enabled, there is a chance the
		 * scratch PTE is already cached in the TLB so it needs to be invalidated.
		 * On !LR VMs this is done in the ring ops preceding a batch, but on
		 * non-faulting LR, in particular on user-space batch buffer chaining,
		 * it needs to be done here.
		 */
		if ((!pt_op->rebind && xe_vm_has_scratch(vm) &&
		     xe_vm_in_preempt_fence_mode(vm)))
			pt_update_ops->needs_invalidation = true;
		else if (pt_op->rebind && !xe_vm_in_lr_mode(vm))
			/* We bump also if batch_invalidate_tlb is true */
			vm->tlb_flush_seqno++;

		vma->tile_staged |= BIT(tile->id);
		pt_op->vma = vma;
		xe_pt_commit_prepare_bind(vma, pt_op->entries,
					  pt_op->num_entries, pt_op->rebind);
	} else {
		xe_pt_cancel_bind(vma, pt_op->entries, pt_op->num_entries);
	}

	return err;
}

static int unbind_op_prepare(struct xe_tile *tile,
			     struct xe_vm_pgtable_update_ops *pt_update_ops,
			     struct xe_vma *vma)
{
	u32 current_op = pt_update_ops->current_op;
	struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
	int err;

	if (!((vma->tile_present | vma->tile_staged) & BIT(tile->id)))
		return 0;

	xe_bo_assert_held(xe_vma_bo(vma));

	vm_dbg(&xe_vma_vm(vma)->xe->drm,
	       "Preparing unbind, with range [%llx...%llx)\n",
	       xe_vma_start(vma), xe_vma_end(vma) - 1);

	/*
	 * Wait for invalidation to complete. Can corrupt internal page table
	 * state if an invalidation is running while preparing an unbind.
	 */
	if (xe_vma_is_userptr(vma) && xe_vm_in_fault_mode(xe_vma_vm(vma)))
		mmu_interval_read_begin(&to_userptr_vma(vma)->userptr.notifier);

	pt_op->vma = vma;
	pt_op->bind = false;
	pt_op->rebind = false;

	err = vma_reserve_fences(tile_to_xe(tile), vma);
	if (err)
		return err;

	pt_op->num_entries = xe_pt_stage_unbind(tile, vma, pt_op->entries);

	xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
				pt_op->num_entries, false);
	xe_pt_update_ops_rfence_interval(pt_update_ops, vma);
	++pt_update_ops->current_op;
	pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
	pt_update_ops->needs_invalidation = true;

	xe_pt_commit_prepare_unbind(vma, pt_op->entries, pt_op->num_entries);

	return 0;
}

static int op_prepare(struct xe_vm *vm,
		      struct xe_tile *tile,
		      struct xe_vm_pgtable_update_ops *pt_update_ops,
		      struct xe_vma_op *op)
{
	int err = 0;

	xe_vm_assert_held(vm);

	switch (op->base.op) {
	case DRM_GPUVA_OP_MAP:
		if (!op->map.immediate && xe_vm_in_fault_mode(vm))
			break;

		err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma);
		pt_update_ops->wait_vm_kernel = true;
		break;
	case DRM_GPUVA_OP_REMAP:
		err = unbind_op_prepare(tile, pt_update_ops,
					gpuva_to_vma(op->base.remap.unmap->va));

		if (!err && op->remap.prev) {
			err = bind_op_prepare(vm, tile, pt_update_ops,
					      op->remap.prev);
			pt_update_ops->wait_vm_bookkeep = true;
		}
		if (!err && op->remap.next) {
			err = bind_op_prepare(vm, tile, pt_update_ops,
					      op->remap.next);
			pt_update_ops->wait_vm_bookkeep = true;
		}
		break;
	case DRM_GPUVA_OP_UNMAP:
		err = unbind_op_prepare(tile, pt_update_ops,
					gpuva_to_vma(op->base.unmap.va));
		break;
	case DRM_GPUVA_OP_PREFETCH:
		err = bind_op_prepare(vm, tile, pt_update_ops,
				      gpuva_to_vma(op->base.prefetch.va));
		pt_update_ops->wait_vm_kernel = true;
		break;
	default:
		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
	}

	return err;
}

static void
xe_pt_update_ops_init(struct xe_vm_pgtable_update_ops *pt_update_ops)
{
	init_llist_head(&pt_update_ops->deferred);
	pt_update_ops->start = ~0x0ull;
	pt_update_ops->last = 0x0ull;
}

/**
 * xe_pt_update_ops_prepare() - Prepare PT update operations
 * @tile: Tile of PT update operations
 * @vops: VMA operations
 *
 * Prepare PT update operations, which includes updating internal PT state,
 * allocating memory for page tables, populating newly allocated page tables,
 * and creating PT update operations for leaf insertion / removal.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
{
	struct xe_vm_pgtable_update_ops *pt_update_ops =
		&vops->pt_update_ops[tile->id];
	struct xe_vma_op *op;
	int err;

	lockdep_assert_held(&vops->vm->lock);
	xe_vm_assert_held(vops->vm);

	xe_pt_update_ops_init(pt_update_ops);

	err = dma_resv_reserve_fences(xe_vm_resv(vops->vm),
				      tile_to_xe(tile)->info.tile_count);
	if (err)
		return err;

	list_for_each_entry(op, &vops->list, link) {
		err = op_prepare(vops->vm, tile, pt_update_ops, op);

		if (err)
			return err;
	}

	xe_tile_assert(tile, pt_update_ops->current_op <=
		       pt_update_ops->num_ops);

#ifdef TEST_VM_OPS_ERROR
	if (vops->inject_error &&
	    vops->vm->xe->vm_inject_error_position == FORCE_OP_ERROR_PREPARE)
		return -ENOSPC;
#endif

	return 0;
}

static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
			   struct xe_vm_pgtable_update_ops *pt_update_ops,
			   struct xe_vma *vma, struct dma_fence *fence)
{
	if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
		dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
				   pt_update_ops->wait_vm_bookkeep ?
				   DMA_RESV_USAGE_KERNEL :
				   DMA_RESV_USAGE_BOOKKEEP);
	vma->tile_present |= BIT(tile->id);
	vma->tile_staged &= ~BIT(tile->id);
	if (xe_vma_is_userptr(vma)) {
		lockdep_assert_held_read(&vm->userptr.notifier_lock);
		to_userptr_vma(vma)->userptr.initial_bind = true;
	}

	/*
	 * Kick the rebind worker if this bind triggers preempt fences and we
	 * are not already in the rebind worker.
	 */
	if (pt_update_ops->wait_vm_bookkeep &&
	    xe_vm_in_preempt_fence_mode(vm) &&
	    !current->mm)
		xe_vm_queue_rebind_worker(vm);
}

static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
			     struct xe_vm_pgtable_update_ops *pt_update_ops,
			     struct xe_vma *vma, struct dma_fence *fence)
{
	if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
		dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
				   pt_update_ops->wait_vm_bookkeep ?
				   DMA_RESV_USAGE_KERNEL :
				   DMA_RESV_USAGE_BOOKKEEP);
	vma->tile_present &= ~BIT(tile->id);
	if (!vma->tile_present) {
		list_del_init(&vma->combined_links.rebind);
		if (xe_vma_is_userptr(vma)) {
			lockdep_assert_held_read(&vm->userptr.notifier_lock);

			spin_lock(&vm->userptr.invalidated_lock);
			list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
			spin_unlock(&vm->userptr.invalidated_lock);
		}
	}
}

static void op_commit(struct xe_vm *vm,
		      struct xe_tile *tile,
		      struct xe_vm_pgtable_update_ops *pt_update_ops,
		      struct xe_vma_op *op, struct dma_fence *fence)
{
	xe_vm_assert_held(vm);

	switch (op->base.op) {
	case DRM_GPUVA_OP_MAP:
		if (!op->map.immediate && xe_vm_in_fault_mode(vm))
			break;

		bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence);
		break;
	case DRM_GPUVA_OP_REMAP:
		unbind_op_commit(vm, tile, pt_update_ops,
				 gpuva_to_vma(op->base.remap.unmap->va), fence);

		if (op->remap.prev)
			bind_op_commit(vm, tile, pt_update_ops, op->remap.prev,
				       fence);
		if (op->remap.next)
			bind_op_commit(vm, tile, pt_update_ops, op->remap.next,
				       fence);
		break;
	case DRM_GPUVA_OP_UNMAP:
		unbind_op_commit(vm, tile, pt_update_ops,
				 gpuva_to_vma(op->base.unmap.va), fence);
		break;
	case DRM_GPUVA_OP_PREFETCH:
		bind_op_commit(vm, tile, pt_update_ops,
			       gpuva_to_vma(op->base.prefetch.va), fence);
		break;
	default:
		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
	}
}

static const struct xe_migrate_pt_update_ops migrate_ops = {
	.populate = xe_vm_populate_pgtable,
	.clear = xe_migrate_clear_pgtable_callback,
	.pre_commit = xe_pt_pre_commit,
};

static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
	.populate = xe_vm_populate_pgtable,
	.clear = xe_migrate_clear_pgtable_callback,
	.pre_commit = xe_pt_userptr_pre_commit,
};
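
/*
 * Note (added commentary, not from the original source): which of the two
 * tables above is used is decided in xe_pt_update_ops_run() based on
 * pt_update_ops->needs_userptr_lock. The userptr variant only differs in
 * its pre_commit hook, which revalidates userptr notifier state under
 * vm->userptr.notifier_lock before the update job is committed.
 */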

/**
 * xe_pt_update_ops_run() - Run PT update operations
 * @tile: Tile of PT update operations
 * @vops: VMA operations
 *
 * Run the PT update operations, which includes committing internal PT state
 * changes, creating a job for the PT update operations (leaf insertion /
 * removal), and installing the job fence in various places.
 *
 * Return: fence on success, ERR_PTR on error.
 */
struct dma_fence *
xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
{
	struct xe_vm *vm = vops->vm;
	struct xe_vm_pgtable_update_ops *pt_update_ops =
		&vops->pt_update_ops[tile->id];
	struct dma_fence *fence;
	struct invalidation_fence *ifence = NULL;
	struct xe_range_fence *rfence;
	struct xe_vma_op *op;
	int err = 0, i;
	struct xe_migrate_pt_update update = {
		.ops = pt_update_ops->needs_userptr_lock ?
			&userptr_migrate_ops :
			&migrate_ops,
		.vops = vops,
		.tile_id = tile->id,
	};

	lockdep_assert_held(&vm->lock);
	xe_vm_assert_held(vm);

	if (!pt_update_ops->current_op) {
		xe_tile_assert(tile, xe_vm_in_fault_mode(vm));

		return dma_fence_get_stub();
	}

#ifdef TEST_VM_OPS_ERROR
	if (vops->inject_error &&
	    vm->xe->vm_inject_error_position == FORCE_OP_ERROR_RUN)
		return ERR_PTR(-ENOSPC);
#endif

	if (pt_update_ops->needs_invalidation) {
		ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
		if (!ifence) {
			err = -ENOMEM;
			goto kill_vm_tile1;
		}
	}

	rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
	if (!rfence) {
		err = -ENOMEM;
		goto free_ifence;
	}

	fence = xe_migrate_update_pgtables(tile->migrate, &update);
	if (IS_ERR(fence)) {
		err = PTR_ERR(fence);
		goto free_rfence;
	}

	/* Point of no return - the VM is killed if anything fails after this */
	for (i = 0; i < pt_update_ops->current_op; ++i) {
		struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];

		xe_pt_commit(pt_op->vma, pt_op->entries,
			     pt_op->num_entries, &pt_update_ops->deferred);
		pt_op->vma = NULL;	/* skip in xe_pt_update_ops_abort */
	}

	if (xe_range_fence_insert(&vm->rftree[tile->id], rfence,
				  &xe_range_fence_kfree_ops,
				  pt_update_ops->start,
				  pt_update_ops->last, fence))
		dma_fence_wait(fence, false);

	/* TLB invalidation must be done before signaling rebind */
	if (ifence) {
		invalidation_fence_init(tile->primary_gt, ifence, fence,
					pt_update_ops->start,
					pt_update_ops->last, vm->usm.asid);
		fence = &ifence->base.base;
	}

	dma_resv_add_fence(xe_vm_resv(vm), fence,
			   pt_update_ops->wait_vm_bookkeep ?
			   DMA_RESV_USAGE_KERNEL :
			   DMA_RESV_USAGE_BOOKKEEP);

	list_for_each_entry(op, &vops->list, link)
		op_commit(vops->vm, tile, pt_update_ops, op, fence);

	if (pt_update_ops->needs_userptr_lock)
		up_read(&vm->userptr.notifier_lock);

	return fence;

free_rfence:
	kfree(rfence);
free_ifence:
	kfree(ifence);
kill_vm_tile1:
	if (err != -EAGAIN && tile->id)
		xe_vm_kill(vops->vm, false);

	return ERR_PTR(err);
}
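
/*
 * Error-handling note (added commentary, not from the original source): the
 * unwind labels above only run for failures before the point of no return.
 * xe_vm_kill() is skipped for -EAGAIN, a retryable condition (e.g. a
 * userptr notifier retry from the pre_commit hook), and for tile 0, whose
 * prepared state can still be unwound by xe_pt_update_ops_abort(). A
 * failure on a later tile cannot be unwound once an earlier tile has
 * committed, so the VM is killed to keep the tiles consistent.
 */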

/**
 * xe_pt_update_ops_fini() - Finish PT update operations
 * @tile: Tile of PT update operations
 * @vops: VMA operations
 *
 * Finish the PT update operations by committing the deferred destruction of
 * page-table memory.
 */
void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops)
{
	struct xe_vm_pgtable_update_ops *pt_update_ops =
		&vops->pt_update_ops[tile->id];
	int i;

	lockdep_assert_held(&vops->vm->lock);
	xe_vm_assert_held(vops->vm);

	for (i = 0; i < pt_update_ops->current_op; ++i) {
		struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];

		xe_pt_free_bind(pt_op->entries, pt_op->num_entries);
	}
	xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred);
}

/**
 * xe_pt_update_ops_abort() - Abort PT update operations
 * @tile: Tile of PT update operations
 * @vops: VMA operations
 *
 * Abort the PT update operations by unwinding the internal PT state.
 */
void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops)
{
	struct xe_vm_pgtable_update_ops *pt_update_ops =
		&vops->pt_update_ops[tile->id];
	int i;

	lockdep_assert_held(&vops->vm->lock);
	xe_vm_assert_held(vops->vm);

	for (i = pt_update_ops->num_ops - 1; i >= 0; --i) {
		struct xe_vm_pgtable_update_op *pt_op =
			&pt_update_ops->ops[i];

		if (!pt_op->vma || i >= pt_update_ops->current_op)
			continue;

		if (pt_op->bind)
			xe_pt_abort_bind(pt_op->vma, pt_op->entries,
					 pt_op->num_entries,
					 pt_op->rebind);
		else
			xe_pt_abort_unbind(pt_op->vma, pt_op->entries,
					   pt_op->num_entries);
	}

	xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred);
}
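
/*
 * Note (added commentary, not from the original source): the abort loop
 * above walks the ops in reverse and skips two kinds of entries: ops with
 * i >= current_op were never prepared, and ops whose vma was cleared were
 * already committed by xe_pt_update_ops_run() and therefore must not be
 * unwound. Everything else is rolled back in LIFO order via
 * xe_pt_abort_bind() / xe_pt_abort_unbind().
 */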