// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_pt.h"

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_drm_client.h"
#include "xe_gt.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_pt_types.h"
#include "xe_pt_walk.h"
#include "xe_res_cursor.h"
#include "xe_trace.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"

struct xe_pt_dir {
	struct xe_pt pt;
	/** @dir: Directory structure for the xe_pt_walk functionality */
	struct xe_ptw_dir dir;
};

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define xe_pt_set_addr(__xe_pt, __addr) ((__xe_pt)->addr = (__addr))
#define xe_pt_addr(__xe_pt) ((__xe_pt)->addr)
#else
#define xe_pt_set_addr(__xe_pt, __addr)
#define xe_pt_addr(__xe_pt) 0ull
#endif

static const u64 xe_normal_pt_shifts[] = {12, 21, 30, 39, 48};
static const u64 xe_compact_pt_shifts[] = {16, 21, 30, 39, 48};

#define XE_PT_HIGHEST_LEVEL (ARRAY_SIZE(xe_normal_pt_shifts) - 1)

static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt)
{
	return container_of(pt, struct xe_pt_dir, pt);
}

static struct xe_pt *xe_pt_entry(struct xe_pt_dir *pt_dir, unsigned int index)
{
	return container_of(pt_dir->dir.entries[index], struct xe_pt, base);
}

static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
			     unsigned int level)
{
	struct xe_device *xe = tile_to_xe(tile);
	u16 pat_index = xe->pat.idx[XE_CACHE_WB];
	u8 id = tile->id;

	if (!xe_vm_has_scratch(vm))
		return 0;

	if (level > MAX_HUGEPTE_LEVEL)
		return vm->pt_ops->pde_encode_bo(vm->scratch_pt[id][level - 1]->bo,
						 0, pat_index);

	return vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0) |
		XE_PTE_NULL;
}

/**
 * xe_pt_create() - Create a page-table.
 * @vm: The vm to create for.
 * @tile: The tile to create for.
 * @level: The page-table level.
 *
 * Allocate and initialize a single struct xe_pt metadata structure. Also
 * create the corresponding page-table bo, but don't initialize it. If the
 * level is greater than zero, then it's assumed to be a directory page-
 * table and the directory structure is also allocated and initialized to
 * NULL pointers.
 *
 * Return: A valid struct xe_pt pointer on success, an error pointer on
 * error.
 */
struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
			   unsigned int level)
{
	struct xe_pt *pt;
	struct xe_bo *bo;
	size_t size;
	int err;

	size = !level ? sizeof(struct xe_pt) : sizeof(struct xe_pt_dir) +
		XE_PDES * sizeof(struct xe_ptw *);
	pt = kzalloc(size, GFP_KERNEL);
	if (!pt)
		return ERR_PTR(-ENOMEM);

	bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
				  ttm_bo_type_kernel,
				  XE_BO_CREATE_VRAM_IF_DGFX(tile) |
				  XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT |
				  XE_BO_CREATE_PINNED_BIT |
				  XE_BO_CREATE_NO_RESV_EVICT |
				  XE_BO_PAGETABLE);
	if (IS_ERR(bo)) {
		err = PTR_ERR(bo);
		goto err_kfree;
	}
	pt->bo = bo;
	pt->level = level;
	pt->base.dir = level ? &as_xe_pt_dir(pt)->dir : NULL;

	if (vm->xef)
		xe_drm_client_add_bo(vm->xef->client, pt->bo);
	xe_tile_assert(tile, level <= XE_VM_MAX_LEVEL);

	return pt;

err_kfree:
	kfree(pt);
	return ERR_PTR(err);
}

/**
 * xe_pt_populate_empty() - Populate a page-table bo with scratch- or zero
 * entries.
 * @tile: The tile whose scratch pagetable to use.
 * @vm: The vm we populate for.
 * @pt: The pagetable whose bo to initialize.
 *
 * Populate the page-table bo of @pt with entries pointing into the tile's
 * scratch page-table tree if any. Otherwise populate with zeros.
 */
void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
			  struct xe_pt *pt)
{
	struct iosys_map *map = &pt->bo->vmap;
	u64 empty;
	int i;

	if (!xe_vm_has_scratch(vm)) {
		/*
		 * FIXME: Some memory is already allocated to zero?
		 * Find out which memory that is and avoid this memset...
		 */
		xe_map_memset(vm->xe, map, 0, 0, SZ_4K);
	} else {
		empty = __xe_pt_empty_pte(tile, vm, pt->level);
		for (i = 0; i < XE_PDES; i++)
			xe_pt_write(vm->xe, map, i, empty);
	}
}

/**
 * xe_pt_shift() - Return the ilog2 value of the size of the address range of
 * a page-table at a certain level.
 * @level: The level.
 *
 * Return: The ilog2 value of the size of the address range of a page-table
 * at level @level.
 */
unsigned int xe_pt_shift(unsigned int level)
{
	return XE_PTE_SHIFT + XE_PDE_SHIFT * level;
}

/**
 * xe_pt_destroy() - Destroy a page-table tree.
 * @pt: The root of the page-table tree to destroy.
 * @flags: vm flags. Currently unused.
 * @deferred: List head of lockless list for deferred putting. NULL for
 * immediate putting.
 *
 * Puts the page-table bo, recursively calls xe_pt_destroy on all children
 * and finally frees @pt. TODO: Can we remove the @flags argument?
 */
void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
{
	int i;

	if (!pt)
		return;

	XE_WARN_ON(!list_empty(&pt->bo->ttm.base.gpuva.list));
	xe_bo_unpin(pt->bo);
	xe_bo_put_deferred(pt->bo, deferred);

	if (pt->level > 0 && pt->num_live) {
		struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);

		for (i = 0; i < XE_PDES; i++) {
			if (xe_pt_entry(pt_dir, i))
				xe_pt_destroy(xe_pt_entry(pt_dir, i), flags,
					      deferred);
		}
	}
	kfree(pt);
}

/**
 * DOC: Pagetable building
 *
 * Below we use the term "page-table" for both page-directories, containing
 * pointers to lower level page-directories or page-tables, and level 0
 * page-tables that contain only page-table-entries pointing to memory pages.
 *
 * When inserting an address range in an already existing page-table tree
 * there will typically be a set of page-tables that are shared with other
 * address ranges, and a set that are private to this address range.
 * The set of shared page-tables can be at most two per level,
 * and those can't be updated immediately because the entries of those
 * page-tables may still be in use by the gpu for other mappings. Therefore
 * when inserting entries into those, we instead stage those insertions by
 * adding insertion data into struct xe_vm_pgtable_update structures. This
 * data, (subtrees for the cpu and page-table-entries for the gpu) is then
 * added in a separate commit step. CPU-data is committed while still under the
 * vm lock, the object lock and for userptr, the notifier lock in read mode.
 * The GPU async data is committed either by the GPU or CPU after fulfilling
 * relevant dependencies.
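 *
 * A bind thus proceeds in two phases (a simplified sketch; see
 * xe_pt_stage_bind() and xe_pt_commit_bind() below for the details):
 *
 * 1) Stage: Walk the address range, build the private subtrees in place
 *    and record the writes needed to already existing shared page-tables
 *    in struct xe_vm_pgtable_update entries.
 * 2) Commit: With the relevant locks held, apply the recorded entries to
 *    the CPU-side metadata and submit the GPU updates that connect the
 *    private subtrees to the live tree.
 *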
 * For non-shared page-tables (and, in fact, for shared ones that aren't
 * existing at the time of staging), we add the data in-place without the
 * special update structures. This private part of the page-table tree will
 * remain disconnected from the vm page-table tree until data is committed to
 * the shared page tables of the vm tree in the commit phase.
 */

struct xe_pt_update {
	/** @update: The update structure we're building for this parent. */
	struct xe_vm_pgtable_update *update;
	/** @parent: The parent. Used to detect a parent change. */
	struct xe_pt *parent;
	/** @preexisting: Whether the parent was pre-existing or allocated */
	bool preexisting;
};

struct xe_pt_stage_bind_walk {
	/** @base: The base class. */
	struct xe_pt_walk base;

	/* Input parameters for the walk */
	/** @vm: The vm we're building for. */
	struct xe_vm *vm;
	/** @tile: The tile we're building for. */
	struct xe_tile *tile;
	/** @default_pte: PTE flag only template. No address is associated */
	u64 default_pte;
	/** @dma_offset: DMA offset to add to the PTE. */
	u64 dma_offset;
	/**
	 * @needs_64K: This address range enforces 64K alignment and
	 * granularity.
	 */
	bool needs_64K;
	/**
	 * @vma: VMA being mapped
	 */
	struct xe_vma *vma;

	/* Also input, but is updated during the walk */
	/** @curs: The DMA address cursor. */
	struct xe_res_cursor *curs;
	/** @va_curs_start: The virtual address corresponding to @curs->start */
	u64 va_curs_start;

	/* Output */
	struct xe_walk_update {
		/** @wupd.entries: Caller provided storage. */
		struct xe_vm_pgtable_update *entries;
		/** @wupd.num_used_entries: Number of update @entries used. */
		unsigned int num_used_entries;
		/** @wupd.updates: Tracks the update entry at a given level */
		struct xe_pt_update updates[XE_VM_MAX_LEVEL + 1];
	} wupd;

	/* Walk state */
	/**
	 * @l0_end_addr: The end address of the current l0 leaf. Used for
	 * 64K granularity detection.
	 */
	u64 l0_end_addr;
	/** @addr_64K: The start address of the current 64K chunk. */
	u64 addr_64K;
	/** @found_64K: Whether @addr_64K actually points to a 64K chunk. */
	bool found_64K;
};

static int
xe_pt_new_shared(struct xe_walk_update *wupd, struct xe_pt *parent,
		 pgoff_t offset, bool alloc_entries)
{
	struct xe_pt_update *upd = &wupd->updates[parent->level];
	struct xe_vm_pgtable_update *entry;

	/*
	 * For *each level*, we can only have one active
	 * struct xe_pt_update at any one time.
	 * Once we move on to a new parent and page-directory, the old one is
	 * complete, and updates are either already stored in the build tree
	 * or in @wupd->entries
	 */
	if (likely(upd->parent == parent))
		return 0;

	upd->parent = parent;
	upd->preexisting = true;

	if (wupd->num_used_entries == XE_VM_MAX_LEVEL * 2 + 1)
		return -EINVAL;

	entry = wupd->entries + wupd->num_used_entries++;
	upd->update = entry;
	entry->ofs = offset;
	entry->pt_bo = parent->bo;
	entry->pt = parent;
	entry->flags = 0;
	entry->qwords = 0;

	if (alloc_entries) {
		entry->pt_entries = kmalloc_array(XE_PDES,
						  sizeof(*entry->pt_entries),
						  GFP_KERNEL);
		if (!entry->pt_entries)
			return -ENOMEM;
	}

	return 0;
}

/*
 * NOTE: This is a very frequently called function so we allow ourselves
 * to annotate (using branch prediction hints) the fastpath of updating a
 * non-pre-existing pagetable with leaf ptes.
 */
static int
xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent,
		   pgoff_t offset, struct xe_pt *xe_child, u64 pte)
{
	struct xe_pt_update *upd = &xe_walk->wupd.updates[parent->level];
	struct xe_pt_update *child_upd = xe_child ?
		&xe_walk->wupd.updates[xe_child->level] : NULL;
	int ret;

	ret = xe_pt_new_shared(&xe_walk->wupd, parent, offset, true);
	if (unlikely(ret))
		return ret;

	/*
	 * Register this new pagetable so that it won't be recognized as
	 * a shared pagetable by a subsequent insertion.
	 */
	if (unlikely(child_upd)) {
		child_upd->update = NULL;
		child_upd->parent = xe_child;
		child_upd->preexisting = false;
	}

	if (likely(!upd->preexisting)) {
		/* Continue building a non-connected subtree. */
		struct iosys_map *map = &parent->bo->vmap;

		if (unlikely(xe_child))
			parent->base.dir->entries[offset] = &xe_child->base;

		xe_pt_write(xe_walk->vm->xe, map, offset, pte);
		parent->num_live++;
	} else {
		/* Shared pt. Stage update. */
		unsigned int idx;
		struct xe_vm_pgtable_update *entry = upd->update;

		idx = offset - entry->ofs;
		entry->pt_entries[idx].pt = xe_child;
		entry->pt_entries[idx].pte = pte;
		entry->qwords++;
	}

	return 0;
}

static bool xe_pt_hugepte_possible(u64 addr, u64 next, unsigned int level,
				   struct xe_pt_stage_bind_walk *xe_walk)
{
	u64 size, dma;

	if (level > MAX_HUGEPTE_LEVEL)
		return false;

	/* Does the virtual range requested cover a huge pte? */
	if (!xe_pt_covers(addr, next, level, &xe_walk->base))
		return false;

	/* Does the DMA segment cover the whole pte? */
	if (next - xe_walk->va_curs_start > xe_walk->curs->size)
		return false;

	/* null VMAs do not have dma addresses */
	if (xe_vma_is_null(xe_walk->vma))
		return true;

	/* Is the DMA address huge PTE size aligned? */
	size = next - addr;
	dma = addr - xe_walk->va_curs_start + xe_res_dma(xe_walk->curs);

	return IS_ALIGNED(dma, size);
}

/*
 * Scan the requested mapping to check whether it can be done entirely
 * with 64K PTEs.
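 * This requires the virtual range to be 64K aligned, to stay within a
 * single level-0 page-table and, for non-null VMAs, each 64K chunk of the
 * backing store to be 64K aligned and contiguous.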
 */
static bool
xe_pt_scan_64K(u64 addr, u64 next, struct xe_pt_stage_bind_walk *xe_walk)
{
	struct xe_res_cursor curs = *xe_walk->curs;

	if (!IS_ALIGNED(addr, SZ_64K))
		return false;

	if (next > xe_walk->l0_end_addr)
		return false;

	/* null VMAs do not have dma addresses */
	if (xe_vma_is_null(xe_walk->vma))
		return true;

	xe_res_next(&curs, addr - xe_walk->va_curs_start);
	for (; addr < next; addr += SZ_64K) {
		if (!IS_ALIGNED(xe_res_dma(&curs), SZ_64K) || curs.size < SZ_64K)
			return false;

		xe_res_next(&curs, SZ_64K);
	}

	return addr == next;
}

/*
 * For non-compact "normal" 4K level-0 pagetables, we want to try to group
 * addresses together in 64K-contiguous regions to add a 64K TLB hint for the
 * device to the PTE.
 * This function determines whether the address is part of such a
 * segment. For VRAM in normal pagetables, this is strictly necessary on
 * some devices.
 */
static bool
xe_pt_is_pte_ps64K(u64 addr, u64 next, struct xe_pt_stage_bind_walk *xe_walk)
{
	/* Address is within an already found 64k region */
	if (xe_walk->found_64K && addr - xe_walk->addr_64K < SZ_64K)
		return true;

	xe_walk->found_64K = xe_pt_scan_64K(addr, addr + SZ_64K, xe_walk);
	xe_walk->addr_64K = addr;

	return xe_walk->found_64K;
}

static int
xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
		       unsigned int level, u64 addr, u64 next,
		       struct xe_ptw **child,
		       enum page_walk_action *action,
		       struct xe_pt_walk *walk)
{
	struct xe_pt_stage_bind_walk *xe_walk =
		container_of(walk, typeof(*xe_walk), base);
	u16 pat_index = xe_walk->vma->pat_index;
	struct xe_pt *xe_parent = container_of(parent, typeof(*xe_parent), base);
	struct xe_vm *vm = xe_walk->vm;
	struct xe_pt *xe_child;
	bool covers;
	int ret = 0;
	u64 pte;

	/* Is this a leaf entry? */
	if (level == 0 || xe_pt_hugepte_possible(addr, next, level, xe_walk)) {
		struct xe_res_cursor *curs = xe_walk->curs;
		bool is_null = xe_vma_is_null(xe_walk->vma);

		XE_WARN_ON(xe_walk->va_curs_start != addr);

		pte = vm->pt_ops->pte_encode_vma(is_null ? 0 :
						 xe_res_dma(curs) + xe_walk->dma_offset,
						 xe_walk->vma, pat_index, level);
		pte |= xe_walk->default_pte;

		/*
		 * Set the XE_PTE_PS64 hint if possible, otherwise if
		 * this device *requires* 64K PTE size for VRAM, fail.
		 */
		if (level == 0 && !xe_parent->is_compact) {
			if (xe_pt_is_pte_ps64K(addr, next, xe_walk))
				pte |= XE_PTE_PS64;
			else if (XE_WARN_ON(xe_walk->needs_64K))
				return -EINVAL;
		}

		ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, NULL, pte);
		if (unlikely(ret))
			return ret;

		if (!is_null)
			xe_res_next(curs, next - addr);
		xe_walk->va_curs_start = next;
		xe_walk->vma->gpuva.flags |= (XE_VMA_PTE_4K << level);
		*action = ACTION_CONTINUE;

		return ret;
	}

	/*
	 * Descending to lower level. Determine if we need to allocate a
	 * new page table or -directory, which we do if there is no
	 * previous one or there is one we can completely replace.
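	 *
	 * A newly created page-table that covers the whole range of this
	 * entry is filled only by the walk itself, whereas one that covers
	 * the range only partially is first initialized with scratch- or
	 * zero entries through xe_pt_populate_empty().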
	 */
	if (level == 1) {
		walk->shifts = xe_normal_pt_shifts;
		xe_walk->l0_end_addr = next;
	}

	covers = xe_pt_covers(addr, next, level, &xe_walk->base);
	if (covers || !*child) {
		u64 flags = 0;

		xe_child = xe_pt_create(xe_walk->vm, xe_walk->tile, level - 1);
		if (IS_ERR(xe_child))
			return PTR_ERR(xe_child);

		xe_pt_set_addr(xe_child,
			       round_down(addr, 1ull << walk->shifts[level]));

		if (!covers)
			xe_pt_populate_empty(xe_walk->tile, xe_walk->vm, xe_child);

		*child = &xe_child->base;

		/*
		 * Prefer the compact pagetable layout for L0 if possible.
		 * TODO: Suballocate the pt bo to avoid wasting a lot of
		 * memory.
		 */
		if (GRAPHICS_VERx100(tile_to_xe(xe_walk->tile)) >= 1250 && level == 1 &&
		    covers && xe_pt_scan_64K(addr, next, xe_walk)) {
			walk->shifts = xe_compact_pt_shifts;
			flags |= XE_PDE_64K;
			xe_child->is_compact = true;
		}

		pte = vm->pt_ops->pde_encode_bo(xe_child->bo, 0, pat_index) | flags;
		ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, xe_child,
					 pte);
	}

	*action = ACTION_SUBTREE;
	return ret;
}

static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
	.pt_entry = xe_pt_stage_bind_entry,
};

/**
 * xe_pt_stage_bind() - Build a disconnected page-table tree for a given address
 * range.
 * @tile: The tile we're building for.
 * @vma: The vma indicating the address range.
 * @entries: Storage for the update entries used for connecting the tree to
 * the main tree at commit time.
 * @num_entries: On output contains the number of @entries used.
 *
 * This function builds a disconnected page-table tree for a given address
 * range. The tree is connected to the main vm tree for the gpu using
 * xe_migrate_update_pgtables() and for the cpu using xe_pt_commit_bind().
 * The function builds xe_vm_pgtable_update structures for already existing
 * shared page-tables, and non-existing shared and non-shared page-tables
 * are built and populated directly.
 *
 * Return: 0 on success, negative error code on error.
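 *
 * Note: The @entries array should have room for the worst case of
 * XE_VM_MAX_LEVEL * 2 + 1 updates: up to two shared page-tables per level
 * below the root plus the root itself; see xe_pt_new_shared().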
 */
static int
xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
		 struct xe_vm_pgtable_update *entries, u32 *num_entries)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_bo *bo = xe_vma_bo(vma);
	bool is_devmem = !xe_vma_is_userptr(vma) && bo &&
		(xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo));
	struct xe_res_cursor curs;
	struct xe_pt_stage_bind_walk xe_walk = {
		.base = {
			.ops = &xe_pt_stage_bind_ops,
			.shifts = xe_normal_pt_shifts,
			.max_level = XE_PT_HIGHEST_LEVEL,
		},
		.vm = xe_vma_vm(vma),
		.tile = tile,
		.curs = &curs,
		.va_curs_start = xe_vma_start(vma),
		.vma = vma,
		.wupd.entries = entries,
		.needs_64K = (xe_vma_vm(vma)->flags & XE_VM_FLAG_64K) && is_devmem,
	};
	struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
	int ret;

	if (vma && (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) &&
	    (is_devmem || !IS_DGFX(xe)))
		xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;

	if (is_devmem) {
		xe_walk.default_pte |= XE_PPGTT_PTE_DM;
		xe_walk.dma_offset = vram_region_gpu_offset(bo->ttm.resource);
	}

	if (!xe_vma_has_no_bo(vma) && xe_bo_is_stolen(bo))
		xe_walk.dma_offset = xe_ttm_stolen_gpu_offset(xe_bo_device(bo));

	xe_bo_assert_held(bo);

	if (!xe_vma_is_null(vma)) {
		if (xe_vma_is_userptr(vma))
			xe_res_first_sg(vma->userptr.sg, 0, xe_vma_size(vma),
					&curs);
		else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo))
			xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma),
				     xe_vma_size(vma), &curs);
		else
			xe_res_first_sg(xe_bo_sg(bo), xe_vma_bo_offset(vma),
					xe_vma_size(vma), &curs);
	} else {
		curs.size = xe_vma_size(vma);
	}

	ret = xe_pt_walk_range(&pt->base, pt->level, xe_vma_start(vma),
			       xe_vma_end(vma), &xe_walk.base);

	*num_entries = xe_walk.wupd.num_used_entries;
	return ret;
}

/**
 * xe_pt_nonshared_offsets() - Determine the non-shared entry offsets of a
 * shared pagetable.
 * @addr: The start address within the non-shared pagetable.
 * @end: The end address within the non-shared pagetable.
 * @level: The level of the non-shared pagetable.
 * @walk: Walk info. The function adjusts the walk action.
 * @action: next action to perform (see enum page_walk_action)
 * @offset: Ignored on input, first non-shared entry on output.
 * @end_offset: Ignored on input, last non-shared entry + 1 on output.
 *
 * A non-shared page-table has some entries that belong to the address range
 * and others that don't. This function determines the entries that belong
 * fully to the address range. Depending on level, some entries may
 * partially belong to the address range (that can't happen at level 0).
 * The function detects that and adjusts those offsets to not include those
 * partial entries. Iff it does detect partial entries, we know that there must
 * be shared page tables also at lower levels, so it adjusts the walk action
 * accordingly.
 *
 * Return: true if there were non-shared entries, false otherwise.
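 *
 * For example, if @addr is not aligned to the entry size at @level, the
 * first, only partially covered, entry is excluded from the returned range
 * and the walk action is set to descend into it instead.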
 */
static bool xe_pt_nonshared_offsets(u64 addr, u64 end, unsigned int level,
				    struct xe_pt_walk *walk,
				    enum page_walk_action *action,
				    pgoff_t *offset, pgoff_t *end_offset)
{
	u64 size = 1ull << walk->shifts[level];

	*offset = xe_pt_offset(addr, level, walk);
	*end_offset = xe_pt_num_entries(addr, end, level, walk) + *offset;

	if (!level)
		return true;

	/*
	 * If addr or next are not size aligned, there are shared pts at lower
	 * level, so in that case traverse down the subtree
	 */
	*action = ACTION_CONTINUE;
	if (!IS_ALIGNED(addr, size)) {
		*action = ACTION_SUBTREE;
		(*offset)++;
	}

	if (!IS_ALIGNED(end, size)) {
		*action = ACTION_SUBTREE;
		(*end_offset)--;
	}

	return *end_offset > *offset;
}

struct xe_pt_zap_ptes_walk {
	/** @base: The walk base-class */
	struct xe_pt_walk base;

	/* Input parameters for the walk */
	/** @tile: The tile we're building for */
	struct xe_tile *tile;

	/* Output */
	/** @needs_invalidate: Whether we need to invalidate TLB */
	bool needs_invalidate;
};

static int xe_pt_zap_ptes_entry(struct xe_ptw *parent, pgoff_t offset,
				unsigned int level, u64 addr, u64 next,
				struct xe_ptw **child,
				enum page_walk_action *action,
				struct xe_pt_walk *walk)
{
	struct xe_pt_zap_ptes_walk *xe_walk =
		container_of(walk, typeof(*xe_walk), base);
	struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
	pgoff_t end_offset;

	XE_WARN_ON(!*child);
	XE_WARN_ON(!level && xe_child->is_compact);

	/*
	 * Note that we're called from an entry callback, and we're dealing
	 * with the child of that entry rather than the parent, so need to
	 * adjust level down.
	 */
	if (xe_pt_nonshared_offsets(addr, next, --level, walk, action, &offset,
				    &end_offset)) {
		xe_map_memset(tile_to_xe(xe_walk->tile), &xe_child->bo->vmap,
			      offset * sizeof(u64), 0,
			      (end_offset - offset) * sizeof(u64));
		xe_walk->needs_invalidate = true;
	}

	return 0;
}

static const struct xe_pt_walk_ops xe_pt_zap_ptes_ops = {
	.pt_entry = xe_pt_zap_ptes_entry,
};

/**
 * xe_pt_zap_ptes() - Zap (zero) gpu ptes of an address range
 * @tile: The tile we're zapping for.
 * @vma: GPU VMA detailing address range.
 *
 * Eviction and Userptr invalidation need to be able to zap the
 * gpu ptes of a given address range in pagefaulting mode.
 * In order to be able to do that, this function needs access to the shared
 * page-table entries so it can either clear the leaf PTEs or
 * clear the pointers to lower-level page-tables. The caller is required
 * to hold the necessary locks to ensure neither the page-table connectivity
 * nor the page-table entries of the range are updated from under us.
 *
 * Return: Whether ptes were actually updated and a TLB invalidation is
 * required.
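 *
 * Note that this function only zeroes the PTEs; issuing the TLB
 * invalidation indicated by the return value is left to the caller.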
 */
bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma)
{
	struct xe_pt_zap_ptes_walk xe_walk = {
		.base = {
			.ops = &xe_pt_zap_ptes_ops,
			.shifts = xe_normal_pt_shifts,
			.max_level = XE_PT_HIGHEST_LEVEL,
		},
		.tile = tile,
	};
	struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];

	if (!(vma->tile_present & BIT(tile->id)))
		return false;

	(void)xe_pt_walk_shared(&pt->base, pt->level, xe_vma_start(vma),
				xe_vma_end(vma), &xe_walk.base);

	return xe_walk.needs_invalidate;
}

static void
xe_vm_populate_pgtable(struct xe_migrate_pt_update *pt_update, struct xe_tile *tile,
		       struct iosys_map *map, void *data,
		       u32 qword_ofs, u32 num_qwords,
		       const struct xe_vm_pgtable_update *update)
{
	struct xe_pt_entry *ptes = update->pt_entries;
	u64 *ptr = data;
	u32 i;

	for (i = 0; i < num_qwords; i++) {
		if (map)
			xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) *
				  sizeof(u64), u64, ptes[i].pte);
		else
			ptr[i] = ptes[i].pte;
	}
}

static void xe_pt_abort_bind(struct xe_vma *vma,
			     struct xe_vm_pgtable_update *entries,
			     u32 num_entries)
{
	u32 i, j;

	for (i = 0; i < num_entries; i++) {
		if (!entries[i].pt_entries)
			continue;

		for (j = 0; j < entries[i].qwords; j++)
			xe_pt_destroy(entries[i].pt_entries[j].pt, xe_vma_vm(vma)->flags, NULL);
		kfree(entries[i].pt_entries);
	}
}

static void xe_pt_commit_locks_assert(struct xe_vma *vma)
{
	struct xe_vm *vm = xe_vma_vm(vma);

	lockdep_assert_held(&vm->lock);

	if (xe_vma_is_userptr(vma))
		lockdep_assert_held_read(&vm->userptr.notifier_lock);
	else if (!xe_vma_is_null(vma))
		dma_resv_assert_held(xe_vma_bo(vma)->ttm.base.resv);

	xe_vm_assert_held(vm);
}

static void xe_pt_commit_bind(struct xe_vma *vma,
			      struct xe_vm_pgtable_update *entries,
			      u32 num_entries, bool rebind,
			      struct llist_head *deferred)
{
	u32 i, j;

	xe_pt_commit_locks_assert(vma);

	for (i = 0; i < num_entries; i++) {
		struct xe_pt *pt = entries[i].pt;
		struct xe_pt_dir *pt_dir;

		if (!rebind)
			pt->num_live += entries[i].qwords;

		if (!pt->level) {
			kfree(entries[i].pt_entries);
			continue;
		}

		pt_dir = as_xe_pt_dir(pt);
		for (j = 0; j < entries[i].qwords; j++) {
			u32 j_ = j + entries[i].ofs;
			struct xe_pt *newpte = entries[i].pt_entries[j].pt;

			if (xe_pt_entry(pt_dir, j_))
				xe_pt_destroy(xe_pt_entry(pt_dir, j_),
					      xe_vma_vm(vma)->flags, deferred);

			pt_dir->dir.entries[j_] = &newpte->base;
		}
		kfree(entries[i].pt_entries);
	}
}

static int
xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
		   struct xe_vm_pgtable_update *entries, u32 *num_entries,
		   bool rebind)
{
	int err;

	*num_entries = 0;
	err = xe_pt_stage_bind(tile, vma, entries, num_entries);
	if (!err)
		xe_tile_assert(tile, *num_entries);
	else /* abort! */
		xe_pt_abort_bind(vma, entries, *num_entries);

	return err;
}

static void xe_vm_dbg_print_entries(struct xe_device *xe,
				    const struct xe_vm_pgtable_update *entries,
				    unsigned int num_entries)
#if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM))
{
	unsigned int i;

	vm_dbg(&xe->drm, "%u entries to update\n", num_entries);
	for (i = 0; i < num_entries; i++) {
		const struct xe_vm_pgtable_update *entry = &entries[i];
		struct xe_pt *xe_pt = entry->pt;
		u64 page_size = 1ull << xe_pt_shift(xe_pt->level);
		u64 end;
		u64 start;

		xe_assert(xe, !entry->pt->is_compact);
		start = entry->ofs * page_size;
		end = start + page_size * entry->qwords;
		vm_dbg(&xe->drm,
		       "\t%u: Update level %u at (%u + %u) [%llx...%llx) f:%x\n",
		       i, xe_pt->level, entry->ofs, entry->qwords,
		       xe_pt_addr(xe_pt) + start, xe_pt_addr(xe_pt) + end, 0);
	}
}
#else
{}
#endif

#ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT

static bool xe_pt_userptr_inject_eagain(struct xe_vma *vma)
{
	u32 divisor = vma->userptr.divisor ? vma->userptr.divisor : 2;
	static u32 count;

	if (count++ % divisor == divisor - 1) {
		struct xe_vm *vm = xe_vma_vm(vma);

		vma->userptr.divisor = divisor << 1;
		spin_lock(&vm->userptr.invalidated_lock);
		list_move_tail(&vma->userptr.invalidate_link,
			       &vm->userptr.invalidated);
		spin_unlock(&vm->userptr.invalidated_lock);
		return true;
	}

	return false;
}

#else

static bool xe_pt_userptr_inject_eagain(struct xe_vma *vma)
{
	return false;
}

#endif

/**
 * struct xe_pt_migrate_pt_update - Callback argument for pre-commit callbacks
 * @base: Base we derive from.
 * @bind: Whether this is a bind or an unbind operation. A bind operation
 * makes the pre-commit callback error with -EAGAIN if it detects a
 * pending invalidation.
 * @locked: Whether the pre-commit callback locked the userptr notifier lock
 * and it needs unlocking.
 */
struct xe_pt_migrate_pt_update {
	struct xe_migrate_pt_update base;
	bool bind;
	bool locked;
};

/*
 * This function adds the needed dependencies to a page-table update job
 * to make sure racing jobs for separate bind engines don't race writing
 * to the same page-table range, wreaking havoc. Initially use a single
 * fence for the entire VM. An optimization would use smaller granularity.
 */
static int xe_pt_vm_dependencies(struct xe_sched_job *job,
				 struct xe_range_fence_tree *rftree,
				 u64 start, u64 last)
{
	struct xe_range_fence *rtfence;
	struct dma_fence *fence;
	int err;

	rtfence = xe_range_fence_tree_first(rftree, start, last);
	while (rtfence) {
		fence = rtfence->fence;

		if (!dma_fence_is_signaled(fence)) {
			/*
			 * Is this a CPU update?
			 * GPU is busy updating, so return an error.
			 */
			if (!job)
				return -ETIME;

			dma_fence_get(fence);
			err = drm_sched_job_add_dependency(&job->drm, fence);
			if (err)
				return err;
		}

		rtfence = xe_range_fence_tree_next(rtfence, start, last);
	}

	return 0;
}

static int xe_pt_pre_commit(struct xe_migrate_pt_update *pt_update)
{
	struct xe_range_fence_tree *rftree =
		&xe_vma_vm(pt_update->vma)->rftree[pt_update->tile_id];

	return xe_pt_vm_dependencies(pt_update->job, rftree,
				     pt_update->start, pt_update->last);
}

static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
{
	struct xe_pt_migrate_pt_update *userptr_update =
		container_of(pt_update, typeof(*userptr_update), base);
	struct xe_vma *vma = pt_update->vma;
	unsigned long notifier_seq = vma->userptr.notifier_seq;
	struct xe_vm *vm = xe_vma_vm(vma);
	int err = xe_pt_vm_dependencies(pt_update->job,
					&vm->rftree[pt_update->tile_id],
					pt_update->start,
					pt_update->last);

	if (err)
		return err;

	userptr_update->locked = false;

	/*
	 * Wait until nobody is running the invalidation notifier, and
	 * since we're exiting the loop holding the notifier lock,
	 * nobody can proceed invalidating either.
	 *
	 * Note that we don't update the vma->userptr.notifier_seq since
	 * we don't update the userptr pages.
	 */
	do {
		down_read(&vm->userptr.notifier_lock);
		if (!mmu_interval_read_retry(&vma->userptr.notifier,
					     notifier_seq))
			break;

		up_read(&vm->userptr.notifier_lock);

		if (userptr_update->bind)
			return -EAGAIN;

		notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
	} while (true);

	/* Inject errors to test whether they are handled correctly */
	if (userptr_update->bind && xe_pt_userptr_inject_eagain(vma)) {
		up_read(&vm->userptr.notifier_lock);
		return -EAGAIN;
	}

	userptr_update->locked = true;

	return 0;
}

static const struct xe_migrate_pt_update_ops bind_ops = {
	.populate = xe_vm_populate_pgtable,
	.pre_commit = xe_pt_pre_commit,
};

static const struct xe_migrate_pt_update_ops userptr_bind_ops = {
	.populate = xe_vm_populate_pgtable,
	.pre_commit = xe_pt_userptr_pre_commit,
};

struct invalidation_fence {
	struct xe_gt_tlb_invalidation_fence base;
	struct xe_gt *gt;
	struct xe_vma *vma;
	struct dma_fence *fence;
	struct dma_fence_cb cb;
	struct work_struct work;
};

static const char *
invalidation_fence_get_driver_name(struct dma_fence *dma_fence)
{
	return "xe";
}

static const char *
invalidation_fence_get_timeline_name(struct dma_fence *dma_fence)
{
	return "invalidation_fence";
}

static const struct dma_fence_ops invalidation_fence_ops = {
	.get_driver_name = invalidation_fence_get_driver_name,
	.get_timeline_name = invalidation_fence_get_timeline_name,
};

static void invalidation_fence_cb(struct dma_fence *fence,
				  struct dma_fence_cb *cb)
{
	struct invalidation_fence *ifence =
		container_of(cb, struct invalidation_fence, cb);

	trace_xe_gt_tlb_invalidation_fence_cb(&ifence->base);
	if (!ifence->fence->error) {
		queue_work(system_wq, &ifence->work);
	} else {
		ifence->base.base.error = ifence->fence->error;
		dma_fence_signal(&ifence->base.base);
		dma_fence_put(&ifence->base.base);
	}
	dma_fence_put(ifence->fence);
}

static void invalidation_fence_work_func(struct work_struct *w)
{
	struct invalidation_fence *ifence =
		container_of(w, struct invalidation_fence, work);

	trace_xe_gt_tlb_invalidation_fence_work_func(&ifence->base);
	xe_gt_tlb_invalidation_vma(ifence->gt, &ifence->base, ifence->vma);
}

static int invalidation_fence_init(struct xe_gt *gt,
				   struct invalidation_fence *ifence,
				   struct dma_fence *fence,
				   struct xe_vma *vma)
{
	int ret;

	trace_xe_gt_tlb_invalidation_fence_create(&ifence->base);

	spin_lock_irq(&gt->tlb_invalidation.lock);
	dma_fence_init(&ifence->base.base, &invalidation_fence_ops,
		       &gt->tlb_invalidation.lock,
		       gt->tlb_invalidation.fence_context,
		       ++gt->tlb_invalidation.fence_seqno);
	spin_unlock_irq(&gt->tlb_invalidation.lock);

	INIT_LIST_HEAD(&ifence->base.link);

	dma_fence_get(&ifence->base.base); /* Ref for caller */
	ifence->fence = fence;
	ifence->gt = gt;
	ifence->vma = vma;

	INIT_WORK(&ifence->work, invalidation_fence_work_func);
	ret = dma_fence_add_callback(fence, &ifence->cb, invalidation_fence_cb);
	if (ret == -ENOENT) {
		dma_fence_put(ifence->fence); /* Usually dropped in CB */
		invalidation_fence_work_func(&ifence->work);
	} else if (ret) {
		dma_fence_put(&ifence->base.base); /* Caller ref */
		dma_fence_put(&ifence->base.base); /* Creation ref */
	}

	xe_gt_assert(gt, !ret || ret == -ENOENT);

	return ret && ret != -ENOENT ? ret : 0;
}

static void xe_pt_calc_rfence_interval(struct xe_vma *vma,
				       struct xe_pt_migrate_pt_update *update,
				       struct xe_vm_pgtable_update *entries,
				       u32 num_entries)
{
	int i, level = 0;

	for (i = 0; i < num_entries; i++) {
		const struct xe_vm_pgtable_update *entry = &entries[i];

		if (entry->pt->level > level)
			level = entry->pt->level;
	}

	/* Greedy (non-optimal) calculation but simple */
	update->base.start = ALIGN_DOWN(xe_vma_start(vma),
					0x1ull << xe_pt_shift(level));
	update->base.last = ALIGN(xe_vma_end(vma),
				  0x1ull << xe_pt_shift(level)) - 1;
}

/**
 * __xe_pt_bind_vma() - Build and connect a page-table tree for the vma
 * address range.
 * @tile: The tile to bind for.
 * @vma: The vma to bind.
 * @q: The exec_queue with which to do pipelined page-table updates.
 * @syncs: Entries to sync on before binding the built tree to the live vm tree.
 * @num_syncs: Number of @sync entries.
 * @rebind: Whether we're rebinding this vma to the same address range without
 * an unbind in-between.
 *
 * This function builds a page-table tree (see xe_pt_stage_bind() for more
 * information on page-table building), and the xe_vm_pgtable_update entries
 * abstracting the operations needed to attach it to the main vm tree. It
 * then takes the relevant locks and updates the metadata side of the main
 * vm tree and submits the operations for pipelined attachment of the
 * gpu page-table to the vm main tree, (which can be done by either the
 * CPU or the GPU).
 *
 * Return: A valid dma-fence representing the pipelined attachment operation
 * on success, an error pointer on error.
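 *
 * Depending on the rebind and scratch state of the vm, the returned fence
 * may also gate a GT TLB invalidation that is issued once the page-table
 * update itself has completed.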
 */
struct dma_fence *
__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
		 struct xe_sync_entry *syncs, u32 num_syncs,
		 bool rebind)
{
	struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
	struct xe_pt_migrate_pt_update bind_pt_update = {
		.base = {
			.ops = xe_vma_is_userptr(vma) ? &userptr_bind_ops : &bind_ops,
			.vma = vma,
			.tile_id = tile->id,
		},
		.bind = true,
	};
	struct xe_vm *vm = xe_vma_vm(vma);
	u32 num_entries;
	struct dma_fence *fence;
	struct invalidation_fence *ifence = NULL;
	struct xe_range_fence *rfence;
	int err;

	bind_pt_update.locked = false;
	xe_bo_assert_held(xe_vma_bo(vma));
	xe_vm_assert_held(vm);

	vm_dbg(&xe_vma_vm(vma)->xe->drm,
	       "Preparing bind, with range [%llx...%llx) engine %p.\n",
	       xe_vma_start(vma), xe_vma_end(vma), q);

	err = xe_pt_prepare_bind(tile, vma, entries, &num_entries, rebind);
	if (err)
		goto err;
	xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries));

	xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries);
	xe_pt_calc_rfence_interval(vma, &bind_pt_update, entries,
				   num_entries);

	/*
	 * If rebind, we have to invalidate TLB on !LR vms to invalidate
	 * cached PTEs that point to freed memory. On LR vms this is done
	 * automatically when the context is re-enabled by the rebind worker,
	 * or in fault mode it was invalidated on PTE zapping.
	 *
	 * If !rebind, and the vm has scratch enabled, there is a chance the
	 * scratch PTE is already cached in the TLB so it needs to be invalidated.
	 * On !LR VMs this is done in the ring ops preceding a batch, but on
	 * non-faulting LR, in particular on user-space batch buffer chaining,
	 * it needs to be done here.
	 */
	if ((rebind && !xe_vm_in_lr_mode(vm) && !vm->batch_invalidate_tlb) ||
	    (!rebind && xe_vm_has_scratch(vm) && xe_vm_in_preempt_fence_mode(vm))) {
		ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
		if (!ifence)
			return ERR_PTR(-ENOMEM);
	}

	rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
	if (!rfence) {
		kfree(ifence);
		return ERR_PTR(-ENOMEM);
	}

	fence = xe_migrate_update_pgtables(tile->migrate,
					   vm, xe_vma_bo(vma), q,
					   entries, num_entries,
					   syncs, num_syncs,
					   &bind_pt_update.base);
	if (!IS_ERR(fence)) {
		bool last_munmap_rebind = vma->gpuva.flags & XE_VMA_LAST_REBIND;
		LLIST_HEAD(deferred);
		int err;

		err = xe_range_fence_insert(&vm->rftree[tile->id], rfence,
					    &xe_range_fence_kfree_ops,
					    bind_pt_update.base.start,
					    bind_pt_update.base.last, fence);
		if (err)
			dma_fence_wait(fence, false);

		/* TLB invalidation must be done before signaling rebind */
		if (ifence) {
			int err = invalidation_fence_init(tile->primary_gt, ifence, fence,
							  vma);
			if (err) {
				dma_fence_put(fence);
				kfree(ifence);
				return ERR_PTR(err);
			}
			fence = &ifence->base.base;
		}

		/* add shared fence now for pagetable delayed destroy */
		dma_resv_add_fence(xe_vm_resv(vm), fence, !rebind &&
				   last_munmap_rebind ?
				   DMA_RESV_USAGE_KERNEL :
				   DMA_RESV_USAGE_BOOKKEEP);

		if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
			dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
					   DMA_RESV_USAGE_BOOKKEEP);
		xe_pt_commit_bind(vma, entries, num_entries, rebind,
				  bind_pt_update.locked ? &deferred : NULL);

		/* This vma is live (again?) now */
		vma->tile_present |= BIT(tile->id);

		if (bind_pt_update.locked) {
			vma->userptr.initial_bind = true;
			up_read(&vm->userptr.notifier_lock);
			xe_bo_put_commit(&deferred);
		}
		if (!rebind && last_munmap_rebind &&
		    xe_vm_in_preempt_fence_mode(vm))
			xe_vm_queue_rebind_worker(vm);
	} else {
		kfree(rfence);
		kfree(ifence);
		if (bind_pt_update.locked)
			up_read(&vm->userptr.notifier_lock);
		xe_pt_abort_bind(vma, entries, num_entries);
	}

	return fence;

err:
	return ERR_PTR(err);
}

struct xe_pt_stage_unbind_walk {
	/** @base: The pagewalk base-class. */
	struct xe_pt_walk base;

	/* Input parameters for the walk */
	/** @tile: The tile we're unbinding from. */
	struct xe_tile *tile;

	/**
	 * @modified_start: Walk range start, modified to include any
	 * shared pagetables that we're the only user of and can thus
	 * treat as private.
	 */
	u64 modified_start;
	/** @modified_end: Walk range end, modified like @modified_start. */
	u64 modified_end;

	/* Output */
	/* @wupd: Structure to track the page-table updates we're building */
	struct xe_walk_update wupd;
};

/*
 * Check whether this range is the only one populating this pagetable,
 * and in that case, update the walk range checks so that higher levels don't
 * view us as a shared pagetable.
 */
static bool xe_pt_check_kill(u64 addr, u64 next, unsigned int level,
			     const struct xe_pt *child,
			     enum page_walk_action *action,
			     struct xe_pt_walk *walk)
{
	struct xe_pt_stage_unbind_walk *xe_walk =
		container_of(walk, typeof(*xe_walk), base);
	unsigned int shift = walk->shifts[level];
	u64 size = 1ull << shift;

	if (IS_ALIGNED(addr, size) && IS_ALIGNED(next, size) &&
	    ((next - addr) >> shift) == child->num_live) {
		u64 size = 1ull << walk->shifts[level + 1];

		*action = ACTION_CONTINUE;

		if (xe_walk->modified_start >= addr)
			xe_walk->modified_start = round_down(addr, size);
		if (xe_walk->modified_end <= next)
			xe_walk->modified_end = round_up(next, size);

		return true;
	}

	return false;
}

static int xe_pt_stage_unbind_entry(struct xe_ptw *parent, pgoff_t offset,
				    unsigned int level, u64 addr, u64 next,
				    struct xe_ptw **child,
				    enum page_walk_action *action,
				    struct xe_pt_walk *walk)
{
	struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);

	XE_WARN_ON(!*child);
	XE_WARN_ON(!level && xe_child->is_compact);

	xe_pt_check_kill(addr, next, level - 1, xe_child, action, walk);

	return 0;
}

static int
xe_pt_stage_unbind_post_descend(struct xe_ptw *parent, pgoff_t offset,
				unsigned int level, u64 addr, u64 next,
				struct xe_ptw **child,
				enum page_walk_action *action,
				struct xe_pt_walk *walk)
{
	struct xe_pt_stage_unbind_walk *xe_walk =
		container_of(walk, typeof(*xe_walk), base);
	struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
	pgoff_t end_offset;
	u64 size = 1ull << walk->shifts[--level];

	if (!IS_ALIGNED(addr, size))
		addr = xe_walk->modified_start;
	if (!IS_ALIGNED(next, size))
		next = xe_walk->modified_end;

	/*
	 * Parent == *child is the root pt. Don't kill it.
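	 * The root page-table is owned by the vm and is only destroyed
	 * together with the vm itself.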
	 */
	if (parent != *child &&
	    xe_pt_check_kill(addr, next, level, xe_child, action, walk))
		return 0;

	if (!xe_pt_nonshared_offsets(addr, next, level, walk, action, &offset,
				     &end_offset))
		return 0;

	(void)xe_pt_new_shared(&xe_walk->wupd, xe_child, offset, false);
	xe_walk->wupd.updates[level].update->qwords = end_offset - offset;

	return 0;
}

static const struct xe_pt_walk_ops xe_pt_stage_unbind_ops = {
	.pt_entry = xe_pt_stage_unbind_entry,
	.pt_post_descend = xe_pt_stage_unbind_post_descend,
};

/**
 * xe_pt_stage_unbind() - Build page-table update structures for an unbind
 * operation
 * @tile: The tile we're unbinding for.
 * @vma: The vma we're unbinding.
 * @entries: Caller-provided storage for the update structures.
 *
 * Builds page-table update structures for an unbind operation. The function
 * will attempt to remove all page-tables that we're the only user
 * of, and for that to work, the unbind operation must be committed in the
 * same critical section that blocks racing binds to the same page-table tree.
 *
 * Return: The number of entries used.
 */
static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, struct xe_vma *vma,
				       struct xe_vm_pgtable_update *entries)
{
	struct xe_pt_stage_unbind_walk xe_walk = {
		.base = {
			.ops = &xe_pt_stage_unbind_ops,
			.shifts = xe_normal_pt_shifts,
			.max_level = XE_PT_HIGHEST_LEVEL,
		},
		.tile = tile,
		.modified_start = xe_vma_start(vma),
		.modified_end = xe_vma_end(vma),
		.wupd.entries = entries,
	};
	struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];

	(void)xe_pt_walk_shared(&pt->base, pt->level, xe_vma_start(vma),
				xe_vma_end(vma), &xe_walk.base);

	return xe_walk.wupd.num_used_entries;
}

static void
xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update,
				  struct xe_tile *tile, struct iosys_map *map,
				  void *ptr, u32 qword_ofs, u32 num_qwords,
				  const struct xe_vm_pgtable_update *update)
{
	struct xe_vma *vma = pt_update->vma;
	u64 empty = __xe_pt_empty_pte(tile, xe_vma_vm(vma), update->pt->level);
	int i;

	if (map && map->is_iomem)
		for (i = 0; i < num_qwords; ++i)
			xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) *
				  sizeof(u64), u64, empty);
	else if (map)
		memset64(map->vaddr + qword_ofs * sizeof(u64), empty,
			 num_qwords);
	else
		memset64(ptr, empty, num_qwords);
}

static void
xe_pt_commit_unbind(struct xe_vma *vma,
		    struct xe_vm_pgtable_update *entries, u32 num_entries,
		    struct llist_head *deferred)
{
	u32 j;

	xe_pt_commit_locks_assert(vma);

	for (j = 0; j < num_entries; ++j) {
		struct xe_vm_pgtable_update *entry = &entries[j];
		struct xe_pt *pt = entry->pt;

		pt->num_live -= entry->qwords;
		if (pt->level) {
			struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
			u32 i;

			for (i = entry->ofs; i < entry->ofs + entry->qwords;
			     i++) {
				if (xe_pt_entry(pt_dir, i))
					xe_pt_destroy(xe_pt_entry(pt_dir, i),
						      xe_vma_vm(vma)->flags, deferred);

				pt_dir->dir.entries[i] = NULL;
			}
		}
	}
}

static const struct xe_migrate_pt_update_ops unbind_ops = {
	.populate = xe_migrate_clear_pgtable_callback,
	.pre_commit = xe_pt_pre_commit,
};

static const struct xe_migrate_pt_update_ops userptr_unbind_ops = {
	.populate = xe_migrate_clear_pgtable_callback,
	.pre_commit = xe_pt_userptr_pre_commit,
};

/**
 * __xe_pt_unbind_vma() - Disconnect and free a page-table tree for the vma
 * address range.
 * @tile: The tile to unbind for.
 * @vma: The vma to unbind.
 * @q: The exec_queue with which to do pipelined page-table updates.
 * @syncs: Entries to sync on before disconnecting the tree to be destroyed.
 * @num_syncs: Number of @sync entries.
 *
 * This function builds the xe_vm_pgtable_update entries abstracting the
 * operations needed to detach the page-table tree to be destroyed from the
 * main vm tree.
 * It then takes the relevant locks and submits the operations for
 * pipelined detachment of the gpu page-table from the vm main tree,
 * (which can be done by either the CPU or the GPU). Finally it frees the
 * detached page-table tree.
 *
 * Return: A valid dma-fence representing the pipelined detachment operation
 * on success, an error pointer on error.
 */
struct dma_fence *
__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
		   struct xe_sync_entry *syncs, u32 num_syncs)
{
	struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
	struct xe_pt_migrate_pt_update unbind_pt_update = {
		.base = {
			.ops = xe_vma_is_userptr(vma) ? &userptr_unbind_ops :
			&unbind_ops,
			.vma = vma,
			.tile_id = tile->id,
		},
	};
	struct xe_vm *vm = xe_vma_vm(vma);
	u32 num_entries;
	struct dma_fence *fence = NULL;
	struct invalidation_fence *ifence;
	struct xe_range_fence *rfence;

	LLIST_HEAD(deferred);

	xe_bo_assert_held(xe_vma_bo(vma));
	xe_vm_assert_held(vm);

	vm_dbg(&xe_vma_vm(vma)->xe->drm,
	       "Preparing unbind, with range [%llx...%llx) engine %p.\n",
	       xe_vma_start(vma), xe_vma_end(vma), q);

	num_entries = xe_pt_stage_unbind(tile, vma, entries);
	xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries));

	xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries);
	xe_pt_calc_rfence_interval(vma, &unbind_pt_update, entries,
				   num_entries);

	ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
	if (!ifence)
		return ERR_PTR(-ENOMEM);

	rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
	if (!rfence) {
		kfree(ifence);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Even if we were already evicted and unbind to destroy, we need to
	 * clear again here. The eviction may have updated pagetables at a
	 * lower level, because it needs to be more conservative.
	 */
	fence = xe_migrate_update_pgtables(tile->migrate,
					   vm, NULL, q ? q :
					   vm->q[tile->id],
					   entries, num_entries,
					   syncs, num_syncs,
					   &unbind_pt_update.base);
	if (!IS_ERR(fence)) {
		int err;

		err = xe_range_fence_insert(&vm->rftree[tile->id], rfence,
					    &xe_range_fence_kfree_ops,
					    unbind_pt_update.base.start,
					    unbind_pt_update.base.last, fence);
		if (err)
			dma_fence_wait(fence, false);

		/* TLB invalidation must be done before signaling unbind */
		err = invalidation_fence_init(tile->primary_gt, ifence, fence, vma);
		if (err) {
			dma_fence_put(fence);
			kfree(ifence);
			return ERR_PTR(err);
		}
		fence = &ifence->base.base;

		/* add shared fence now for pagetable delayed destroy */
		dma_resv_add_fence(xe_vm_resv(vm), fence,
				   DMA_RESV_USAGE_BOOKKEEP);

		/* This fence will be installed by caller when doing eviction */
		if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
			dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
					   DMA_RESV_USAGE_BOOKKEEP);
		xe_pt_commit_unbind(vma, entries, num_entries,
				    unbind_pt_update.locked ? &deferred : NULL);
		vma->tile_present &= ~BIT(tile->id);
	} else {
		kfree(rfence);
		kfree(ifence);
	}

	if (!vma->tile_present)
		list_del_init(&vma->combined_links.rebind);

	if (unbind_pt_update.locked) {
		xe_tile_assert(tile, xe_vma_is_userptr(vma));

		if (!vma->tile_present) {
			spin_lock(&vm->userptr.invalidated_lock);
			list_del_init(&vma->userptr.invalidate_link);
			spin_unlock(&vm->userptr.invalidated_lock);
		}
		up_read(&vm->userptr.notifier_lock);
		xe_bo_put_commit(&deferred);
	}

	return fence;
}