1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2022 Intel Corporation
4 */
5
6 #include "xe_pt.h"
7
8 #include "regs/xe_gtt_defs.h"
9 #include "xe_bo.h"
10 #include "xe_device.h"
11 #include "xe_drm_client.h"
12 #include "xe_exec_queue.h"
13 #include "xe_gt.h"
14 #include "xe_gt_stats.h"
15 #include "xe_migrate.h"
16 #include "xe_page_reclaim.h"
17 #include "xe_pt_types.h"
18 #include "xe_pt_walk.h"
19 #include "xe_res_cursor.h"
20 #include "xe_sched_job.h"
21 #include "xe_svm.h"
22 #include "xe_sync.h"
23 #include "xe_tlb_inval_job.h"
24 #include "xe_trace.h"
25 #include "xe_ttm_stolen_mgr.h"
26 #include "xe_userptr.h"
27 #include "xe_vm.h"
28
29 struct xe_pt_dir {
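/** @pt: The page-table metadata embedded as the base of this directory node. */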
30 struct xe_pt pt;
31 /** @children: Array of page-table child nodes */
32 struct xe_ptw *children[XE_PDES];
33 /** @staging: Array of page-table staging nodes */
34 struct xe_ptw *staging[XE_PDES];
35 };
36
37 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
38 #define xe_pt_set_addr(__xe_pt, __addr) ((__xe_pt)->addr = (__addr))
39 #define xe_pt_addr(__xe_pt) ((__xe_pt)->addr)
40 #else
41 #define xe_pt_set_addr(__xe_pt, __addr)
42 #define xe_pt_addr(__xe_pt) 0ull
43 #endif
44
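/*
 * Per-level address shifts used by the page-table walker: shifts[level] is
 * the ilog2 of the address range covered by a single entry at that level.
 * The compact variant uses 64K (bit 16) level-0 entries instead of 4K.
 */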
45 static const u64 xe_normal_pt_shifts[] = {12, 21, 30, 39, 48};
46 static const u64 xe_compact_pt_shifts[] = {16, 21, 30, 39, 48};
47
48 #define XE_PT_HIGHEST_LEVEL (ARRAY_SIZE(xe_normal_pt_shifts) - 1)
49
static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt)
51 {
52 return container_of(pt, struct xe_pt_dir, pt);
53 }
54
55 static struct xe_pt *
xe_pt_entry_staging(struct xe_pt_dir *pt_dir, unsigned int index)
57 {
58 return container_of(pt_dir->staging[index], struct xe_pt, base);
59 }
60
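/*
 * Encode the "empty" entry value for a page-table at @level: zero when the
 * VM has no scratch pages, a PDE pointing at the lower-level scratch
 * page-table for levels above MAX_HUGEPTE_LEVEL, and a NULL-flagged scratch
 * PTE otherwise.
 */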
static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
62 unsigned int level)
63 {
64 struct xe_device *xe = tile_to_xe(tile);
65 u16 pat_index = xe->pat.idx[XE_CACHE_WB];
66 u8 id = tile->id;
67
68 if (!xe_vm_has_scratch(vm))
69 return 0;
70
71 if (level > MAX_HUGEPTE_LEVEL)
72 return vm->pt_ops->pde_encode_bo(vm->scratch_pt[id][level - 1]->bo,
73 0);
74
75 return vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0) |
76 XE_PTE_NULL;
77 }
78
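/*
 * Free the metadata of a single page-table node. Directory nodes were
 * allocated as struct xe_pt_dir and must be freed as such.
 */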
static void xe_pt_free(struct xe_pt *pt)
80 {
81 if (pt->level)
82 kfree(as_xe_pt_dir(pt));
83 else
84 kfree(pt);
85 }
86
87 /**
88 * xe_pt_create() - Create a page-table.
89 * @vm: The vm to create for.
90 * @tile: The tile to create for.
91 * @level: The page-table level.
92 * @exec: The drm_exec object used to lock the vm.
93 *
94 * Allocate and initialize a single struct xe_pt metadata structure. Also
95 * create the corresponding page-table bo, but don't initialize it. If the
 * level is greater than zero, then it's assumed to be a directory page-
97 * table and the directory structure is also allocated and initialized to
98 * NULL pointers.
99 *
 * Return: A valid struct xe_pt pointer on success, an ERR_PTR-encoded error
 * code on failure.
102 */
struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
104 unsigned int level, struct drm_exec *exec)
105 {
106 struct xe_pt *pt;
107 struct xe_bo *bo;
108 u32 bo_flags;
109 int err;
110
111 if (level) {
112 struct xe_pt_dir *dir = kzalloc_obj(*dir);
113
114 pt = (dir) ? &dir->pt : NULL;
115 } else {
116 pt = kzalloc_obj(*pt);
117 }
118 if (!pt)
119 return ERR_PTR(-ENOMEM);
120
121 bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) |
122 XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
123 XE_BO_FLAG_NO_RESV_EVICT | XE_BO_FLAG_PAGETABLE;
124 if (vm->xef) /* userspace */
125 bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE | XE_BO_FLAG_FORCE_USER_VRAM;
126
127 pt->level = level;
128
129 drm_WARN_ON(&vm->xe->drm, IS_ERR_OR_NULL(exec));
130 bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
131 ttm_bo_type_kernel,
132 bo_flags, exec);
133 if (IS_ERR(bo)) {
134 err = PTR_ERR(bo);
135 goto err_kfree;
136 }
137 pt->bo = bo;
138 pt->base.children = level ? as_xe_pt_dir(pt)->children : NULL;
139 pt->base.staging = level ? as_xe_pt_dir(pt)->staging : NULL;
140
141 if (vm->xef)
142 xe_drm_client_add_bo(vm->xef->client, pt->bo);
143 xe_tile_assert(tile, level <= XE_VM_MAX_LEVEL);
144
145 return pt;
146
147 err_kfree:
148 xe_pt_free(pt);
149 return ERR_PTR(err);
150 }
151 ALLOW_ERROR_INJECTION(xe_pt_create, ERRNO);
152
153 /**
154 * xe_pt_populate_empty() - Populate a page-table bo with scratch- or zero
155 * entries.
156 * @tile: The tile the scratch pagetable of which to use.
157 * @vm: The vm we populate for.
158 * @pt: The pagetable the bo of which to initialize.
159 *
160 * Populate the page-table bo of @pt with entries pointing into the tile's
161 * scratch page-table tree if any. Otherwise populate with zeros.
162 */
void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
164 struct xe_pt *pt)
165 {
166 struct iosys_map *map = &pt->bo->vmap;
167 u64 empty;
168 int i;
169
170 if (!xe_vm_has_scratch(vm)) {
171 /*
 * FIXME: Some memory is already allocated zeroed?
173 * Find out which memory that is and avoid this memset...
174 */
175 xe_map_memset(vm->xe, map, 0, 0, SZ_4K);
176 } else {
177 empty = __xe_pt_empty_pte(tile, vm, pt->level);
178 for (i = 0; i < XE_PDES; i++)
179 xe_pt_write(vm->xe, map, i, empty);
180 }
181 }
182
183 /**
184 * xe_pt_shift() - Return the ilog2 value of the size of the address range of
185 * a page-table at a certain level.
186 * @level: The level.
187 *
188 * Return: The ilog2 value of the size of the address range of a page-table
189 * at level @level.
190 */
unsigned int xe_pt_shift(unsigned int level)
192 {
193 return XE_PTE_SHIFT + XE_PDE_SHIFT * level;
194 }
195
196 /**
197 * xe_pt_destroy() - Destroy a page-table tree.
198 * @pt: The root of the page-table tree to destroy.
199 * @flags: vm flags. Currently unused.
200 * @deferred: List head of lockless list for deferred putting. NULL for
201 * immediate putting.
202 *
203 * Puts the page-table bo, recursively calls xe_pt_destroy on all children
204 * and finally frees @pt. TODO: Can we remove the @flags argument?
205 */
void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
207 {
208 int i;
209
210 if (!pt)
211 return;
212
213 XE_WARN_ON(!list_empty(&pt->bo->ttm.base.gpuva.list));
214 xe_bo_unpin(pt->bo);
215 xe_bo_put_deferred(pt->bo, deferred);
216
217 if (pt->level > 0 && pt->num_live) {
218 struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
219
220 for (i = 0; i < XE_PDES; i++) {
221 if (xe_pt_entry_staging(pt_dir, i))
222 xe_pt_destroy(xe_pt_entry_staging(pt_dir, i), flags,
223 deferred);
224 }
225 }
226 xe_pt_free(pt);
227 }
228
229 /**
230 * xe_pt_clear() - Clear a page-table.
231 * @xe: xe device.
232 * @pt: The page-table.
233 *
234 * Clears page-table by setting to zero.
235 */
void xe_pt_clear(struct xe_device *xe, struct xe_pt *pt)
237 {
238 struct iosys_map *map = &pt->bo->vmap;
239
240 xe_map_memset(xe, map, 0, 0, SZ_4K);
241 }
242
243 /**
244 * DOC: Pagetable building
245 *
246 * Below we use the term "page-table" for both page-directories, containing
247 * pointers to lower level page-directories or page-tables, and level 0
248 * page-tables that contain only page-table-entries pointing to memory pages.
249 *
250 * When inserting an address range in an already existing page-table tree
251 * there will typically be a set of page-tables that are shared with other
252 * address ranges, and a set that are private to this address range.
253 * The set of shared page-tables can be at most two per level,
254 * and those can't be updated immediately because the entries of those
255 * page-tables may still be in use by the gpu for other mappings. Therefore
256 * when inserting entries into those, we instead stage those insertions by
257 * adding insertion data into struct xe_vm_pgtable_update structures. This
258 * data, (subtrees for the cpu and page-table-entries for the gpu) is then
259 * added in a separate commit step. CPU-data is committed while still under the
260 * vm lock, the object lock and for userptr, the notifier lock in read mode.
261 * The GPU async data is committed either by the GPU or CPU after fulfilling
262 * relevant dependencies.
263 * For non-shared page-tables (and, in fact, for shared ones that aren't
264 * existing at the time of staging), we add the data in-place without the
265 * special update structures. This private part of the page-table tree will
266 * remain disconnected from the vm page-table tree until data is committed to
267 * the shared page tables of the vm tree in the commit phase.
268 */
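/*
 * In short: a bind is staged with xe_pt_stage_bind()/xe_pt_prepare_bind(),
 * the shared GPU entries are written via xe_migrate_update_pgtables(), and
 * the CPU-side tree is connected with the xe_pt_commit*() helpers. On error,
 * staged state is torn down with xe_pt_abort_bind()/xe_pt_cancel_bind().
 */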
269
270 struct xe_pt_update {
271 /** @update: The update structure we're building for this parent. */
272 struct xe_vm_pgtable_update *update;
273 /** @parent: The parent. Used to detect a parent change. */
274 struct xe_pt *parent;
275 /** @preexisting: Whether the parent was pre-existing or allocated */
276 bool preexisting;
277 };
278
279 /**
280 * struct xe_pt_stage_bind_walk - Walk state for the stage_bind walk.
281 */
282 struct xe_pt_stage_bind_walk {
283 /** @base: The base class. */
284 struct xe_pt_walk base;
285
286 /* Input parameters for the walk */
287 /** @vm: The vm we're building for. */
288 struct xe_vm *vm;
289 /** @tile: The tile we're building for. */
290 struct xe_tile *tile;
291 /** @default_vram_pte: PTE flag only template for VRAM. No address is associated */
292 u64 default_vram_pte;
293 /** @default_system_pte: PTE flag only template for System. No address is associated */
294 u64 default_system_pte;
295 /** @dma_offset: DMA offset to add to the PTE. */
296 u64 dma_offset;
297 /**
298 * @needs_64K: This address range enforces 64K alignment and
299 * granularity on VRAM.
300 */
301 bool needs_64K;
302 /** @clear_pt: clear page table entries during the bind walk */
303 bool clear_pt;
304 /**
305 * @vma: VMA being mapped
306 */
307 struct xe_vma *vma;
308
/* Also input, but is updated during the walk */
310 /** @curs: The DMA address cursor. */
311 struct xe_res_cursor *curs;
312 /** @va_curs_start: The Virtual address corresponding to @curs->start */
313 u64 va_curs_start;
314
315 /* Output */
316 /** @wupd: Walk output data for page-table updates. */
317 struct xe_walk_update {
318 /** @wupd.entries: Caller provided storage. */
319 struct xe_vm_pgtable_update *entries;
320 /** @wupd.num_used_entries: Number of update @entries used. */
321 unsigned int num_used_entries;
322 /** @wupd.updates: Tracks the update entry at a given level */
323 struct xe_pt_update updates[XE_VM_MAX_LEVEL + 1];
324 } wupd;
325
326 /* Walk state */
327 /**
328 * @l0_end_addr: The end address of the current l0 leaf. Used for
329 * 64K granularity detection.
330 */
331 u64 l0_end_addr;
332 /** @addr_64K: The start address of the current 64K chunk. */
333 u64 addr_64K;
/** @found_64K: Whether @addr_64K actually points to a 64K chunk. */
335 bool found_64K;
336 };
337
338 static int
xe_pt_new_shared(struct xe_walk_update *wupd, struct xe_pt *parent,
340 pgoff_t offset, bool alloc_entries)
341 {
342 struct xe_pt_update *upd = &wupd->updates[parent->level];
343 struct xe_vm_pgtable_update *entry;
344
345 /*
 * For *each level*, we can only have one active
 * struct xe_pt_update at any one time. Once we move on to a
348 * new parent and page-directory, the old one is complete, and
349 * updates are either already stored in the build tree or in
350 * @wupd->entries
351 */
352 if (likely(upd->parent == parent))
353 return 0;
354
355 upd->parent = parent;
356 upd->preexisting = true;
357
358 if (wupd->num_used_entries == XE_VM_MAX_LEVEL * 2 + 1)
359 return -EINVAL;
360
361 entry = wupd->entries + wupd->num_used_entries++;
362 upd->update = entry;
363 entry->ofs = offset;
364 entry->pt_bo = parent->bo;
365 entry->pt = parent;
366 entry->flags = 0;
367 entry->qwords = 0;
368 entry->pt_bo->update_index = -1;
369
370 if (alloc_entries) {
371 entry->pt_entries = kmalloc_objs(*entry->pt_entries, XE_PDES);
372 if (!entry->pt_entries)
373 return -ENOMEM;
374 }
375
376 return 0;
377 }
378
379 /*
380 * NOTE: This is a very frequently called function so we allow ourselves
381 * to annotate (using branch prediction hints) the fastpath of updating a
382 * non-pre-existing pagetable with leaf ptes.
383 */
384 static int
xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent,
386 pgoff_t offset, struct xe_pt *xe_child, u64 pte)
387 {
388 struct xe_pt_update *upd = &xe_walk->wupd.updates[parent->level];
389 struct xe_pt_update *child_upd = xe_child ?
390 &xe_walk->wupd.updates[xe_child->level] : NULL;
391 int ret;
392
393 ret = xe_pt_new_shared(&xe_walk->wupd, parent, offset, true);
394 if (unlikely(ret))
395 return ret;
396
397 /*
398 * Register this new pagetable so that it won't be recognized as
399 * a shared pagetable by a subsequent insertion.
400 */
401 if (unlikely(child_upd)) {
402 child_upd->update = NULL;
403 child_upd->parent = xe_child;
404 child_upd->preexisting = false;
405 }
406
407 if (likely(!upd->preexisting)) {
408 /* Continue building a non-connected subtree. */
409 struct iosys_map *map = &parent->bo->vmap;
410
411 if (unlikely(xe_child)) {
412 parent->base.children[offset] = &xe_child->base;
413 parent->base.staging[offset] = &xe_child->base;
414 }
415
416 xe_pt_write(xe_walk->vm->xe, map, offset, pte);
417 parent->num_live++;
418 } else {
419 /* Shared pt. Stage update. */
420 unsigned int idx;
421 struct xe_vm_pgtable_update *entry = upd->update;
422
423 idx = offset - entry->ofs;
424 entry->pt_entries[idx].pt = xe_child;
425 entry->pt_entries[idx].pte = pte;
426 entry->qwords++;
427 }
428
429 return 0;
430 }
431
static bool xe_pt_hugepte_possible(u64 addr, u64 next, unsigned int level,
433 struct xe_pt_stage_bind_walk *xe_walk)
434 {
435 u64 size, dma;
436
437 if (level > MAX_HUGEPTE_LEVEL)
438 return false;
439
440 /* Does the virtual range requested cover a huge pte? */
441 if (!xe_pt_covers(addr, next, level, &xe_walk->base))
442 return false;
443
444 /* Does the DMA segment cover the whole pte? */
445 if (next - xe_walk->va_curs_start > xe_walk->curs->size)
446 return false;
447
448 /* null VMA's do not have dma addresses */
449 if (xe_vma_is_null(xe_walk->vma))
450 return true;
451
/* if we are clearing the page table, there are no dma addresses */
453 if (xe_walk->clear_pt)
454 return true;
455
456 /* Is the DMA address huge PTE size aligned? */
457 size = next - addr;
458 dma = addr - xe_walk->va_curs_start + xe_res_dma(xe_walk->curs);
459
460 return IS_ALIGNED(dma, size);
461 }
462
463 /*
464 * Scan the requested mapping to check whether it can be done entirely
465 * with 64K PTEs.
466 */
467 static bool
xe_pt_scan_64K(u64 addr, u64 next, struct xe_pt_stage_bind_walk *xe_walk)
469 {
470 struct xe_res_cursor curs = *xe_walk->curs;
471
472 if (!IS_ALIGNED(addr, SZ_64K))
473 return false;
474
475 if (next > xe_walk->l0_end_addr)
476 return false;
477
478 /* null VMA's do not have dma addresses */
479 if (xe_vma_is_null(xe_walk->vma))
480 return true;
481
482 xe_res_next(&curs, addr - xe_walk->va_curs_start);
483 for (; addr < next; addr += SZ_64K) {
484 if (!IS_ALIGNED(xe_res_dma(&curs), SZ_64K) || curs.size < SZ_64K)
485 return false;
486
487 xe_res_next(&curs, SZ_64K);
488 }
489
490 return addr == next;
491 }
492
493 /*
494 * For non-compact "normal" 4K level-0 pagetables, we want to try to group
 * addresses together in 64K-contiguous regions to add a 64K TLB hint for the
496 * device to the PTE.
497 * This function determines whether the address is part of such a
498 * segment. For VRAM in normal pagetables, this is strictly necessary on
499 * some devices.
500 */
501 static bool
xe_pt_is_pte_ps64K(u64 addr, u64 next, struct xe_pt_stage_bind_walk *xe_walk)
503 {
504 /* Address is within an already found 64k region */
505 if (xe_walk->found_64K && addr - xe_walk->addr_64K < SZ_64K)
506 return true;
507
508 xe_walk->found_64K = xe_pt_scan_64K(addr, addr + SZ_64K, xe_walk);
509 xe_walk->addr_64K = addr;
510
511 return xe_walk->found_64K;
512 }
513
514 static int
xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
516 unsigned int level, u64 addr, u64 next,
517 struct xe_ptw **child,
518 enum page_walk_action *action,
519 struct xe_pt_walk *walk)
520 {
521 struct xe_pt_stage_bind_walk *xe_walk =
522 container_of(walk, typeof(*xe_walk), base);
523 u16 pat_index = xe_walk->vma->attr.pat_index;
524 struct xe_pt *xe_parent = container_of(parent, typeof(*xe_parent), base);
525 struct xe_vm *vm = xe_walk->vm;
526 struct xe_pt *xe_child;
527 bool covers;
528 int ret = 0;
529 u64 pte;
530
/* Is this a leaf entry? */
532 if (level == 0 || xe_pt_hugepte_possible(addr, next, level, xe_walk)) {
533 struct xe_res_cursor *curs = xe_walk->curs;
534 struct xe_bo *bo = xe_vma_bo(xe_walk->vma);
535 bool is_null_or_purged = xe_vma_is_null(xe_walk->vma) ||
536 (bo && xe_bo_is_purged(bo));
537 bool is_vram = is_null_or_purged ? false : xe_res_is_vram(curs);
538
539 XE_WARN_ON(xe_walk->va_curs_start != addr);
540
541 if (xe_walk->clear_pt) {
542 pte = 0;
543 } else {
544 /*
545 * For purged BOs, treat like null VMAs - pass address 0.
546 * The pte_encode_vma will set XE_PTE_NULL flag for scratch mapping.
547 */
548 pte = vm->pt_ops->pte_encode_vma(is_null_or_purged ? 0 :
549 xe_res_dma(curs) +
550 xe_walk->dma_offset,
551 xe_walk->vma,
552 pat_index, level);
553 if (!is_null_or_purged)
554 pte |= is_vram ? xe_walk->default_vram_pte :
555 xe_walk->default_system_pte;
556
557 /*
558 * Set the XE_PTE_PS64 hint if possible, otherwise if
559 * this device *requires* 64K PTE size for VRAM, fail.
560 */
561 if (level == 0 && !xe_parent->is_compact) {
562 if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) {
563 xe_walk->vma->gpuva.flags |=
564 XE_VMA_PTE_64K;
565 pte |= XE_PTE_PS64;
566 } else if (XE_WARN_ON(xe_walk->needs_64K &&
567 is_vram)) {
568 return -EINVAL;
569 }
570 }
571 }
572
573 ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, NULL, pte);
574 if (unlikely(ret))
575 return ret;
576
577 if (!is_null_or_purged && !xe_walk->clear_pt)
578 xe_res_next(curs, next - addr);
579 xe_walk->va_curs_start = next;
580 xe_walk->vma->gpuva.flags |= (XE_VMA_PTE_4K << level);
581 *action = ACTION_CONTINUE;
582
583 return ret;
584 }
585
586 /*
587 * Descending to lower level. Determine if we need to allocate a
588 * new page table or -directory, which we do if there is no
589 * previous one or there is one we can completely replace.
590 */
591 if (level == 1) {
592 walk->shifts = xe_normal_pt_shifts;
593 xe_walk->l0_end_addr = next;
594 }
595
596 covers = xe_pt_covers(addr, next, level, &xe_walk->base);
597 if (covers || !*child) {
598 u64 flags = 0;
599
600 xe_child = xe_pt_create(xe_walk->vm, xe_walk->tile, level - 1,
601 xe_vm_validation_exec(vm));
602 if (IS_ERR(xe_child))
603 return PTR_ERR(xe_child);
604
605 xe_pt_set_addr(xe_child,
606 round_down(addr, 1ull << walk->shifts[level]));
607
608 if (!covers)
609 xe_pt_populate_empty(xe_walk->tile, xe_walk->vm, xe_child);
610
611 *child = &xe_child->base;
612
613 /*
614 * Prefer the compact pagetable layout for L0 if possible. Only
615 * possible if VMA covers entire 2MB region as compact 64k and
616 * 4k pages cannot be mixed within a 2MB region.
617 * TODO: Suballocate the pt bo to avoid wasting a lot of
618 * memory.
619 */
620 if (GRAPHICS_VERx100(tile_to_xe(xe_walk->tile)) >= 1250 && level == 1 &&
621 covers && xe_pt_scan_64K(addr, next, xe_walk)) {
622 walk->shifts = xe_compact_pt_shifts;
623 xe_walk->vma->gpuva.flags |= XE_VMA_PTE_COMPACT;
624 flags |= XE_PDE_64K;
625 xe_child->is_compact = true;
626 }
627
628 pte = vm->pt_ops->pde_encode_bo(xe_child->bo, 0) | flags;
629 ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, xe_child,
630 pte);
631 }
632
633 *action = ACTION_SUBTREE;
634 return ret;
635 }
636
637 static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
638 .pt_entry = xe_pt_stage_bind_entry,
639 };
640
641 /*
642 * Default atomic expectations for different allocation scenarios are as follows:
643 *
644 * 1. Traditional API: When the VM is not in LR mode:
645 * - Device atomics are expected to function with all allocations.
646 *
647 * 2. Compute/SVM API: When the VM is in LR mode:
648 * - Device atomics are the default behavior when the bo is placed in a single region.
649 * - In all other cases device atomics will be disabled with AE=0 until an application
 *   requests differently using an ioctl like madvise.
651 */
static bool xe_atomic_for_vram(struct xe_vm *vm, struct xe_vma *vma)
653 {
654 if (vma->attr.atomic_access == DRM_XE_ATOMIC_CPU)
655 return false;
656
657 return true;
658 }
659
static bool xe_atomic_for_system(struct xe_vm *vm, struct xe_vma *vma)
661 {
662 struct xe_device *xe = vm->xe;
663 struct xe_bo *bo = xe_vma_bo(vma);
664
665 if (!xe->info.has_device_atomics_on_smem ||
666 vma->attr.atomic_access == DRM_XE_ATOMIC_CPU)
667 return false;
668
669 if (vma->attr.atomic_access == DRM_XE_ATOMIC_DEVICE)
670 return true;
671
672 /*
 * If a SMEM+LMEM allocation is backed by SMEM, a device
 * atomic will cause a gpu page fault and the allocation
 * then gets migrated to LMEM, so bind such allocations
 * with device atomics enabled.
677 */
678 return (!IS_DGFX(xe) || (!xe_vm_in_lr_mode(vm) ||
679 (bo && xe_bo_has_single_placement(bo))));
680 }
681
682 /**
683 * xe_pt_stage_bind() - Build a disconnected page-table tree for a given address
684 * range.
685 * @tile: The tile we're building for.
686 * @vma: The vma indicating the address range.
687 * @range: The range indicating the address range.
688 * @entries: Storage for the update entries used for connecting the tree to
689 * the main tree at commit time.
690 * @num_entries: On output contains the number of @entries used.
691 * @clear_pt: Clear the page table entries.
692 *
693 * This function builds a disconnected page-table tree for a given address
694 * range. The tree is connected to the main vm tree for the gpu using
695 * xe_migrate_update_pgtables() and for the cpu using xe_pt_commit_bind().
696 * The function builds xe_vm_pgtable_update structures for already existing
697 * shared page-tables, and non-existing shared and non-shared page-tables
698 * are built and populated directly.
699 *
 * Return: 0 on success, negative error code on error.
701 */
702 static int
xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
704 struct xe_svm_range *range,
705 struct xe_vm_pgtable_update *entries,
706 u32 *num_entries, bool clear_pt)
707 {
708 struct xe_device *xe = tile_to_xe(tile);
709 struct xe_bo *bo = xe_vma_bo(vma);
710 struct xe_res_cursor curs;
711 struct xe_vm *vm = xe_vma_vm(vma);
712 struct xe_pt_stage_bind_walk xe_walk = {
713 .base = {
714 .ops = &xe_pt_stage_bind_ops,
715 .shifts = xe_normal_pt_shifts,
716 .max_level = XE_PT_HIGHEST_LEVEL,
717 .staging = true,
718 },
719 .vm = vm,
720 .tile = tile,
721 .curs = &curs,
722 .va_curs_start = range ? xe_svm_range_start(range) :
723 xe_vma_start(vma),
724 .vma = vma,
725 .wupd.entries = entries,
726 .clear_pt = clear_pt,
727 };
728 struct xe_pt *pt = vm->pt_root[tile->id];
729 int ret;
730 bool is_purged = false;
731
732 /*
733 * Check if BO is purged:
734 * - Scratch VMs: Use scratch PTEs (XE_PTE_NULL) for safe zero reads
735 * - Non-scratch VMs: Clear PTEs to zero (non-present) to avoid mapping to phys addr 0
736 *
737 * For non-scratch VMs, we force clear_pt=true so leaf PTEs become completely
738 * zero instead of creating a PRESENT mapping to physical address 0.
739 */
740 if (bo && xe_bo_is_purged(bo)) {
741 is_purged = true;
742
743 /*
744 * For non-scratch VMs, a NULL rebind should use zero PTEs
745 * (non-present), not a present PTE to phys 0.
746 */
747 if (!xe_vm_has_scratch(vm))
748 xe_walk.clear_pt = true;
749 }
750
751 if (range) {
752 /* Move this entire thing to xe_svm.c? */
753 xe_svm_notifier_lock(vm);
754 if (!xe_svm_range_pages_valid(range)) {
755 xe_svm_range_debug(range, "BIND PREPARE - RETRY");
756 xe_svm_notifier_unlock(vm);
757 return -EAGAIN;
758 }
759 if (xe_svm_range_has_dma_mapping(range)) {
760 xe_res_first_dma(range->base.pages.dma_addr, 0,
761 xe_svm_range_size(range),
762 &curs);
763 xe_svm_range_debug(range, "BIND PREPARE - MIXED");
764 } else {
765 xe_assert(xe, false);
766 }
767 /*
 * Note, after unlocking, the resource cursor dma addresses may become
769 * stale, but the bind will be aborted anyway at commit time.
770 */
771 xe_svm_notifier_unlock(vm);
772 }
773
774 xe_walk.needs_64K = (vm->flags & XE_VM_FLAG_64K);
775 if (clear_pt)
776 goto walk_pt;
777
778 if (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) {
779 xe_walk.default_vram_pte = xe_atomic_for_vram(vm, vma) ? XE_USM_PPGTT_PTE_AE : 0;
780 xe_walk.default_system_pte = xe_atomic_for_system(vm, vma) ?
781 XE_USM_PPGTT_PTE_AE : 0;
782 }
783
784 xe_walk.default_vram_pte |= XE_PPGTT_PTE_DM;
785 xe_walk.dma_offset = (bo && !is_purged) ? vram_region_gpu_offset(bo->ttm.resource) : 0;
786 if (!range)
787 xe_bo_assert_held(bo);
788
789 if (!xe_vma_is_null(vma) && !range && !is_purged) {
790 if (xe_vma_is_userptr(vma))
791 xe_res_first_dma(to_userptr_vma(vma)->userptr.pages.dma_addr, 0,
792 xe_vma_size(vma), &curs);
793 else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo))
794 xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma),
795 xe_vma_size(vma), &curs);
796 else
797 xe_res_first_sg(xe_bo_sg(bo), xe_vma_bo_offset(vma),
798 xe_vma_size(vma), &curs);
799 } else if (!range) {
800 curs.size = xe_vma_size(vma);
801 }
802
803 walk_pt:
804 ret = xe_pt_walk_range(&pt->base, pt->level,
805 range ? xe_svm_range_start(range) : xe_vma_start(vma),
806 range ? xe_svm_range_end(range) : xe_vma_end(vma),
807 &xe_walk.base);
808
809 *num_entries = xe_walk.wupd.num_used_entries;
810 return ret;
811 }
812
813 /**
814 * xe_pt_nonshared_offsets() - Determine the non-shared entry offsets of a
815 * shared pagetable.
816 * @addr: The start address within the non-shared pagetable.
817 * @end: The end address within the non-shared pagetable.
818 * @level: The level of the non-shared pagetable.
819 * @walk: Walk info. The function adjusts the walk action.
820 * @action: next action to perform (see enum page_walk_action)
821 * @offset: Ignored on input, First non-shared entry on output.
822 * @end_offset: Ignored on input, Last non-shared entry + 1 on output.
823 *
824 * A non-shared page-table has some entries that belong to the address range
825 * and others that don't. This function determines the entries that belong
826 * fully to the address range. Depending on level, some entries may
827 * partially belong to the address range (that can't happen at level 0).
 * The function detects that and adjusts those offsets to not include those
829 * partial entries. Iff it does detect partial entries, we know that there must
830 * be shared page tables also at lower levels, so it adjusts the walk action
831 * accordingly.
832 *
833 * Return: true if there were non-shared entries, false otherwise.
834 */
static bool xe_pt_nonshared_offsets(u64 addr, u64 end, unsigned int level,
836 struct xe_pt_walk *walk,
837 enum page_walk_action *action,
838 pgoff_t *offset, pgoff_t *end_offset)
839 {
840 u64 size = 1ull << walk->shifts[level];
841
842 *offset = xe_pt_offset(addr, level, walk);
843 *end_offset = xe_pt_num_entries(addr, end, level, walk) + *offset;
844
845 if (!level)
846 return true;
847
848 /*
849 * If addr or next are not size aligned, there are shared pts at lower
850 * level, so in that case traverse down the subtree
851 */
852 *action = ACTION_CONTINUE;
853 if (!IS_ALIGNED(addr, size)) {
854 *action = ACTION_SUBTREE;
855 (*offset)++;
856 }
857
858 if (!IS_ALIGNED(end, size)) {
859 *action = ACTION_SUBTREE;
860 (*end_offset)--;
861 }
862
863 return *end_offset > *offset;
864 }
865
866 struct xe_pt_zap_ptes_walk {
867 /** @base: The walk base-class */
868 struct xe_pt_walk base;
869
870 /* Input parameters for the walk */
871 /** @tile: The tile we're building for */
872 struct xe_tile *tile;
873
874 /* Output */
/** @needs_invalidate: Whether we need to invalidate TLB */
876 bool needs_invalidate;
877 };
878
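/*
 * Walk callback: zero all non-shared PTEs of the walked range within this
 * child page-table and flag that a TLB invalidation is needed.
 */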
static int xe_pt_zap_ptes_entry(struct xe_ptw *parent, pgoff_t offset,
880 unsigned int level, u64 addr, u64 next,
881 struct xe_ptw **child,
882 enum page_walk_action *action,
883 struct xe_pt_walk *walk)
884 {
885 struct xe_pt_zap_ptes_walk *xe_walk =
886 container_of(walk, typeof(*xe_walk), base);
887 struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
888 pgoff_t end_offset;
889
890 XE_WARN_ON(!*child);
891 XE_WARN_ON(!level);
892
893 /*
894 * Note that we're called from an entry callback, and we're dealing
895 * with the child of that entry rather than the parent, so need to
896 * adjust level down.
897 */
898 if (xe_pt_nonshared_offsets(addr, next, --level, walk, action, &offset,
899 &end_offset)) {
900 xe_map_memset(tile_to_xe(xe_walk->tile), &xe_child->bo->vmap,
901 offset * sizeof(u64), 0,
902 (end_offset - offset) * sizeof(u64));
903 xe_walk->needs_invalidate = true;
904 }
905
906 return 0;
907 }
908
909 static const struct xe_pt_walk_ops xe_pt_zap_ptes_ops = {
910 .pt_entry = xe_pt_zap_ptes_entry,
911 };
912
913 /**
914 * xe_pt_zap_ptes() - Zap (zero) gpu ptes of an address range
915 * @tile: The tile we're zapping for.
916 * @vma: GPU VMA detailing address range.
917 *
 * Eviction and Userptr invalidation need to be able to zap the
 * gpu ptes of a given address range in pagefaulting mode.
 * In order to be able to do that, this function needs access to the shared
 * page-table entries so it can either clear the leaf PTEs or
 * clear the pointers to lower-level page-tables. The caller is required
 * to hold the necessary locks to ensure neither the page-table connectivity
 * nor the page-table entries of the range are updated from under us.
925 *
926 * Return: Whether ptes were actually updated and a TLB invalidation is
927 * required.
928 */
bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma)
930 {
931 struct xe_pt_zap_ptes_walk xe_walk = {
932 .base = {
933 .ops = &xe_pt_zap_ptes_ops,
934 .shifts = xe_normal_pt_shifts,
935 .max_level = XE_PT_HIGHEST_LEVEL,
936 },
937 .tile = tile,
938 };
939 struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
940 u8 pt_mask = (vma->tile_present & ~vma->tile_invalidated);
941
942 if (xe_vma_bo(vma))
943 xe_bo_assert_held(xe_vma_bo(vma));
944 else if (xe_vma_is_userptr(vma))
945 lockdep_assert_held(&xe_vma_vm(vma)->svm.gpusvm.notifier_lock);
946
947 if (!(pt_mask & BIT(tile->id)))
948 return false;
949
950 (void)xe_pt_walk_shared(&pt->base, pt->level, xe_vma_start(vma),
951 xe_vma_end(vma), &xe_walk.base);
952
953 return xe_walk.needs_invalidate;
954 }
955
956 /**
957 * xe_pt_zap_ptes_range() - Zap (zero) gpu ptes of a SVM range
958 * @tile: The tile we're zapping for.
959 * @vm: The VM we're zapping for.
960 * @range: The SVM range we're zapping for.
961 *
962 * SVM invalidation needs to be able to zap the gpu ptes of a given address
 * range. In order to be able to do that, this function needs access to the
964 * shared page-table entries so it can either clear the leaf PTEs or
965 * clear the pointers to lower-level page-tables. The caller is required
966 * to hold the SVM notifier lock.
967 *
968 * Return: Whether ptes were actually updated and a TLB invalidation is
969 * required.
970 */
bool xe_pt_zap_ptes_range(struct xe_tile *tile, struct xe_vm *vm,
972 struct xe_svm_range *range)
973 {
974 struct xe_pt_zap_ptes_walk xe_walk = {
975 .base = {
976 .ops = &xe_pt_zap_ptes_ops,
977 .shifts = xe_normal_pt_shifts,
978 .max_level = XE_PT_HIGHEST_LEVEL,
979 },
980 .tile = tile,
981 };
982 struct xe_pt *pt = vm->pt_root[tile->id];
983 u8 pt_mask = (range->tile_present & ~range->tile_invalidated);
984
985 /*
986 * Locking rules:
987 *
988 * - notifier_lock (write): full protection against page table changes
989 * and MMU notifier invalidations.
990 *
991 * - notifier_lock (read) + vm_lock (write): combined protection against
992 * invalidations and concurrent page table modifications. (e.g., madvise)
993 *
994 */
995 lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 0) ||
996 (lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
997 lockdep_is_held_type(&vm->lock, 0)));
998
999 if (!(pt_mask & BIT(tile->id)))
1000 return false;
1001
1002 (void)xe_pt_walk_shared(&pt->base, pt->level, xe_svm_range_start(range),
1003 xe_svm_range_end(range), &xe_walk.base);
1004
1005 return xe_walk.needs_invalidate;
1006 }
1007
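/*
 * Copy the staged PTE values of one update entry either into the mapped
 * page-table BO (@map) or into a CPU-side buffer (@data).
 */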
1008 static void
xe_vm_populate_pgtable(struct xe_migrate_pt_update *pt_update, struct xe_tile *tile,
1010 struct iosys_map *map, void *data,
1011 u32 qword_ofs, u32 num_qwords,
1012 const struct xe_vm_pgtable_update *update)
1013 {
1014 struct xe_pt_entry *ptes = update->pt_entries;
1015 u64 *ptr = data;
1016 u32 i;
1017
1018 for (i = 0; i < num_qwords; i++) {
1019 if (map)
1020 xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) *
1021 sizeof(u64), u64, ptes[i].pte);
1022 else
1023 ptr[i] = ptes[i].pte;
1024 }
1025 }
1026
static void xe_pt_cancel_bind(struct xe_vma *vma,
1028 struct xe_vm_pgtable_update *entries,
1029 u32 num_entries)
1030 {
1031 u32 i, j;
1032
1033 for (i = 0; i < num_entries; i++) {
1034 struct xe_pt *pt = entries[i].pt;
1035
1036 if (!pt)
1037 continue;
1038
1039 if (pt->level) {
1040 for (j = 0; j < entries[i].qwords; j++)
1041 xe_pt_destroy(entries[i].pt_entries[j].pt,
1042 xe_vma_vm(vma)->flags, NULL);
1043 }
1044
1045 kfree(entries[i].pt_entries);
1046 entries[i].pt_entries = NULL;
1047 entries[i].qwords = 0;
1048 }
1049 }
1050
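/* Sentinel used in place of a real VMA for updates not driven by a struct xe_vma. */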
1051 #define XE_INVALID_VMA ((struct xe_vma *)(0xdeaddeadull))
1052
static void xe_pt_commit_prepare_locks_assert(struct xe_vma *vma)
1054 {
1055 struct xe_vm *vm;
1056
1057 if (vma == XE_INVALID_VMA)
1058 return;
1059
1060 vm = xe_vma_vm(vma);
1061 lockdep_assert_held(&vm->lock);
1062
1063 if (!xe_vma_has_no_bo(vma))
1064 dma_resv_assert_held(xe_vma_bo(vma)->ttm.base.resv);
1065
1066 xe_vm_assert_held(vm);
1067 }
1068
static void xe_pt_commit_locks_assert(struct xe_vma *vma)
1070 {
1071 struct xe_vm *vm;
1072
1073 if (vma == XE_INVALID_VMA)
1074 return;
1075
1076 vm = xe_vma_vm(vma);
1077 xe_pt_commit_prepare_locks_assert(vma);
1078
1079 if (xe_vma_is_userptr(vma))
1080 xe_svm_assert_held_read(vm);
1081 }
1082
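/*
 * Commit the staged CPU-side updates: promote the staging pointers to the
 * live children arrays and destroy (or defer-put) the page-tables they
 * replace.
 */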
static void xe_pt_commit(struct xe_vma *vma,
1084 struct xe_vm_pgtable_update *entries,
1085 u32 num_entries, struct llist_head *deferred)
1086 {
1087 u32 i, j;
1088
1089 xe_pt_commit_locks_assert(vma);
1090
1091 for (i = 0; i < num_entries; i++) {
1092 struct xe_pt *pt = entries[i].pt;
1093 struct xe_pt_dir *pt_dir;
1094
1095 if (!pt->level)
1096 continue;
1097
1098 pt_dir = as_xe_pt_dir(pt);
1099 for (j = 0; j < entries[i].qwords; j++) {
1100 struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
1101 int j_ = j + entries[i].ofs;
1102
1103 pt_dir->children[j_] = pt_dir->staging[j_];
1104 xe_pt_destroy(oldpte, (vma == XE_INVALID_VMA) ? 0 :
1105 xe_vma_vm(vma)->flags, deferred);
1106 }
1107 }
1108 }
1109
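/*
 * Undo a prepared bind: restore num_live and the staging pointers, and
 * destroy the newly created child page-tables.
 */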
static void xe_pt_abort_bind(struct xe_vma *vma,
1111 struct xe_vm_pgtable_update *entries,
1112 u32 num_entries, bool rebind)
1113 {
1114 int i, j;
1115
1116 xe_pt_commit_prepare_locks_assert(vma);
1117
1118 for (i = num_entries - 1; i >= 0; --i) {
1119 struct xe_pt *pt = entries[i].pt;
1120 struct xe_pt_dir *pt_dir;
1121
1122 if (!rebind)
1123 pt->num_live -= entries[i].qwords;
1124
1125 if (!pt->level)
1126 continue;
1127
1128 pt_dir = as_xe_pt_dir(pt);
1129 for (j = 0; j < entries[i].qwords; j++) {
1130 u32 j_ = j + entries[i].ofs;
1131 struct xe_pt *newpte = xe_pt_entry_staging(pt_dir, j_);
1132 struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
1133
1134 pt_dir->staging[j_] = oldpte ? &oldpte->base : 0;
1135 xe_pt_destroy(newpte, xe_vma_vm(vma)->flags, NULL);
1136 }
1137 }
1138 }
1139
static void xe_pt_commit_prepare_bind(struct xe_vma *vma,
1141 struct xe_vm_pgtable_update *entries,
1142 u32 num_entries, bool rebind)
1143 {
1144 u32 i, j;
1145
1146 xe_pt_commit_prepare_locks_assert(vma);
1147
1148 for (i = 0; i < num_entries; i++) {
1149 struct xe_pt *pt = entries[i].pt;
1150 struct xe_pt_dir *pt_dir;
1151
1152 if (!rebind)
1153 pt->num_live += entries[i].qwords;
1154
1155 if (!pt->level)
1156 continue;
1157
1158 pt_dir = as_xe_pt_dir(pt);
1159 for (j = 0; j < entries[i].qwords; j++) {
1160 u32 j_ = j + entries[i].ofs;
1161 struct xe_pt *newpte = entries[i].pt_entries[j].pt;
1162 struct xe_pt *oldpte = NULL;
1163
1164 if (xe_pt_entry_staging(pt_dir, j_))
1165 oldpte = xe_pt_entry_staging(pt_dir, j_);
1166
1167 pt_dir->staging[j_] = &newpte->base;
1168 entries[i].pt_entries[j].pt = oldpte;
1169 }
1170 }
1171 }
1172
static void xe_pt_free_bind(struct xe_vm_pgtable_update *entries,
1174 u32 num_entries)
1175 {
1176 u32 i;
1177
1178 for (i = 0; i < num_entries; i++)
1179 kfree(entries[i].pt_entries);
1180 }
1181
1182 static int
xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
1184 struct xe_svm_range *range,
1185 struct xe_vm_pgtable_update *entries,
1186 u32 *num_entries, bool invalidate_on_bind)
1187 {
1188 int err;
1189
1190 *num_entries = 0;
1191 err = xe_pt_stage_bind(tile, vma, range, entries, num_entries,
1192 invalidate_on_bind);
1193 if (!err)
1194 xe_tile_assert(tile, *num_entries);
1195
1196 return err;
1197 }
1198
static void xe_vm_dbg_print_entries(struct xe_device *xe,
1200 const struct xe_vm_pgtable_update *entries,
1201 unsigned int num_entries, bool bind)
1202 #if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM))
1203 {
1204 unsigned int i;
1205
1206 vm_dbg(&xe->drm, "%s: %u entries to update\n", bind ? "bind" : "unbind",
1207 num_entries);
1208 for (i = 0; i < num_entries; i++) {
1209 const struct xe_vm_pgtable_update *entry = &entries[i];
1210 struct xe_pt *xe_pt = entry->pt;
1211 u64 page_size = 1ull << xe_pt_shift(xe_pt->level);
1212 u64 end;
1213 u64 start;
1214
1215 xe_assert(xe, !entry->pt->is_compact);
1216 start = entry->ofs * page_size;
1217 end = start + page_size * entry->qwords;
1218 vm_dbg(&xe->drm,
1219 "\t%u: Update level %u at (%u + %u) [%llx...%llx) f:%x\n",
1220 i, xe_pt->level, entry->ofs, entry->qwords,
1221 xe_pt_addr(xe_pt) + start, xe_pt_addr(xe_pt) + end, 0);
1222 }
1223 }
1224 #else
1225 {}
1226 #endif
1227
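/* Return true if all input sync fences are absent or already signaled. */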
static bool no_in_syncs(struct xe_sync_entry *syncs, u32 num_syncs)
1229 {
1230 int i;
1231
1232 for (i = 0; i < num_syncs; i++) {
1233 struct dma_fence *fence = syncs[i].fence;
1234
1235 if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
1236 &fence->flags))
1237 return false;
1238 }
1239
1240 return true;
1241 }
1242
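/*
 * For GPU updates (@job set), add @resv as a job dependency. For CPU
 * updates (no job), fail with -ETIME unless @resv is already signaled.
 */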
static int job_test_add_deps(struct xe_sched_job *job,
1244 struct dma_resv *resv,
1245 enum dma_resv_usage usage)
1246 {
1247 if (!job) {
1248 if (!dma_resv_test_signaled(resv, usage))
1249 return -ETIME;
1250
1251 return 0;
1252 }
1253
1254 return xe_sched_job_add_deps(job, resv, usage);
1255 }
1256
static int vma_add_deps(struct xe_vma *vma, struct xe_sched_job *job)
1258 {
1259 struct xe_bo *bo = xe_vma_bo(vma);
1260
1261 xe_bo_assert_held(bo);
1262
1263 if (bo && !bo->vm)
1264 return job_test_add_deps(job, bo->ttm.base.resv,
1265 DMA_RESV_USAGE_KERNEL);
1266
1267 return 0;
1268 }
1269
static int op_add_deps(struct xe_vm *vm, struct xe_vma_op *op,
1271 struct xe_sched_job *job)
1272 {
1273 int err = 0;
1274
1275 /*
1276 * No need to check for is_cpu_addr_mirror here as vma_add_deps is a
1277 * NOP if VMA is_cpu_addr_mirror
1278 */
1279
1280 switch (op->base.op) {
1281 case DRM_GPUVA_OP_MAP:
1282 if (!op->map.immediate && xe_vm_in_fault_mode(vm))
1283 break;
1284
1285 err = vma_add_deps(op->map.vma, job);
1286 break;
1287 case DRM_GPUVA_OP_REMAP:
1288 if (op->remap.prev)
1289 err = vma_add_deps(op->remap.prev, job);
1290 if (!err && op->remap.next)
1291 err = vma_add_deps(op->remap.next, job);
1292 break;
1293 case DRM_GPUVA_OP_UNMAP:
1294 break;
1295 case DRM_GPUVA_OP_PREFETCH:
1296 err = vma_add_deps(gpuva_to_vma(op->base.prefetch.va), job);
1297 break;
1298 case DRM_GPUVA_OP_DRIVER:
1299 break;
1300 default:
1301 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
1302 }
1303
1304 return err;
1305 }
1306
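/*
 * Gather all dependencies for a page-table update: the VM resv, any
 * overlapping range fences, per-operation BO resvs and the input syncs.
 * For CPU updates (no job) an unsignaled dependency results in -ETIME.
 */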
static int xe_pt_vm_dependencies(struct xe_sched_job *job,
1308 struct xe_tlb_inval_job *ijob,
1309 struct xe_tlb_inval_job *mjob,
1310 struct xe_vm *vm,
1311 struct xe_vma_ops *vops,
1312 struct xe_vm_pgtable_update_ops *pt_update_ops,
1313 struct xe_range_fence_tree *rftree)
1314 {
1315 struct xe_range_fence *rtfence;
1316 struct dma_fence *fence;
1317 struct xe_vma_op *op;
1318 int err = 0, i;
1319
1320 xe_vm_assert_held(vm);
1321
1322 if (!job && !no_in_syncs(vops->syncs, vops->num_syncs))
1323 return -ETIME;
1324
1325 if (!job && !xe_exec_queue_is_idle(pt_update_ops->q))
1326 return -ETIME;
1327
1328 if (pt_update_ops->wait_vm_bookkeep || pt_update_ops->wait_vm_kernel) {
1329 err = job_test_add_deps(job, xe_vm_resv(vm),
1330 pt_update_ops->wait_vm_bookkeep ?
1331 DMA_RESV_USAGE_BOOKKEEP :
1332 DMA_RESV_USAGE_KERNEL);
1333 if (err)
1334 return err;
1335 }
1336
1337 rtfence = xe_range_fence_tree_first(rftree, pt_update_ops->start,
1338 pt_update_ops->last);
1339 while (rtfence) {
1340 fence = rtfence->fence;
1341
1342 if (!dma_fence_is_signaled(fence)) {
1343 /*
1344 * Is this a CPU update? GPU is busy updating, so return
1345 * an error
1346 */
1347 if (!job)
1348 return -ETIME;
1349
1350 dma_fence_get(fence);
1351 err = drm_sched_job_add_dependency(&job->drm, fence);
1352 if (err)
1353 return err;
1354 }
1355
1356 rtfence = xe_range_fence_tree_next(rtfence,
1357 pt_update_ops->start,
1358 pt_update_ops->last);
1359 }
1360
1361 list_for_each_entry(op, &vops->list, link) {
1362 err = op_add_deps(vm, op, job);
1363 if (err)
1364 return err;
1365 }
1366
1367 for (i = 0; job && !err && i < vops->num_syncs; i++)
1368 err = xe_sync_entry_add_deps(&vops->syncs[i], job);
1369
1370 if (job) {
1371 if (ijob) {
1372 err = xe_tlb_inval_job_alloc_dep(ijob);
1373 if (err)
1374 return err;
1375 }
1376
1377 if (mjob) {
1378 err = xe_tlb_inval_job_alloc_dep(mjob);
1379 if (err)
1380 return err;
1381 }
1382 }
1383
1384 return err;
1385 }
1386
static int xe_pt_pre_commit(struct xe_migrate_pt_update *pt_update)
1388 {
1389 struct xe_vma_ops *vops = pt_update->vops;
1390 struct xe_vm *vm = vops->vm;
1391 struct xe_range_fence_tree *rftree = &vm->rftree[pt_update->tile_id];
1392 struct xe_vm_pgtable_update_ops *pt_update_ops =
1393 &vops->pt_update_ops[pt_update->tile_id];
1394
1395 return xe_pt_vm_dependencies(pt_update->job, pt_update->ijob,
1396 pt_update->mjob, vm, pt_update->vops,
1397 pt_update_ops, rftree);
1398 }
1399
1400 #if IS_ENABLED(CONFIG_DRM_GPUSVM)
1401 #ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
1402
static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
1404 {
1405 u32 divisor = uvma->userptr.divisor ? uvma->userptr.divisor : 2;
1406 static u32 count;
1407
1408 if (count++ % divisor == divisor - 1) {
1409 uvma->userptr.divisor = divisor << 1;
1410 return true;
1411 }
1412
1413 return false;
1414 }
1415
1416 #else
1417
static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
1419 {
1420 return false;
1421 }
1422
1423 #endif
1424
static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
1426 struct xe_vm_pgtable_update_ops *pt_update)
1427 {
1428 struct xe_userptr_vma *uvma;
1429 unsigned long notifier_seq;
1430
1431 xe_svm_assert_held_read(vm);
1432
1433 if (!xe_vma_is_userptr(vma))
1434 return 0;
1435
1436 uvma = to_userptr_vma(vma);
1437 if (xe_pt_userptr_inject_eagain(uvma))
1438 xe_vma_userptr_force_invalidate(uvma);
1439
1440 notifier_seq = uvma->userptr.pages.notifier_seq;
1441
1442 if (!mmu_interval_read_retry(&uvma->userptr.notifier,
1443 notifier_seq))
1444 return 0;
1445
1446 if (xe_vm_in_fault_mode(vm))
1447 return -EAGAIN;
1448
1449 /*
1450 * Just continue the operation since exec or rebind worker
1451 * will take care of rebinding.
1452 */
1453 return 0;
1454 }
1455
static int op_check_svm_userptr(struct xe_vm *vm, struct xe_vma_op *op,
1457 struct xe_vm_pgtable_update_ops *pt_update)
1458 {
1459 int err = 0;
1460
1461 xe_svm_assert_held_read(vm);
1462
1463 switch (op->base.op) {
1464 case DRM_GPUVA_OP_MAP:
1465 if (!op->map.immediate && xe_vm_in_fault_mode(vm))
1466 break;
1467
1468 err = vma_check_userptr(vm, op->map.vma, pt_update);
1469 break;
1470 case DRM_GPUVA_OP_REMAP:
1471 if (op->remap.prev && !op->remap.skip_prev)
1472 err = vma_check_userptr(vm, op->remap.prev, pt_update);
1473 if (!err && op->remap.next && !op->remap.skip_next)
1474 err = vma_check_userptr(vm, op->remap.next, pt_update);
1475 break;
1476 case DRM_GPUVA_OP_UNMAP:
1477 break;
1478 case DRM_GPUVA_OP_PREFETCH:
1479 if (xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va))) {
1480 struct xe_svm_range *range = op->map_range.range;
1481 unsigned long i;
1482
1483 xe_assert(vm->xe,
1484 xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va)));
1485 xa_for_each(&op->prefetch_range.range, i, range) {
1486 xe_svm_range_debug(range, "PRE-COMMIT");
1487
1488 if (!xe_svm_range_pages_valid(range)) {
1489 xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
1490 return -ENODATA;
1491 }
1492 }
1493 } else {
1494 err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va), pt_update);
1495 }
1496 break;
1497 #if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
1498 case DRM_GPUVA_OP_DRIVER:
1499 if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
1500 struct xe_svm_range *range = op->map_range.range;
1501
1502 xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
1503
1504 xe_svm_range_debug(range, "PRE-COMMIT");
1505
1506 if (!xe_svm_range_pages_valid(range)) {
1507 xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
1508 return -EAGAIN;
1509 }
1510 }
1511 break;
1512 #endif
1513 default:
1514 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
1515 }
1516
1517 return err;
1518 }
1519
static int xe_pt_svm_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
1521 {
1522 struct xe_vm *vm = pt_update->vops->vm;
1523 struct xe_vma_ops *vops = pt_update->vops;
1524 struct xe_vm_pgtable_update_ops *pt_update_ops =
1525 &vops->pt_update_ops[pt_update->tile_id];
1526 struct xe_vma_op *op;
1527 int err;
1528
1529 err = xe_pt_pre_commit(pt_update);
1530 if (err)
1531 return err;
1532
1533 xe_svm_notifier_lock(vm);
1534
1535 list_for_each_entry(op, &vops->list, link) {
1536 err = op_check_svm_userptr(vm, op, pt_update_ops);
1537 if (err) {
1538 xe_svm_notifier_unlock(vm);
1539 break;
1540 }
1541 }
1542
1543 return err;
1544 }
1545 #endif
1546
1547 struct xe_pt_stage_unbind_walk {
1548 /** @base: The pagewalk base-class. */
1549 struct xe_pt_walk base;
1550
1551 /* Input parameters for the walk */
1552 /** @tile: The tile we're unbinding from. */
1553 struct xe_tile *tile;
1554
1555 /**
1556 * @modified_start: Walk range start, modified to include any
1557 * shared pagetables that we're the only user of and can thus
1558 * treat as private.
1559 */
1560 u64 modified_start;
/** @modified_end: Walk range end, modified like @modified_start. */
1562 u64 modified_end;
1563
1564 /** @prl: Backing pointer to page reclaim list in pt_update_ops */
1565 struct xe_page_reclaim_list *prl;
1566
1567 /* Output */
1568 /* @wupd: Structure to track the page-table updates we're building */
1569 struct xe_walk_update wupd;
1570 };
1571
1572 /*
1573 * Check whether this range is the only one populating this pagetable,
1574 * and in that case, update the walk range checks so that higher levels don't
1575 * view us as a shared pagetable.
1576 */
static bool xe_pt_check_kill(u64 addr, u64 next, unsigned int level,
1578 const struct xe_pt *child,
1579 enum page_walk_action *action,
1580 struct xe_pt_walk *walk)
1581 {
1582 struct xe_pt_stage_unbind_walk *xe_walk =
1583 container_of(walk, typeof(*xe_walk), base);
1584 unsigned int shift = walk->shifts[level];
1585 u64 size = 1ull << shift;
1586
1587 if (IS_ALIGNED(addr, size) && IS_ALIGNED(next, size) &&
1588 ((next - addr) >> shift) == child->num_live) {
1589 u64 size = 1ull << walk->shifts[level + 1];
1590
1591 *action = ACTION_CONTINUE;
1592
1593 if (xe_walk->modified_start >= addr)
1594 xe_walk->modified_start = round_down(addr, size);
1595 if (xe_walk->modified_end <= next)
1596 xe_walk->modified_end = round_up(next, size);
1597
1598 return true;
1599 }
1600
1601 return false;
1602 }
1603
1604 /* page_size = 2^(reclamation_size + XE_PTE_SHIFT) */
1605 #define COMPUTE_RECLAIM_ADDRESS_MASK(page_size) \
1606 ({ \
1607 BUILD_BUG_ON(!__builtin_constant_p(page_size)); \
1608 ilog2(page_size) - XE_PTE_SHIFT; \
1609 })
1610
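/*
 * Append a reclaim entry for a single leaf PTE to the page reclaim list,
 * encoding its physical page and reclaim size (4K, 64K or 2M). Unsupported
 * PTE levels abort the list.
 */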
static int generate_reclaim_entry(struct xe_tile *tile,
1612 struct xe_page_reclaim_list *prl,
1613 u64 pte, struct xe_pt *xe_child)
1614 {
1615 struct xe_gt *gt = tile->primary_gt;
1616 struct xe_guc_page_reclaim_entry *reclaim_entries = prl->entries;
1617 u64 phys_addr = pte & XE_PTE_ADDR_MASK;
1618 u64 phys_page = phys_addr >> XE_PTE_SHIFT;
1619 int num_entries = prl->num_entries;
1620 u32 reclamation_size;
1621
1622 xe_tile_assert(tile, xe_child->level <= MAX_HUGEPTE_LEVEL);
1623 xe_tile_assert(tile, reclaim_entries);
1624 xe_tile_assert(tile, num_entries < XE_PAGE_RECLAIM_MAX_ENTRIES - 1);
1625
1626 if (!xe_page_reclaim_list_valid(prl))
1627 return -EINVAL;
1628
/*
 * reclamation_size indicates the size of the page to be
 * invalidated and flushed from the non-coherent cache.
 * Page size is computed as 2^(reclamation_size + XE_PTE_SHIFT) bytes.
 * Only 4K, 64K (level 0) and 2M pages are supported by hardware for page reclaim.
 */
1635 if (xe_child->level == 0 && !(pte & XE_PTE_PS64)) {
1636 xe_gt_stats_incr(gt, XE_GT_STATS_ID_PRL_4K_ENTRY_COUNT, 1);
1637 reclamation_size = COMPUTE_RECLAIM_ADDRESS_MASK(SZ_4K); /* reclamation_size = 0 */
1638 xe_tile_assert(tile, phys_addr % SZ_4K == 0);
1639 } else if (xe_child->level == 0) {
1640 xe_gt_stats_incr(gt, XE_GT_STATS_ID_PRL_64K_ENTRY_COUNT, 1);
1641 reclamation_size = COMPUTE_RECLAIM_ADDRESS_MASK(SZ_64K); /* reclamation_size = 4 */
1642 xe_tile_assert(tile, phys_addr % SZ_64K == 0);
1643 } else if (xe_child->level == 1 && pte & XE_PDE_PS_2M) {
1644 xe_gt_stats_incr(gt, XE_GT_STATS_ID_PRL_2M_ENTRY_COUNT, 1);
1645 reclamation_size = COMPUTE_RECLAIM_ADDRESS_MASK(SZ_2M); /* reclamation_size = 9 */
1646 xe_tile_assert(tile, phys_addr % SZ_2M == 0);
1647 } else {
1648 xe_page_reclaim_list_abort(tile->primary_gt, prl,
1649 "unsupported PTE level=%u pte=%#llx",
1650 xe_child->level, pte);
1651 return -EINVAL;
1652 }
1653
1654 reclaim_entries[num_entries].qw =
1655 FIELD_PREP(XE_PAGE_RECLAIM_VALID, 1) |
1656 FIELD_PREP(XE_PAGE_RECLAIM_SIZE, reclamation_size) |
1657 FIELD_PREP(XE_PAGE_RECLAIM_ADDR_LO, phys_page) |
1658 FIELD_PREP(XE_PAGE_RECLAIM_ADDR_HI, phys_page >> 20);
1659 prl->num_entries++;
1660 vm_dbg(&tile_to_xe(tile)->drm,
1661 "PRL add entry: level=%u pte=%#llx reclamation_size=%u prl_idx=%d\n",
1662 xe_child->level, pte, reclamation_size, num_entries);
1663
1664 return 0;
1665 }
1666
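/*
 * Walk callback for unbind: optionally record leaf PTEs in the page reclaim
 * list, then check whether the child page-table can be killed entirely and
 * abort the reclaim list if PTEs would be dropped at non-leaf levels.
 */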
static int xe_pt_stage_unbind_entry(struct xe_ptw *parent, pgoff_t offset,
1668 unsigned int level, u64 addr, u64 next,
1669 struct xe_ptw **child,
1670 enum page_walk_action *action,
1671 struct xe_pt_walk *walk)
1672 {
1673 struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
1674 struct xe_pt_stage_unbind_walk *xe_walk =
1675 container_of(walk, typeof(*xe_walk), base);
1676 struct xe_device *xe = tile_to_xe(xe_walk->tile);
1677 pgoff_t first = xe_pt_offset(addr, xe_child->level, walk);
1678 bool killed;
1679
1680 XE_WARN_ON(!*child);
1681 XE_WARN_ON(!level);
1682 /* Check for leaf node */
1683 if (xe_walk->prl && xe_page_reclaim_list_valid(xe_walk->prl) &&
1684 xe_child->level <= MAX_HUGEPTE_LEVEL) {
1685 struct iosys_map *leaf_map = &xe_child->bo->vmap;
1686 pgoff_t count = xe_pt_num_entries(addr, next, xe_child->level, walk);
1687
1688 for (pgoff_t i = 0; i < count; i++) {
1689 u64 pte;
1690 int ret;
1691
1692 /*
1693 * If not a leaf pt, skip unless non-leaf pt is interleaved between
1694 * leaf ptes which causes the page walk to skip over the child leaves
1695 */
1696 if (xe_child->base.children && xe_child->base.children[first + i]) {
1697 u64 pt_size = 1ULL << walk->shifts[xe_child->level];
1698 bool edge_pt = (i == 0 && !IS_ALIGNED(addr, pt_size)) ||
1699 (i == count - 1 && !IS_ALIGNED(next, pt_size));
1700
1701 if (!edge_pt) {
1702 xe_page_reclaim_list_abort(xe_walk->tile->primary_gt,
1703 xe_walk->prl,
1704 "PT is skipped by walk at level=%u offset=%lu",
1705 xe_child->level, first + i);
1706 break;
1707 }
1708 continue;
1709 }
1710
1711 pte = xe_map_rd(xe, leaf_map, (first + i) * sizeof(u64), u64);
1712
1713 /*
1714 * In rare scenarios, the PTE may not have been written yet due to a race.
1715 * In such cases, invalidate the PRL and fall back to a full PPC invalidation.
1716 */
1717 if (!pte) {
1718 xe_page_reclaim_list_abort(xe_walk->tile->primary_gt, xe_walk->prl,
1719 "found zero pte at addr=%#llx", addr);
1720 break;
1721 }
1722
1723 /* Ensure it is a defined page */
1724 xe_tile_assert(xe_walk->tile, xe_child->level == 0 ||
1725 (pte & (XE_PDE_PS_2M | XE_PDPE_PS_1G)));
1726
1727 /* A single entry is added for a 64K page; its contiguous 4K PTEs have XE_PTE_PS64 set */
1728 if (pte & XE_PTE_PS64)
1729 i += 15; /* Skip the other 15 consecutive 4K PTEs within the 64K page */
1730
1731 /* Account for the NULL-terminated entry at the end (-1) */
1732 if (xe_walk->prl->num_entries < XE_PAGE_RECLAIM_MAX_ENTRIES - 1) {
1733 ret = generate_reclaim_entry(xe_walk->tile, xe_walk->prl,
1734 pte, xe_child);
1735 if (ret)
1736 break;
1737 } else {
1738 /* overflow, mark as invalid */
1739 xe_page_reclaim_list_abort(xe_walk->tile->primary_gt, xe_walk->prl,
1740 "overflow while adding pte=%#llx",
1741 pte);
1742 break;
1743 }
1744 }
1745 }
1746
1747 killed = xe_pt_check_kill(addr, next, level - 1, xe_child, action, walk);
1748
1749 /*
1750 * Check whether any PTEs are potentially dropped at non-leaf levels, either
1751 * because the subtree is killed or because the page walk covers the region.
1752 */
1753 if (xe_walk->prl && xe_page_reclaim_list_valid(xe_walk->prl) &&
1754 xe_child->level > MAX_HUGEPTE_LEVEL && xe_child->num_live) {
1755 bool covered = xe_pt_covers(addr, next, xe_child->level, &xe_walk->base);
1756
1757 /*
1758 * If the page walk is aborted early (kill) or the page walk covers the full
1759 * range, we need to invalidate the PRL.
1760 */
1761 if (killed || covered)
1762 xe_page_reclaim_list_abort(xe_walk->tile->primary_gt, xe_walk->prl,
1763 "kill at level=%u addr=%#llx next=%#llx num_live=%u",
1764 level, addr, next, xe_child->num_live);
1765 }
1766
1767 return 0;
1768 }
1769
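/*
 * Walk callback invoked after descending into a child page-table during unbind
 * staging. For page-tables shared with mappings outside the unbound range,
 * this records an update entry that clears only the non-shared slots rather
 * than removing the whole table.
 */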
1770 static int
1771 xe_pt_stage_unbind_post_descend(struct xe_ptw *parent, pgoff_t offset,
1772 unsigned int level, u64 addr, u64 next,
1773 struct xe_ptw **child,
1774 enum page_walk_action *action,
1775 struct xe_pt_walk *walk)
1776 {
1777 struct xe_pt_stage_unbind_walk *xe_walk =
1778 container_of(walk, typeof(*xe_walk), base);
1779 struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
1780 pgoff_t end_offset;
1781 u64 size = 1ull << walk->shifts[--level];
1782 int err;
1783
1784 if (!IS_ALIGNED(addr, size))
1785 addr = xe_walk->modified_start;
1786 if (!IS_ALIGNED(next, size))
1787 next = xe_walk->modified_end;
1788
1789 /* Parent == *child is the root pt. Don't kill it. */
1790 if (parent != *child &&
1791 xe_pt_check_kill(addr, next, level, xe_child, action, walk))
1792 return 0;
1793
1794 if (!xe_pt_nonshared_offsets(addr, next, level, walk, action, &offset,
1795 &end_offset))
1796 return 0;
1797
1798 err = xe_pt_new_shared(&xe_walk->wupd, xe_child, offset, true);
1799 if (err)
1800 return err;
1801
1802 xe_walk->wupd.updates[level].update->qwords = end_offset - offset;
1803
1804 return 0;
1805 }
1806
1807 static const struct xe_pt_walk_ops xe_pt_stage_unbind_ops = {
1808 .pt_entry = xe_pt_stage_unbind_entry,
1809 .pt_post_descend = xe_pt_stage_unbind_post_descend,
1810 };
1811
1812 /**
1813 * xe_pt_stage_unbind() - Build page-table update structures for an unbind
1814 * operation
1815 * @tile: The tile we're unbinding for.
1816 * @vm: The vm
1817 * @vma: The vma we're unbinding.
1818 * @range: The range we're unbinding.
1819 * @entries: Caller-provided storage for the update structures.
1820 *
1821 * Builds page-table update structures for an unbind operation. The function
1822 * will attempt to remove all page-tables that we're the only user
1823 * of, and for that to work, the unbind operation must be committed in the
1824 * same critical section that blocks racing binds to the same page-table tree.
1825 *
1826 * Return: The number of entries used.
1827 */
1828 static unsigned int xe_pt_stage_unbind(struct xe_tile *tile,
1829 struct xe_vm *vm,
1830 struct xe_vma *vma,
1831 struct xe_svm_range *range,
1832 struct xe_vm_pgtable_update *entries)
1833 {
1834 u64 start = range ? xe_svm_range_start(range) : xe_vma_start(vma);
1835 u64 end = range ? xe_svm_range_end(range) : xe_vma_end(vma);
1836 struct xe_vm_pgtable_update_op *pt_update_op =
1837 container_of(entries, struct xe_vm_pgtable_update_op, entries[0]);
1838 struct xe_pt_stage_unbind_walk xe_walk = {
1839 .base = {
1840 .ops = &xe_pt_stage_unbind_ops,
1841 .shifts = xe_normal_pt_shifts,
1842 .max_level = XE_PT_HIGHEST_LEVEL,
1843 .staging = true,
1844 },
1845 .tile = tile,
1846 .modified_start = start,
1847 .modified_end = end,
1848 .wupd.entries = entries,
1849 .prl = pt_update_op->prl,
1850 };
1851 struct xe_pt *pt = vm->pt_root[tile->id];
1852
1853 (void)xe_pt_walk_shared(&pt->base, pt->level, start, end,
1854 &xe_walk.base);
1855
1856 return xe_walk.wupd.num_used_entries;
1857 }
1858
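/*
 * Migrate "clear" callback: fill the targeted qwords with the empty PTE value
 * for the page-table level (a scratch entry when the VM uses scratch pages,
 * zero otherwise), handling iomem maps, CPU-visible maps and plain pointers.
 */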
1859 static void
1860 xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update,
1861 struct xe_tile *tile, struct iosys_map *map,
1862 void *ptr, u32 qword_ofs, u32 num_qwords,
1863 const struct xe_vm_pgtable_update *update)
1864 {
1865 struct xe_vm *vm = pt_update->vops->vm;
1866 u64 empty = __xe_pt_empty_pte(tile, vm, update->pt->level);
1867 int i;
1868
1869 if (map && map->is_iomem)
1870 for (i = 0; i < num_qwords; ++i)
1871 xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) *
1872 sizeof(u64), u64, empty);
1873 else if (map)
1874 memset64(map->vaddr + qword_ofs * sizeof(u64), empty,
1875 num_qwords);
1876 else
1877 memset64(ptr, empty, num_qwords);
1878 }
1879
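/*
 * Undo the staging changes of an unbind that could not be committed: restore
 * each page-table's num_live count and re-attach the detached child
 * page-tables to the staging tree.
 */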
1880 static void xe_pt_abort_unbind(struct xe_vma *vma,
1881 struct xe_vm_pgtable_update *entries,
1882 u32 num_entries)
1883 {
1884 int i, j;
1885
1886 xe_pt_commit_prepare_locks_assert(vma);
1887
1888 for (i = num_entries - 1; i >= 0; --i) {
1889 struct xe_vm_pgtable_update *entry = &entries[i];
1890 struct xe_pt *pt = entry->pt;
1891 struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
1892
1893 pt->num_live += entry->qwords;
1894
1895 if (!pt->level)
1896 continue;
1897
1898 for (j = entry->ofs; j < entry->ofs + entry->qwords; j++)
1899 pt_dir->staging[j] =
1900 entries[i].pt_entries[j - entry->ofs].pt ?
1901 &entries[i].pt_entries[j - entry->ofs].pt->base : NULL;
1902 }
1903 }
1904
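/*
 * Commit the staged unbind to the staging tree: drop num_live for each touched
 * page-table and move the detached child page-tables into the update entries
 * so they can be freed once the unbind completes (or restored on abort).
 */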
1905 static void
1906 xe_pt_commit_prepare_unbind(struct xe_vma *vma,
1907 struct xe_vm_pgtable_update *entries,
1908 u32 num_entries)
1909 {
1910 int i, j;
1911
1912 xe_pt_commit_prepare_locks_assert(vma);
1913
1914 for (i = 0; i < num_entries; ++i) {
1915 struct xe_vm_pgtable_update *entry = &entries[i];
1916 struct xe_pt *pt = entry->pt;
1917 struct xe_pt_dir *pt_dir;
1918
1919 pt->num_live -= entry->qwords;
1920 if (!pt->level)
1921 continue;
1922
1923 pt_dir = as_xe_pt_dir(pt);
1924 for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) {
1925 entry->pt_entries[j - entry->ofs].pt =
1926 xe_pt_entry_staging(pt_dir, j);
1927 pt_dir->staging[j] = NULL;
1928 }
1929 }
1930 }
1931
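/*
 * Grow the [start, last] interval tracked for the range fence so that it
 * covers the current op's range, rounded out to the size of the highest
 * page-table level touched by that op.
 */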
1932 static void
1933 xe_pt_update_ops_rfence_interval(struct xe_vm_pgtable_update_ops *pt_update_ops,
1934 u64 start, u64 end)
1935 {
1936 u64 last;
1937 u32 current_op = pt_update_ops->current_op;
1938 struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
1939 int i, level = 0;
1940
1941 for (i = 0; i < pt_op->num_entries; i++) {
1942 const struct xe_vm_pgtable_update *entry = &pt_op->entries[i];
1943
1944 if (entry->pt->level > level)
1945 level = entry->pt->level;
1946 }
1947
1948 /* Greedy (non-optimal) calculation but simple */
1949 start = ALIGN_DOWN(start, 0x1ull << xe_pt_shift(level));
1950 last = ALIGN(end, 0x1ull << xe_pt_shift(level)) - 1;
1951
1952 if (start < pt_update_ops->start)
1953 pt_update_ops->start = start;
1954 if (last > pt_update_ops->last)
1955 pt_update_ops->last = last;
1956 }
1957
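/*
 * Reserve dma-resv fence slots on the VMA's BO when the BO does not share the
 * VM's reservation object: one slot per tile, doubled when a media GT is
 * present.
 */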
1958 static int vma_reserve_fences(struct xe_device *xe, struct xe_vma *vma)
1959 {
1960 int shift = xe_device_get_root_tile(xe)->media_gt ? 1 : 0;
1961
1962 if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
1963 return dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv,
1964 xe->info.tile_count << shift);
1965
1966 return 0;
1967 }
1968
1969 static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
1970 struct xe_vm_pgtable_update_ops *pt_update_ops,
1971 struct xe_vma *vma, bool invalidate_on_bind)
1972 {
1973 u32 current_op = pt_update_ops->current_op;
1974 struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
1975 int err;
1976
1977 xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));
1978 xe_bo_assert_held(xe_vma_bo(vma));
1979
1980 vm_dbg(&xe_vma_vm(vma)->xe->drm,
1981 "Preparing bind, with range [%llx...%llx)\n",
1982 xe_vma_start(vma), xe_vma_end(vma) - 1);
1983
1984 pt_op->vma = NULL;
1985 pt_op->bind = true;
1986 pt_op->rebind = BIT(tile->id) & vma->tile_present;
1987
1988 err = vma_reserve_fences(tile_to_xe(tile), vma);
1989 if (err)
1990 return err;
1991
1992 err = xe_pt_prepare_bind(tile, vma, NULL, pt_op->entries,
1993 &pt_op->num_entries, invalidate_on_bind);
1994 if (!err) {
1995 xe_tile_assert(tile, pt_op->num_entries <=
1996 ARRAY_SIZE(pt_op->entries));
1997 xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
1998 pt_op->num_entries, true);
1999
2000 xe_pt_update_ops_rfence_interval(pt_update_ops,
2001 xe_vma_start(vma),
2002 xe_vma_end(vma));
2003 ++pt_update_ops->current_op;
2004 pt_update_ops->needs_svm_lock |= xe_vma_is_userptr(vma);
2005
2006 /*
2007 * If rebind, we have to invalidate the TLB on !LR VMs to invalidate
2008 * cached PTEs pointing to freed memory. On LR VMs this is done
2009 * automatically when the context is re-enabled by the rebind worker,
2010 * or in fault mode it was invalidated on PTE zapping.
2011 *
2012 * If !rebind, and scratch enabled VMs, there is a chance the scratch
2013 * PTE is already cached in the TLB so it needs to be invalidated.
2014 * On !LR VMs this is done in the ring ops preceding a batch, but on
2015 * LR, in particular on user-space batch buffer chaining, it needs to
2016 * be done here.
2017 */
2018 if ((!pt_op->rebind && xe_vm_has_scratch(vm) &&
2019 xe_vm_in_lr_mode(vm)))
2020 pt_update_ops->needs_invalidation = true;
2021 else if (pt_op->rebind && !xe_vm_in_lr_mode(vm))
2022 /* We also bump this if batch_invalidate_tlb is true */
2023 vm->tlb_flush_seqno++;
2024
2025 vma->tile_staged |= BIT(tile->id);
2026 pt_op->vma = vma;
2027 xe_pt_commit_prepare_bind(vma, pt_op->entries,
2028 pt_op->num_entries, pt_op->rebind);
2029 } else {
2030 xe_pt_cancel_bind(vma, pt_op->entries, pt_op->num_entries);
2031 }
2032
2033 return err;
2034 }
2035
2036 static int bind_range_prepare(struct xe_vm *vm, struct xe_tile *tile,
2037 struct xe_vm_pgtable_update_ops *pt_update_ops,
2038 struct xe_vma *vma, struct xe_svm_range *range)
2039 {
2040 u32 current_op = pt_update_ops->current_op;
2041 struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
2042 int err;
2043
2044 xe_tile_assert(tile, xe_vma_is_cpu_addr_mirror(vma));
2045
2046 vm_dbg(&xe_vma_vm(vma)->xe->drm,
2047 "Preparing bind, with range [%lx...%lx)\n",
2048 xe_svm_range_start(range), xe_svm_range_end(range) - 1);
2049
2050 pt_op->vma = NULL;
2051 pt_op->bind = true;
2052 pt_op->rebind = BIT(tile->id) & range->tile_present;
2053
2054 err = xe_pt_prepare_bind(tile, vma, range, pt_op->entries,
2055 &pt_op->num_entries, false);
2056 if (!err) {
2057 xe_tile_assert(tile, pt_op->num_entries <=
2058 ARRAY_SIZE(pt_op->entries));
2059 xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
2060 pt_op->num_entries, true);
2061
2062 xe_pt_update_ops_rfence_interval(pt_update_ops,
2063 xe_svm_range_start(range),
2064 xe_svm_range_end(range));
2065 ++pt_update_ops->current_op;
2066 pt_update_ops->needs_svm_lock = true;
2067
2068 pt_op->vma = vma;
2069 xe_pt_commit_prepare_bind(vma, pt_op->entries,
2070 pt_op->num_entries, pt_op->rebind);
2071 } else {
2072 xe_pt_cancel_bind(vma, pt_op->entries, pt_op->num_entries);
2073 }
2074
2075 return err;
2076 }
2077
2078 static int unbind_op_prepare(struct xe_tile *tile,
2079 struct xe_vm_pgtable_update_ops *pt_update_ops,
2080 struct xe_vma *vma)
2081 {
2082 struct xe_device *xe = tile_to_xe(tile);
2083 u32 current_op = pt_update_ops->current_op;
2084 struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
2085 int err;
2086
2087 if (!((vma->tile_present | vma->tile_staged) & BIT(tile->id)))
2088 return 0;
2089
2090 xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));
2091 xe_bo_assert_held(xe_vma_bo(vma));
2092
2093 vm_dbg(&xe_vma_vm(vma)->xe->drm,
2094 "Preparing unbind, with range [%llx...%llx)\n",
2095 xe_vma_start(vma), xe_vma_end(vma) - 1);
2096
2097 pt_op->vma = vma;
2098 pt_op->bind = false;
2099 pt_op->rebind = false;
2100 /*
2101 * Maintain a single PRL in pt_update_ops that all unbind ops reference.
2102 * Ensure the PRL is allocated only once and, once invalidated, stays invalidated.
2103 */
2104 if (xe->info.has_page_reclaim_hw_assist &&
2105 xe_page_reclaim_list_is_new(&pt_update_ops->prl))
2106 xe_page_reclaim_list_alloc_entries(&pt_update_ops->prl);
2107
2108 /* Page reclaim may not be needed due to other features, so skip the corresponding VMA */
2109 pt_op->prl = (xe_page_reclaim_list_valid(&pt_update_ops->prl) &&
2110 !xe_page_reclaim_skip(tile, vma)) ? &pt_update_ops->prl : NULL;
2111
2112 err = vma_reserve_fences(tile_to_xe(tile), vma);
2113 if (err)
2114 return err;
2115
2116 pt_op->num_entries = xe_pt_stage_unbind(tile, xe_vma_vm(vma),
2117 vma, NULL, pt_op->entries);
2118
2119 xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
2120 pt_op->num_entries, false);
2121 xe_pt_update_ops_rfence_interval(pt_update_ops, xe_vma_start(vma),
2122 xe_vma_end(vma));
2123 ++pt_update_ops->current_op;
2124 pt_update_ops->needs_svm_lock |= xe_vma_is_userptr(vma);
2125 pt_update_ops->needs_invalidation = true;
2126
2127 xe_pt_commit_prepare_unbind(vma, pt_op->entries, pt_op->num_entries);
2128
2129 return 0;
2130 }
2131
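/*
 * Return true when the unbind touches a single update entry whose PTEs do not
 * cover more than the SVM range (level 0, or level 1 for ranges of at least
 * 2M), i.e. when the TLB invalidation can potentially be skipped.
 */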
2132 static bool
2133 xe_pt_op_check_range_skip_invalidation(struct xe_vm_pgtable_update_op *pt_op,
2134 struct xe_svm_range *range)
2135 {
2136 struct xe_vm_pgtable_update *update = pt_op->entries;
2137
2138 XE_WARN_ON(!pt_op->num_entries);
2139
2140 /*
2141 * We can't skip the invalidation if we are removing PTEs that span more
2142 * than the range, so do some checks to ensure we are only removing PTEs
2143 * that are invalid.
2144 */
2145
2146 if (pt_op->num_entries > 1)
2147 return false;
2148
2149 if (update->pt->level == 0)
2150 return true;
2151
2152 if (update->pt->level == 1)
2153 return xe_svm_range_size(range) >= SZ_2M;
2154
2155 return false;
2156 }
2157
2158 static int unbind_range_prepare(struct xe_vm *vm,
2159 struct xe_tile *tile,
2160 struct xe_vm_pgtable_update_ops *pt_update_ops,
2161 struct xe_svm_range *range)
2162 {
2163 u32 current_op = pt_update_ops->current_op;
2164 struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
2165
2166 if (!(range->tile_present & BIT(tile->id)))
2167 return 0;
2168
2169 vm_dbg(&vm->xe->drm,
2170 "Preparing unbind, with range [%lx...%lx)\n",
2171 xe_svm_range_start(range), xe_svm_range_end(range) - 1);
2172
2173 pt_op->vma = XE_INVALID_VMA;
2174 pt_op->bind = false;
2175 pt_op->rebind = false;
2176 pt_op->prl = NULL;
2177
2178 pt_op->num_entries = xe_pt_stage_unbind(tile, vm, NULL, range,
2179 pt_op->entries);
2180
2181 xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
2182 pt_op->num_entries, false);
2183 xe_pt_update_ops_rfence_interval(pt_update_ops, xe_svm_range_start(range),
2184 xe_svm_range_end(range));
2185 ++pt_update_ops->current_op;
2186 pt_update_ops->needs_svm_lock = true;
2187 pt_update_ops->needs_invalidation |= xe_vm_has_scratch(vm) ||
2188 xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
2189 range->tile_invalidated) ||
2190 !xe_pt_op_check_range_skip_invalidation(pt_op, range);
2191
2192 xe_pt_commit_prepare_unbind(XE_INVALID_VMA, pt_op->entries,
2193 pt_op->num_entries);
2194
2195 return 0;
2196 }
2197
2198 static int op_prepare(struct xe_vm *vm,
2199 struct xe_tile *tile,
2200 struct xe_vm_pgtable_update_ops *pt_update_ops,
2201 struct xe_vma_op *op)
2202 {
2203 int err = 0;
2204
2205 xe_vm_assert_held(vm);
2206
2207 switch (op->base.op) {
2208 case DRM_GPUVA_OP_MAP:
2209 if ((!op->map.immediate && xe_vm_in_fault_mode(vm) &&
2210 !op->map.invalidate_on_bind) ||
2211 (op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR))
2212 break;
2213
2214 err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma,
2215 op->map.invalidate_on_bind);
2216 pt_update_ops->wait_vm_kernel = true;
2217 break;
2218 case DRM_GPUVA_OP_REMAP:
2219 {
2220 struct xe_vma *old = gpuva_to_vma(op->base.remap.unmap->va);
2221
2222 if (xe_vma_is_cpu_addr_mirror(old))
2223 break;
2224
2225 err = unbind_op_prepare(tile, pt_update_ops, old);
2226
2227 if (!err && op->remap.prev && !op->remap.skip_prev) {
2228 err = bind_op_prepare(vm, tile, pt_update_ops,
2229 op->remap.prev, false);
2230 pt_update_ops->wait_vm_bookkeep = true;
2231 }
2232 if (!err && op->remap.next && !op->remap.skip_next) {
2233 err = bind_op_prepare(vm, tile, pt_update_ops,
2234 op->remap.next, false);
2235 pt_update_ops->wait_vm_bookkeep = true;
2236 }
2237 break;
2238 }
2239 case DRM_GPUVA_OP_UNMAP:
2240 {
2241 struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2242
2243 if (xe_vma_is_cpu_addr_mirror(vma))
2244 break;
2245
2246 err = unbind_op_prepare(tile, pt_update_ops, vma);
2247 break;
2248 }
2249 case DRM_GPUVA_OP_PREFETCH:
2250 {
2251 struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
2252
2253 if (xe_vma_is_cpu_addr_mirror(vma)) {
2254 struct xe_svm_range *range;
2255 unsigned long i;
2256
2257 xa_for_each(&op->prefetch_range.range, i, range) {
2258 err = bind_range_prepare(vm, tile, pt_update_ops,
2259 vma, range);
2260 if (err)
2261 return err;
2262 }
2263 } else {
2264 err = bind_op_prepare(vm, tile, pt_update_ops, vma, false);
2265 pt_update_ops->wait_vm_kernel = true;
2266 }
2267 break;
2268 }
2269 case DRM_GPUVA_OP_DRIVER:
2270 if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
2271 xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
2272
2273 err = bind_range_prepare(vm, tile, pt_update_ops,
2274 op->map_range.vma,
2275 op->map_range.range);
2276 } else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) {
2277 err = unbind_range_prepare(vm, tile, pt_update_ops,
2278 op->unmap_range.range);
2279 }
2280 break;
2281 default:
2282 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2283 }
2284
2285 return err;
2286 }
2287
2288 static void
2289 xe_pt_update_ops_init(struct xe_vm_pgtable_update_ops *pt_update_ops)
2290 {
2291 init_llist_head(&pt_update_ops->deferred);
2292 pt_update_ops->start = ~0x0ull;
2293 pt_update_ops->last = 0x0ull;
2294 xe_page_reclaim_list_init(&pt_update_ops->prl);
2295 }
2296
2297 /**
2298 * xe_pt_update_ops_prepare() - Prepare PT update operations
2299 * @tile: Tile of PT update operations
2300 * @vops: VMA operations
2301 *
2302 * Prepare PT update operations, which includes updating internal PT state,
2303 * allocating memory for page tables, populating the page tables being pruned in,
2304 * and creating PT update operations for leaf insertion / removal.
2305 *
2306 * Return: 0 on success, negative error code on error.
2307 */
2308 int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
2309 {
2310 struct xe_vm_pgtable_update_ops *pt_update_ops =
2311 &vops->pt_update_ops[tile->id];
2312 struct xe_vma_op *op;
2313 int shift = tile->media_gt ? 1 : 0;
2314 int err;
2315
2316 lockdep_assert_held(&vops->vm->lock);
2317 xe_vm_assert_held(vops->vm);
2318
2319 xe_pt_update_ops_init(pt_update_ops);
2320
2321 err = dma_resv_reserve_fences(xe_vm_resv(vops->vm),
2322 tile_to_xe(tile)->info.tile_count << shift);
2323 if (err)
2324 return err;
2325
2326 list_for_each_entry(op, &vops->list, link) {
2327 err = op_prepare(vops->vm, tile, pt_update_ops, op);
2328
2329 if (err)
2330 return err;
2331 }
2332
2333 xe_tile_assert(tile, pt_update_ops->current_op <=
2334 pt_update_ops->num_ops);
2335
2336 #ifdef TEST_VM_OPS_ERROR
2337 if (vops->inject_error &&
2338 vops->vm->xe->vm_inject_error_position == FORCE_OP_ERROR_PREPARE)
2339 return -ENOSPC;
2340 #endif
2341
2342 return 0;
2343 }
2344 ALLOW_ERROR_INJECTION(xe_pt_update_ops_prepare, ERRNO);
2345
2346 static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
2347 struct xe_vm_pgtable_update_ops *pt_update_ops,
2348 struct xe_vma *vma, struct dma_fence *fence,
2349 struct dma_fence *fence2, bool invalidate_on_bind)
2350 {
2351 xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));
2352
2353 if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
2354 dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
2355 pt_update_ops->wait_vm_bookkeep ?
2356 DMA_RESV_USAGE_KERNEL :
2357 DMA_RESV_USAGE_BOOKKEEP);
2358 if (fence2)
2359 dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2,
2360 pt_update_ops->wait_vm_bookkeep ?
2361 DMA_RESV_USAGE_KERNEL :
2362 DMA_RESV_USAGE_BOOKKEEP);
2363 }
2364 /* All WRITE_ONCE pair with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
2365 WRITE_ONCE(vma->tile_present, vma->tile_present | BIT(tile->id));
2366 if (invalidate_on_bind)
2367 WRITE_ONCE(vma->tile_invalidated,
2368 vma->tile_invalidated | BIT(tile->id));
2369 else
2370 WRITE_ONCE(vma->tile_invalidated,
2371 vma->tile_invalidated & ~BIT(tile->id));
2372 vma->tile_staged &= ~BIT(tile->id);
2373 if (xe_vma_is_userptr(vma)) {
2374 xe_svm_assert_held_read(vm);
2375 to_userptr_vma(vma)->userptr.initial_bind = true;
2376 }
2377
2378 /*
2379 * Kick rebind worker if this bind triggers preempt fences and not in
2380 * the rebind worker
2381 */
2382 if (pt_update_ops->wait_vm_bookkeep &&
2383 xe_vm_in_preempt_fence_mode(vm) &&
2384 !current->mm)
2385 xe_vm_queue_rebind_worker(vm);
2386 }
2387
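/*
 * Commit an unbind: add the unbind fence(s) to an external BO's reservation
 * object, clear the VMA's present bit for this tile and, once no tile maps the
 * VMA, drop it from the rebind and userptr-invalidated lists.
 */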
2388 static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
2389 struct xe_vm_pgtable_update_ops *pt_update_ops,
2390 struct xe_vma *vma, struct dma_fence *fence,
2391 struct dma_fence *fence2)
2392 {
2393 xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));
2394
2395 if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
2396 dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
2397 pt_update_ops->wait_vm_bookkeep ?
2398 DMA_RESV_USAGE_KERNEL :
2399 DMA_RESV_USAGE_BOOKKEEP);
2400 if (fence2)
2401 dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2,
2402 pt_update_ops->wait_vm_bookkeep ?
2403 DMA_RESV_USAGE_KERNEL :
2404 DMA_RESV_USAGE_BOOKKEEP);
2405 }
2406 vma->tile_present &= ~BIT(tile->id);
2407 if (!vma->tile_present) {
2408 list_del_init(&vma->combined_links.rebind);
2409 if (xe_vma_is_userptr(vma)) {
2410 xe_svm_assert_held_read(vm);
2411
2412 spin_lock(&vm->userptr.invalidated_lock);
2413 list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
2414 spin_unlock(&vm->userptr.invalidated_lock);
2415 }
2416 }
2417 }
2418
2419 static void range_present_and_invalidated_tile(struct xe_vm *vm,
2420 struct xe_svm_range *range,
2421 u8 tile_id)
2422 {
2423 /* All WRITE_ONCE pair with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
2424
2425 lockdep_assert_held(&vm->svm.gpusvm.notifier_lock);
2426
2427 WRITE_ONCE(range->tile_present, range->tile_present | BIT(tile_id));
2428 WRITE_ONCE(range->tile_invalidated, range->tile_invalidated & ~BIT(tile_id));
2429 }
2430
2431 static void op_commit(struct xe_vm *vm,
2432 struct xe_tile *tile,
2433 struct xe_vm_pgtable_update_ops *pt_update_ops,
2434 struct xe_vma_op *op, struct dma_fence *fence,
2435 struct dma_fence *fence2)
2436 {
2437 xe_vm_assert_held(vm);
2438
2439 switch (op->base.op) {
2440 case DRM_GPUVA_OP_MAP:
2441 if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) ||
2442 (op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR))
2443 break;
2444
2445 bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,
2446 fence2, op->map.invalidate_on_bind);
2447 break;
2448 case DRM_GPUVA_OP_REMAP:
2449 {
2450 struct xe_vma *old = gpuva_to_vma(op->base.remap.unmap->va);
2451
2452 if (xe_vma_is_cpu_addr_mirror(old))
2453 break;
2454
2455 unbind_op_commit(vm, tile, pt_update_ops, old, fence, fence2);
2456
2457 if (op->remap.prev && !op->remap.skip_prev)
2458 bind_op_commit(vm, tile, pt_update_ops, op->remap.prev,
2459 fence, fence2, false);
2460 if (op->remap.next && !op->remap.skip_next)
2461 bind_op_commit(vm, tile, pt_update_ops, op->remap.next,
2462 fence, fence2, false);
2463 break;
2464 }
2465 case DRM_GPUVA_OP_UNMAP:
2466 {
2467 struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2468
2469 if (!xe_vma_is_cpu_addr_mirror(vma))
2470 unbind_op_commit(vm, tile, pt_update_ops, vma, fence,
2471 fence2);
2472 break;
2473 }
2474 case DRM_GPUVA_OP_PREFETCH:
2475 {
2476 struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
2477
2478 if (xe_vma_is_cpu_addr_mirror(vma)) {
2479 struct xe_svm_range *range = NULL;
2480 unsigned long i;
2481
2482 xa_for_each(&op->prefetch_range.range, i, range)
2483 range_present_and_invalidated_tile(vm, range, tile->id);
2484 } else {
2485 bind_op_commit(vm, tile, pt_update_ops, vma, fence,
2486 fence2, false);
2487 }
2488 break;
2489 }
2490 case DRM_GPUVA_OP_DRIVER:
2491 {
2492 /* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
2493 if (op->subop == XE_VMA_SUBOP_MAP_RANGE)
2494 range_present_and_invalidated_tile(vm, op->map_range.range, tile->id);
2495 else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
2496 WRITE_ONCE(op->unmap_range.range->tile_present,
2497 op->unmap_range.range->tile_present &
2498 ~BIT(tile->id));
2499
2500 break;
2501 }
2502 default:
2503 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2504 }
2505 }
2506
2507 static const struct xe_migrate_pt_update_ops migrate_ops = {
2508 .populate = xe_vm_populate_pgtable,
2509 .clear = xe_migrate_clear_pgtable_callback,
2510 .pre_commit = xe_pt_pre_commit,
2511 };
2512
2513 #if IS_ENABLED(CONFIG_DRM_GPUSVM)
2514 static const struct xe_migrate_pt_update_ops svm_userptr_migrate_ops = {
2515 .populate = xe_vm_populate_pgtable,
2516 .clear = xe_migrate_clear_pgtable_callback,
2517 .pre_commit = xe_pt_svm_userptr_pre_commit,
2518 };
2519 #else
2520 static const struct xe_migrate_pt_update_ops svm_userptr_migrate_ops;
2521 #endif
2522
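/* Pick the TLB-invalidation dependency scheduler matching the GT type. */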
2523 static struct xe_dep_scheduler *to_dep_scheduler(struct xe_exec_queue *q,
2524 struct xe_gt *gt)
2525 {
2526 if (xe_gt_is_media_type(gt))
2527 return q->tlb_inval[XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT].dep_scheduler;
2528
2529 return q->tlb_inval[XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT].dep_scheduler;
2530 }
2531
2532 /**
2533 * xe_pt_update_ops_run() - Run PT update operations
2534 * @tile: Tile of PT update operations
2535 * @vops: VMA operations
2536 *
2537 * Run PT update operations, which includes committing internal PT state changes,
2538 * creating a job for the PT update operations for leaf insertion / removal, and
2539 * installing the job fence in various places.
2540 *
2541 * Return: fence on success, negative ERR_PTR on error.
2542 */
2543 struct dma_fence *
2544 xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
2545 {
2546 struct xe_vm *vm = vops->vm;
2547 struct xe_vm_pgtable_update_ops *pt_update_ops =
2548 &vops->pt_update_ops[tile->id];
2549 struct xe_exec_queue *q = pt_update_ops->q;
2550 struct dma_fence *fence, *ifence = NULL, *mfence = NULL;
2551 struct xe_tlb_inval_job *ijob = NULL, *mjob = NULL;
2552 struct xe_range_fence *rfence;
2553 struct xe_vma_op *op;
2554 int err = 0, i;
2555 struct xe_migrate_pt_update update = {
2556 .ops = pt_update_ops->needs_svm_lock ?
2557 &svm_userptr_migrate_ops :
2558 &migrate_ops,
2559 .vops = vops,
2560 .tile_id = tile->id,
2561 };
2562
2563 lockdep_assert_held(&vm->lock);
2564 xe_vm_assert_held(vm);
2565
2566 if (!pt_update_ops->current_op) {
2567 xe_tile_assert(tile, xe_vm_in_fault_mode(vm));
2568
2569 return dma_fence_get_stub();
2570 }
2571
2572 #ifdef TEST_VM_OPS_ERROR
2573 if (vops->inject_error &&
2574 vm->xe->vm_inject_error_position == FORCE_OP_ERROR_RUN)
2575 return ERR_PTR(-ENOSPC);
2576 #endif
2577
2578 if (pt_update_ops->needs_invalidation) {
2579 struct xe_dep_scheduler *dep_scheduler =
2580 to_dep_scheduler(q, tile->primary_gt);
2581
2582 ijob = xe_tlb_inval_job_create(q, &tile->primary_gt->tlb_inval,
2583 dep_scheduler, vm,
2584 pt_update_ops->start,
2585 pt_update_ops->last,
2586 XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
2587 if (IS_ERR(ijob)) {
2588 err = PTR_ERR(ijob);
2589 goto kill_vm_tile1;
2590 }
2591 update.ijob = ijob;
2592 /*
2593 * Only add page reclaim for the primary GT. Media GT does not have
2594 * any PPC to flush, so enabling the PPC flush bit for media is
2595 * effectively a NOP: it provides no performance benefit and does not
2596 * interfere with the primary GT.
2597 */
2598 if (xe_page_reclaim_list_valid(&pt_update_ops->prl)) {
2599 xe_tlb_inval_job_add_page_reclaim(ijob, &pt_update_ops->prl);
2600 /* Release ref from alloc, job will now handle it */
2601 xe_page_reclaim_list_invalidate(&pt_update_ops->prl);
2602 }
2603
2604 if (tile->media_gt) {
2605 dep_scheduler = to_dep_scheduler(q, tile->media_gt);
2606
2607 mjob = xe_tlb_inval_job_create(q,
2608 &tile->media_gt->tlb_inval,
2609 dep_scheduler, vm,
2610 pt_update_ops->start,
2611 pt_update_ops->last,
2612 XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT);
2613 if (IS_ERR(mjob)) {
2614 err = PTR_ERR(mjob);
2615 goto free_ijob;
2616 }
2617 update.mjob = mjob;
2618 }
2619 }
2620
2621 rfence = kzalloc_obj(*rfence);
2622 if (!rfence) {
2623 err = -ENOMEM;
2624 goto free_ijob;
2625 }
2626
2627 fence = xe_migrate_update_pgtables(tile->migrate, &update);
2628 if (IS_ERR(fence)) {
2629 err = PTR_ERR(fence);
2630 goto free_rfence;
2631 }
2632
2633 /* Point of no return - VM killed if failure after this */
2634 for (i = 0; i < pt_update_ops->current_op; ++i) {
2635 struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
2636
2637 xe_pt_commit(pt_op->vma, pt_op->entries,
2638 pt_op->num_entries, &pt_update_ops->deferred);
2639 pt_op->vma = NULL; /* skip in xe_pt_update_ops_abort */
2640 }
2641
2642 if (xe_range_fence_insert(&vm->rftree[tile->id], rfence,
2643 &xe_range_fence_kfree_ops,
2644 pt_update_ops->start,
2645 pt_update_ops->last, fence))
2646 dma_fence_wait(fence, false);
2647
2648 if (ijob)
2649 ifence = xe_tlb_inval_job_push(ijob, tile->migrate, fence);
2650 if (mjob)
2651 mfence = xe_tlb_inval_job_push(mjob, tile->migrate, fence);
2652
2653 if (!mjob && !ijob) {
2654 dma_resv_add_fence(xe_vm_resv(vm), fence,
2655 pt_update_ops->wait_vm_bookkeep ?
2656 DMA_RESV_USAGE_KERNEL :
2657 DMA_RESV_USAGE_BOOKKEEP);
2658
2659 list_for_each_entry(op, &vops->list, link)
2660 op_commit(vops->vm, tile, pt_update_ops, op, fence, NULL);
2661 } else if (ijob && !mjob) {
2662 dma_resv_add_fence(xe_vm_resv(vm), ifence,
2663 pt_update_ops->wait_vm_bookkeep ?
2664 DMA_RESV_USAGE_KERNEL :
2665 DMA_RESV_USAGE_BOOKKEEP);
2666
2667 list_for_each_entry(op, &vops->list, link)
2668 op_commit(vops->vm, tile, pt_update_ops, op, ifence, NULL);
2669 } else {
2670 dma_resv_add_fence(xe_vm_resv(vm), ifence,
2671 pt_update_ops->wait_vm_bookkeep ?
2672 DMA_RESV_USAGE_KERNEL :
2673 DMA_RESV_USAGE_BOOKKEEP);
2674
2675 dma_resv_add_fence(xe_vm_resv(vm), mfence,
2676 pt_update_ops->wait_vm_bookkeep ?
2677 DMA_RESV_USAGE_KERNEL :
2678 DMA_RESV_USAGE_BOOKKEEP);
2679
2680 list_for_each_entry(op, &vops->list, link)
2681 op_commit(vops->vm, tile, pt_update_ops, op, ifence,
2682 mfence);
2683 }
2684
2685 if (pt_update_ops->needs_svm_lock)
2686 xe_svm_notifier_unlock(vm);
2687
2688 /*
2689 * The last fence is only used for zero bind queue idling; migrate
2690 * queues are not exposed to user space.
2691 */
2692 if (!(q->flags & EXEC_QUEUE_FLAG_MIGRATE))
2693 xe_exec_queue_last_fence_set(q, vm, fence);
2694
2695 xe_tlb_inval_job_put(mjob);
2696 xe_tlb_inval_job_put(ijob);
2697 dma_fence_put(ifence);
2698 dma_fence_put(mfence);
2699
2700 return fence;
2701
2702 free_rfence:
2703 kfree(rfence);
2704 free_ijob:
2705 xe_tlb_inval_job_put(mjob);
2706 xe_tlb_inval_job_put(ijob);
2707 kill_vm_tile1:
2708 if (err != -EAGAIN && err != -ENODATA && tile->id)
2709 xe_vm_kill(vops->vm, false);
2710
2711 return ERR_PTR(err);
2712 }
2713 ALLOW_ERROR_INJECTION(xe_pt_update_ops_run, ERRNO);
2714
2715 /**
2716 * xe_pt_update_ops_fini() - Finish PT update operations
2717 * @tile: Tile of PT update operations
2718 * @vops: VMA operations
2719 *
2720 * Finish PT update operations by committing to destroy page table memory
2721 */
2722 void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops)
2723 {
2724 struct xe_vm_pgtable_update_ops *pt_update_ops =
2725 &vops->pt_update_ops[tile->id];
2726 int i;
2727
2728 xe_page_reclaim_entries_put(pt_update_ops->prl.entries);
2729
2730 lockdep_assert_held(&vops->vm->lock);
2731 xe_vm_assert_held(vops->vm);
2732
2733 for (i = 0; i < pt_update_ops->current_op; ++i) {
2734 struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
2735
2736 xe_pt_free_bind(pt_op->entries, pt_op->num_entries);
2737 }
2738 xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred);
2739 }
2740
2741 /**
2742 * xe_pt_update_ops_abort() - Abort PT update operations
2743 * @tile: Tile of PT update operations
2744 * @vops: VMA operations
2745 *
2746 * Abort PT update operations by unwinding internal PT state
2747 */
2748 void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops)
2749 {
2750 struct xe_vm_pgtable_update_ops *pt_update_ops =
2751 &vops->pt_update_ops[tile->id];
2752 int i;
2753
2754 lockdep_assert_held(&vops->vm->lock);
2755 xe_vm_assert_held(vops->vm);
2756
2757 for (i = pt_update_ops->num_ops - 1; i >= 0; --i) {
2758 struct xe_vm_pgtable_update_op *pt_op =
2759 &pt_update_ops->ops[i];
2760
2761 if (!pt_op->vma || i >= pt_update_ops->current_op)
2762 continue;
2763
2764 if (pt_op->bind)
2765 xe_pt_abort_bind(pt_op->vma, pt_op->entries,
2766 pt_op->num_entries,
2767 pt_op->rebind);
2768 else
2769 xe_pt_abort_unbind(pt_op->vma, pt_op->entries,
2770 pt_op->num_entries);
2771 }
2772
2773 xe_pt_update_ops_fini(tile, vops);
2774 }
2775