1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2024 Intel Corporation
4 */
5
6 #include <drm/drm_drv.h>
7
8 #include "xe_bo.h"
9 #include "xe_exec_queue_types.h"
10 #include "xe_gt_stats.h"
11 #include "xe_migrate.h"
12 #include "xe_module.h"
13 #include "xe_pm.h"
14 #include "xe_pt.h"
15 #include "xe_svm.h"
16 #include "xe_tile.h"
17 #include "xe_ttm_vram_mgr.h"
18 #include "xe_vm.h"
19 #include "xe_vm_types.h"
20 #include "xe_vram_types.h"
21
static bool xe_svm_range_in_vram(struct xe_svm_range *range)
23 {
24 /*
25 * Advisory only check whether the range is currently backed by VRAM
26 * memory.
27 */
28
29 struct drm_gpusvm_pages_flags flags = {
30 /* Pairs with WRITE_ONCE in drm_gpusvm.c */
31 .__flags = READ_ONCE(range->base.pages.flags.__flags),
32 };
33
34 return flags.has_devmem_pages;
35 }
36
static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
38 {
39 /* Not reliable without notifier lock */
40 return xe_svm_range_in_vram(range) && range->tile_present;
41 }
42
static struct xe_vm *gpusvm_to_vm(struct drm_gpusvm *gpusvm)
44 {
45 return container_of(gpusvm, struct xe_vm, svm.gpusvm);
46 }
47
static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
49 {
50 return gpusvm_to_vm(r->gpusvm);
51 }
52
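/*
 * range_debug() - emit a one-line vm_dbg() trace for an SVM range: the VM's
 * asid, the gpusvm pointer, advisory VRAM residency/binding state, the
 * notifier seqno, and the range start/end/size.
 */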
53 #define range_debug(r__, operation__) \
54 vm_dbg(&range_to_vm(&(r__)->base)->xe->drm, \
55 "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
56 "start=0x%014lx, end=0x%014lx, size=%lu", \
57 (operation__), range_to_vm(&(r__)->base)->usm.asid, \
58 (r__)->base.gpusvm, \
59 xe_svm_range_in_vram((r__)) ? 1 : 0, \
60 xe_svm_range_has_vram_binding((r__)) ? 1 : 0, \
61 (r__)->base.pages.notifier_seq, \
62 xe_svm_range_start((r__)), xe_svm_range_end((r__)), \
63 xe_svm_range_size((r__)))
64
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
66 {
67 range_debug(range, operation);
68 }
69
70 static struct drm_gpusvm_range *
xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
72 {
73 struct xe_svm_range *range;
74
75 range = kzalloc(sizeof(*range), GFP_KERNEL);
76 if (!range)
77 return NULL;
78
79 INIT_LIST_HEAD(&range->garbage_collector_link);
80 xe_vm_get(gpusvm_to_vm(gpusvm));
81
82 return &range->base;
83 }
84
static void xe_svm_range_free(struct drm_gpusvm_range *range)
86 {
87 xe_vm_put(range_to_vm(range));
88 kfree(range);
89 }
90
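/*
 * Note: ranges torn down by the CPU (e.g. munmap) cannot be fully unbound
 * from the MMU notifier context, so they are queued here and cleaned up later
 * by the garbage-collector worker or by the next page-fault handler run.
 */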
91 static void
xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
				   const struct mmu_notifier_range *mmu_range)
94 {
95 struct xe_device *xe = vm->xe;
96
97 range_debug(range, "GARBAGE COLLECTOR ADD");
98
99 drm_gpusvm_range_set_unmapped(&range->base, mmu_range);
100
101 spin_lock(&vm->svm.garbage_collector.lock);
102 if (list_empty(&range->garbage_collector_link))
103 list_add_tail(&range->garbage_collector_link,
104 &vm->svm.garbage_collector.range_list);
105 spin_unlock(&vm->svm.garbage_collector.lock);
106
107 queue_work(xe->usm.pf_wq, &vm->svm.garbage_collector.work);
108 }
109
static void xe_svm_tlb_inval_count_stats_incr(struct xe_gt *gt)
111 {
112 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_COUNT, 1);
113 }
114
115 static u8
xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
				  const struct mmu_notifier_range *mmu_range,
				  u64 *adj_start, u64 *adj_end)
119 {
120 struct xe_svm_range *range = to_xe_range(r);
121 struct xe_device *xe = vm->xe;
122 struct xe_tile *tile;
123 u8 tile_mask = 0;
124 u8 id;
125
126 xe_svm_assert_in_notifier(vm);
127
128 range_debug(range, "NOTIFIER");
129
	/* Skip if already unmapped or if no bindings exist */
131 if (range->base.pages.flags.unmapped || !range->tile_present)
132 return 0;
133
134 range_debug(range, "NOTIFIER - EXECUTE");
135
136 /* Adjust invalidation to range boundaries */
137 *adj_start = min(xe_svm_range_start(range), mmu_range->start);
138 *adj_end = max(xe_svm_range_end(range), mmu_range->end);
139
140 /*
141 * XXX: Ideally would zap PTEs in one shot in xe_svm_invalidate but the
142 * invalidation code can't correctly cope with sparse ranges or
143 * invalidations spanning multiple ranges.
144 */
145 for_each_tile(tile, xe, id)
146 if (xe_pt_zap_ptes_range(tile, vm, range)) {
147 /*
148 * WRITE_ONCE pairs with READ_ONCE in
149 * xe_vm_has_valid_gpu_mapping()
150 */
151 WRITE_ONCE(range->tile_invalidated,
152 range->tile_invalidated | BIT(id));
153
154 if (!(tile_mask & BIT(id))) {
155 xe_svm_tlb_inval_count_stats_incr(tile->primary_gt);
156 if (tile->media_gt)
157 xe_svm_tlb_inval_count_stats_incr(tile->media_gt);
158 tile_mask |= BIT(id);
159 }
160 }
161
162 return tile_mask;
163 }
164
165 static void
xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r,
				const struct mmu_notifier_range *mmu_range)
168 {
169 struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
170
171 xe_svm_assert_in_notifier(vm);
172
173 drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx);
174 if (!xe_vm_is_closed(vm) && mmu_range->event == MMU_NOTIFY_UNMAP)
175 xe_svm_garbage_collector_add_range(vm, to_xe_range(r),
176 mmu_range);
177 }
178
static s64 xe_svm_stats_ktime_us_delta(ktime_t start)
180 {
181 return IS_ENABLED(CONFIG_DEBUG_FS) ?
182 ktime_us_delta(ktime_get(), start) : 0;
183 }
184
static void xe_svm_tlb_inval_us_stats_incr(struct xe_gt *gt, ktime_t start)
186 {
187 s64 us_delta = xe_svm_stats_ktime_us_delta(start);
188
189 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_US, us_delta);
190 }
191
static ktime_t xe_svm_stats_ktime_get(void)
193 {
194 return IS_ENABLED(CONFIG_DEBUG_FS) ? ktime_get() : 0;
195 }
196
static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
			      struct drm_gpusvm_notifier *notifier,
			      const struct mmu_notifier_range *mmu_range)
200 {
201 struct xe_vm *vm = gpusvm_to_vm(gpusvm);
202 struct xe_device *xe = vm->xe;
203 struct drm_gpusvm_range *r, *first;
204 struct xe_tile *tile;
205 ktime_t start = xe_svm_stats_ktime_get();
206 u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
207 u8 tile_mask = 0, id;
208 long err;
209
210 xe_svm_assert_in_notifier(vm);
211
212 vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm,
213 "INVALIDATE: asid=%u, gpusvm=%p, seqno=%lu, start=0x%016lx, end=0x%016lx, event=%d",
214 vm->usm.asid, gpusvm, notifier->notifier.invalidate_seq,
215 mmu_range->start, mmu_range->end, mmu_range->event);
216
217 /* Adjust invalidation to notifier boundaries */
218 adj_start = max(drm_gpusvm_notifier_start(notifier), adj_start);
219 adj_end = min(drm_gpusvm_notifier_end(notifier), adj_end);
220
221 first = drm_gpusvm_range_find(notifier, adj_start, adj_end);
222 if (!first)
223 return;
224
	/*
	 * PTs may be getting destroyed, so it is not safe to touch them, but
	 * the PTs should already be invalidated at this point. Regardless, we
	 * still need to ensure any DMA mappings are unmapped here.
	 */
230 if (xe_vm_is_closed(vm))
231 goto range_notifier_event_end;
232
	/*
	 * XXX: Less than ideal to always wait on the VM's resv slots if an
	 * invalidation is not required. Could walk the range list twice to
	 * figure out if an invalidation is needed, but that is also not ideal.
	 */
238 err = dma_resv_wait_timeout(xe_vm_resv(vm),
239 DMA_RESV_USAGE_BOOKKEEP,
240 false, MAX_SCHEDULE_TIMEOUT);
241 XE_WARN_ON(err <= 0);
242
243 r = first;
244 drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
245 tile_mask |= xe_svm_range_notifier_event_begin(vm, r, mmu_range,
246 &adj_start,
247 &adj_end);
248 if (!tile_mask)
249 goto range_notifier_event_end;
250
251 xe_device_wmb(xe);
252
253 err = xe_vm_range_tilemask_tlb_inval(vm, adj_start, adj_end, tile_mask);
254 WARN_ON_ONCE(err);
255
256 range_notifier_event_end:
257 r = first;
258 drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
259 xe_svm_range_notifier_event_end(vm, r, mmu_range);
260 for_each_tile(tile, xe, id) {
261 if (tile_mask & BIT(id)) {
262 xe_svm_tlb_inval_us_stats_incr(tile->primary_gt, start);
263 if (tile->media_gt)
264 xe_svm_tlb_inval_us_stats_incr(tile->media_gt, start);
265 }
266 }
267 }
268
static int __xe_svm_garbage_collector(struct xe_vm *vm,
				      struct xe_svm_range *range)
271 {
272 struct dma_fence *fence;
273
274 range_debug(range, "GARBAGE COLLECTOR");
275
276 xe_vm_lock(vm, false);
277 fence = xe_vm_range_unbind(vm, range);
278 xe_vm_unlock(vm);
279 if (IS_ERR(fence))
280 return PTR_ERR(fence);
281 dma_fence_put(fence);
282
283 drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);
284
285 return 0;
286 }
287
static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64 range_end)
289 {
290 struct xe_vma *vma;
291 struct xe_vma_mem_attr default_attr = {
292 .preferred_loc = {
293 .devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
294 .migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
295 },
296 .atomic_access = DRM_XE_ATOMIC_UNDEFINED,
297 };
298 int err = 0;
299
300 vma = xe_vm_find_vma_by_addr(vm, range_start);
301 if (!vma)
302 return -EINVAL;
303
304 if (!(vma->gpuva.flags & XE_VMA_MADV_AUTORESET)) {
305 drm_dbg(&vm->xe->drm, "Skipping madvise reset for vma.\n");
306 return 0;
307 }
308
309 if (xe_vma_has_default_mem_attrs(vma))
310 return 0;
311
312 vm_dbg(&vm->xe->drm, "Existing VMA start=0x%016llx, vma_end=0x%016llx",
313 xe_vma_start(vma), xe_vma_end(vma));
314
315 if (xe_vma_start(vma) == range_start && xe_vma_end(vma) == range_end) {
316 default_attr.pat_index = vma->attr.default_pat_index;
317 default_attr.default_pat_index = vma->attr.default_pat_index;
318 vma->attr = default_attr;
319 } else {
320 vm_dbg(&vm->xe->drm, "Split VMA start=0x%016llx, vma_end=0x%016llx",
321 range_start, range_end);
322 err = xe_vm_alloc_cpu_addr_mirror_vma(vm, range_start, range_end - range_start);
323 if (err) {
324 drm_warn(&vm->xe->drm, "VMA SPLIT failed: %pe\n", ERR_PTR(err));
325 xe_vm_kill(vm, true);
326 return err;
327 }
328 }
329
	/*
	 * When called from xe_svm_handle_pagefault the original VMA might have
	 * changed; signal this so the caller looks up the VMA again.
	 */
334 return -EAGAIN;
335 }
336
static int xe_svm_garbage_collector(struct xe_vm *vm)
338 {
339 struct xe_svm_range *range;
340 u64 range_start;
341 u64 range_end;
342 int err, ret = 0;
343
344 lockdep_assert_held_write(&vm->lock);
345
346 if (xe_vm_is_closed_or_banned(vm))
347 return -ENOENT;
348
349 for (;;) {
350 spin_lock(&vm->svm.garbage_collector.lock);
351 range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
352 typeof(*range),
353 garbage_collector_link);
354 if (!range)
355 break;
356
357 range_start = xe_svm_range_start(range);
358 range_end = xe_svm_range_end(range);
359
360 list_del(&range->garbage_collector_link);
361 spin_unlock(&vm->svm.garbage_collector.lock);
362
363 err = __xe_svm_garbage_collector(vm, range);
364 if (err) {
365 drm_warn(&vm->xe->drm,
366 "Garbage collection failed: %pe\n",
367 ERR_PTR(err));
368 xe_vm_kill(vm, true);
369 return err;
370 }
371
372 err = xe_svm_range_set_default_attr(vm, range_start, range_end);
373 if (err) {
374 if (err == -EAGAIN)
375 ret = -EAGAIN;
376 else
377 return err;
378 }
379 }
380 spin_unlock(&vm->svm.garbage_collector.lock);
381
382 return ret;
383 }
384
static void xe_svm_garbage_collector_work_func(struct work_struct *w)
386 {
387 struct xe_vm *vm = container_of(w, struct xe_vm,
388 svm.garbage_collector.work);
389
390 down_write(&vm->lock);
391 xe_svm_garbage_collector(vm);
392 up_write(&vm->lock);
393 }
394
395 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
396
static struct xe_vram_region *page_to_vr(struct page *page)
398 {
399 return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
400 }
401
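/*
 * Device physical address (DPA) = dpa_base + (host PA - hpa_base). As a
 * purely illustrative example: with hpa_base = 0x8_0000_0000 and dpa_base = 0,
 * a device-private page at host PA 0x8_0020_0000 maps to DPA 0x20_0000.
 */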
static u64 xe_vram_region_page_to_dpa(struct xe_vram_region *vr,
				      struct page *page)
404 {
405 u64 dpa;
406 u64 pfn = page_to_pfn(page);
407 u64 offset;
408
409 xe_assert(vr->xe, is_device_private_page(page));
410 xe_assert(vr->xe, (pfn << PAGE_SHIFT) >= vr->hpa_base);
411
412 offset = (pfn << PAGE_SHIFT) - vr->hpa_base;
413 dpa = vr->dpa_base + offset;
414
415 return dpa;
416 }
417
418 enum xe_svm_copy_dir {
419 XE_SVM_COPY_TO_VRAM,
420 XE_SVM_COPY_TO_SRAM,
421 };
422
static void xe_svm_copy_kb_stats_incr(struct xe_gt *gt,
				      const enum xe_svm_copy_dir dir,
				      int kb)
426 {
427 if (dir == XE_SVM_COPY_TO_VRAM)
428 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_KB, kb);
429 else
430 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_KB, kb);
431 }
432
static void xe_svm_copy_us_stats_incr(struct xe_gt *gt,
				      const enum xe_svm_copy_dir dir,
				      unsigned long npages,
				      ktime_t start)
437 {
438 s64 us_delta = xe_svm_stats_ktime_us_delta(start);
439
440 if (dir == XE_SVM_COPY_TO_VRAM) {
441 switch (npages) {
442 case 1:
443 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_DEVICE_COPY_US,
444 us_delta);
445 break;
446 case 16:
447 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_DEVICE_COPY_US,
448 us_delta);
449 break;
450 case 512:
451 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_DEVICE_COPY_US,
452 us_delta);
453 break;
454 }
455 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_US,
456 us_delta);
457 } else {
458 switch (npages) {
459 case 1:
460 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_CPU_COPY_US,
461 us_delta);
462 break;
463 case 16:
464 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_CPU_COPY_US,
465 us_delta);
466 break;
467 case 512:
468 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_CPU_COPY_US,
469 us_delta);
470 break;
471 }
472 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_US,
473 us_delta);
474 }
475 }
476
static int xe_svm_copy(struct page **pages,
		       struct drm_pagemap_addr *pagemap_addr,
		       unsigned long npages, const enum xe_svm_copy_dir dir)
480 {
481 struct xe_vram_region *vr = NULL;
482 struct xe_gt *gt = NULL;
483 struct xe_device *xe;
484 struct dma_fence *fence = NULL;
485 unsigned long i;
486 #define XE_VRAM_ADDR_INVALID ~0x0ull
487 u64 vram_addr = XE_VRAM_ADDR_INVALID;
488 int err = 0, pos = 0;
489 bool sram = dir == XE_SVM_COPY_TO_SRAM;
490 ktime_t start = xe_svm_stats_ktime_get();
491
	/*
	 * This flow is complex: it locates physically contiguous device
	 * pages, derives the starting physical address, and performs a single
	 * GPU copy for every 8M chunk in a DMA address array. Both device
	 * pages and DMA addresses may be sparsely populated. If either is
	 * NULL, a copy is triggered based on the current search state. The
	 * last GPU copy is waited on to ensure all copies are complete.
	 */
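	/*
	 * Illustrative example (assuming 4K pages): a fully contiguous 16M
	 * device allocation results in two 8M (2048 page) GPU copies, while a
	 * hole or a break in physical contiguity instead flushes whatever has
	 * accumulated since @pos and restarts accumulation at the current
	 * page.
	 */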
500
501 for (i = 0; i < npages; ++i) {
502 struct page *spage = pages[i];
503 struct dma_fence *__fence;
504 u64 __vram_addr;
505 bool match = false, chunk, last;
506
507 #define XE_MIGRATE_CHUNK_SIZE SZ_8M
508 chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
509 last = (i + 1) == npages;
510
		/* No CPU page and no device pages queued to copy */
512 if (!pagemap_addr[i].addr && vram_addr == XE_VRAM_ADDR_INVALID)
513 continue;
514
515 if (!vr && spage) {
516 vr = page_to_vr(spage);
517 gt = xe_migrate_exec_queue(vr->migrate)->gt;
518 xe = vr->xe;
519 }
520 XE_WARN_ON(spage && page_to_vr(spage) != vr);
521
522 /*
523 * CPU page and device page valid, capture physical address on
524 * first device page, check if physical contiguous on subsequent
525 * device pages.
526 */
527 if (pagemap_addr[i].addr && spage) {
528 __vram_addr = xe_vram_region_page_to_dpa(vr, spage);
529 if (vram_addr == XE_VRAM_ADDR_INVALID) {
530 vram_addr = __vram_addr;
531 pos = i;
532 }
533
534 match = vram_addr + PAGE_SIZE * (i - pos) == __vram_addr;
535 /* Expected with contiguous memory */
536 xe_assert(vr->xe, match);
537
538 if (pagemap_addr[i].order) {
539 i += NR_PAGES(pagemap_addr[i].order) - 1;
540 chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
541 last = (i + 1) == npages;
542 }
543 }
544
545 /*
546 * Mismatched physical address, 8M copy chunk, or last page -
547 * trigger a copy.
548 */
549 if (!match || chunk || last) {
550 /*
551 * Extra page for first copy if last page and matching
552 * physical address.
553 */
554 int incr = (match && last) ? 1 : 0;
555
556 if (vram_addr != XE_VRAM_ADDR_INVALID) {
557 xe_svm_copy_kb_stats_incr(gt, dir,
558 (i - pos + incr) *
559 (PAGE_SIZE / SZ_1K));
560 if (sram) {
561 vm_dbg(&xe->drm,
562 "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
563 vram_addr,
564 (u64)pagemap_addr[pos].addr, i - pos + incr);
565 __fence = xe_migrate_from_vram(vr->migrate,
566 i - pos + incr,
567 vram_addr,
568 &pagemap_addr[pos]);
569 } else {
570 vm_dbg(&xe->drm,
571 "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
572 (u64)pagemap_addr[pos].addr, vram_addr,
573 i - pos + incr);
574 __fence = xe_migrate_to_vram(vr->migrate,
575 i - pos + incr,
576 &pagemap_addr[pos],
577 vram_addr);
578 }
579 if (IS_ERR(__fence)) {
580 err = PTR_ERR(__fence);
581 goto err_out;
582 }
583
584 dma_fence_put(fence);
585 fence = __fence;
586 }
587
588 /* Setup physical address of next device page */
589 if (pagemap_addr[i].addr && spage) {
590 vram_addr = __vram_addr;
591 pos = i;
592 } else {
593 vram_addr = XE_VRAM_ADDR_INVALID;
594 }
595
596 /* Extra mismatched device page, copy it */
597 if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
598 xe_svm_copy_kb_stats_incr(gt, dir,
599 (PAGE_SIZE / SZ_1K));
600 if (sram) {
601 vm_dbg(&xe->drm,
602 "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
603 vram_addr, (u64)pagemap_addr[pos].addr, 1);
604 __fence = xe_migrate_from_vram(vr->migrate, 1,
605 vram_addr,
606 &pagemap_addr[pos]);
607 } else {
608 vm_dbg(&xe->drm,
609 "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
610 (u64)pagemap_addr[pos].addr, vram_addr, 1);
611 __fence = xe_migrate_to_vram(vr->migrate, 1,
612 &pagemap_addr[pos],
613 vram_addr);
614 }
615 if (IS_ERR(__fence)) {
616 err = PTR_ERR(__fence);
617 goto err_out;
618 }
619
620 dma_fence_put(fence);
621 fence = __fence;
622 }
623 }
624 }
625
626 err_out:
627 /* Wait for all copies to complete */
628 if (fence) {
629 dma_fence_wait(fence, false);
630 dma_fence_put(fence);
631 }
632
	/*
	 * XXX: We can't derive the GT here (or anywhere in this function), but
	 * compute always uses the primary GT, so accumulate stats on the
	 * likely GT of the fault.
	 */
638 if (gt)
639 xe_svm_copy_us_stats_incr(gt, dir, npages, start);
640
641 return err;
642 #undef XE_MIGRATE_CHUNK_SIZE
643 #undef XE_VRAM_ADDR_INVALID
644 }
645
static int xe_svm_copy_to_devmem(struct page **pages,
				 struct drm_pagemap_addr *pagemap_addr,
				 unsigned long npages)
649 {
650 return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM);
651 }
652
static int xe_svm_copy_to_ram(struct page **pages,
			      struct drm_pagemap_addr *pagemap_addr,
			      unsigned long npages)
656 {
657 return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM);
658 }
659
static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
661 {
662 return container_of(devmem_allocation, struct xe_bo, devmem_allocation);
663 }
664
static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
666 {
667 struct xe_bo *bo = to_xe_bo(devmem_allocation);
668 struct xe_device *xe = xe_bo_device(bo);
669
670 xe_bo_put_async(bo);
671 xe_pm_runtime_put(xe);
672 }
673
static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
675 {
676 return PHYS_PFN(offset + vr->hpa_base);
677 }
678
static struct drm_buddy *vram_to_buddy(struct xe_vram_region *vram)
680 {
681 return &vram->ttm.mm;
682 }
683
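/*
 * Fill @pfn with one PFN per page of the BO's VRAM backing, walking the TTM
 * buddy blocks in list order. @npages is expected to cover the sum of all
 * block sizes.
 */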
static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
				      unsigned long npages, unsigned long *pfn)
686 {
687 struct xe_bo *bo = to_xe_bo(devmem_allocation);
688 struct ttm_resource *res = bo->ttm.resource;
689 struct list_head *blocks = &to_xe_ttm_vram_mgr_resource(res)->blocks;
690 struct drm_buddy_block *block;
691 int j = 0;
692
693 list_for_each_entry(block, blocks, link) {
694 struct xe_vram_region *vr = block->private;
695 struct drm_buddy *buddy = vram_to_buddy(vr);
696 u64 block_pfn = block_offset_to_pfn(vr, drm_buddy_block_offset(block));
697 int i;
698
699 for (i = 0; i < drm_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i)
700 pfn[j++] = block_pfn + i;
701 }
702
703 return 0;
704 }
705
706 static const struct drm_pagemap_devmem_ops dpagemap_devmem_ops = {
707 .devmem_release = xe_svm_devmem_release,
708 .populate_devmem_pfn = xe_svm_populate_devmem_pfn,
709 .copy_to_devmem = xe_svm_copy_to_devmem,
710 .copy_to_ram = xe_svm_copy_to_ram,
711 };
712
713 #endif
714
715 static const struct drm_gpusvm_ops gpusvm_ops = {
716 .range_alloc = xe_svm_range_alloc,
717 .range_free = xe_svm_range_free,
718 .invalidate = xe_svm_invalidate,
719 };
720
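/*
 * Fault chunk sizes, listed from largest to smallest; the gpusvm core is
 * expected to pick the largest chunk that fits the faulting CPU mapping.
 */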
721 static const unsigned long fault_chunk_sizes[] = {
722 SZ_2M,
723 SZ_64K,
724 SZ_4K,
725 };
726
727 /**
728 * xe_svm_init() - SVM initialize
729 * @vm: The VM.
730 *
731 * Initialize SVM state which is embedded within the VM.
732 *
733 * Return: 0 on success, negative error code on error.
734 */
int xe_svm_init(struct xe_vm *vm)
736 {
737 int err;
738
739 if (vm->flags & XE_VM_FLAG_FAULT_MODE) {
740 spin_lock_init(&vm->svm.garbage_collector.lock);
741 INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
742 INIT_WORK(&vm->svm.garbage_collector.work,
743 xe_svm_garbage_collector_work_func);
744
745 err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
746 current->mm, 0, vm->size,
747 xe_modparam.svm_notifier_size * SZ_1M,
748 &gpusvm_ops, fault_chunk_sizes,
749 ARRAY_SIZE(fault_chunk_sizes));
750 drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
751 } else {
752 err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)",
753 &vm->xe->drm, NULL, 0, 0, 0, NULL,
754 NULL, 0);
755 }
756
757 return err;
758 }
759
760 /**
761 * xe_svm_close() - SVM close
762 * @vm: The VM.
763 *
764 * Close SVM state (i.e., stop and flush all SVM actions).
765 */
void xe_svm_close(struct xe_vm *vm)
767 {
768 xe_assert(vm->xe, xe_vm_is_closed(vm));
769 flush_work(&vm->svm.garbage_collector.work);
770 }
771
772 /**
773 * xe_svm_fini() - SVM finalize
774 * @vm: The VM.
775 *
776 * Finalize SVM state which is embedded within the VM.
777 */
void xe_svm_fini(struct xe_vm *vm)
779 {
780 xe_assert(vm->xe, xe_vm_is_closed(vm));
781
782 drm_gpusvm_fini(&vm->svm.gpusvm);
783 }
784
static bool xe_svm_range_is_valid(struct xe_svm_range *range,
				  struct xe_tile *tile,
				  bool devmem_only)
788 {
789 return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
790 range->tile_invalidated) &&
791 (!devmem_only || xe_svm_range_in_vram(range)));
792 }
793
/**
 * xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
 * @vm: xe_vm pointer
 * @range: Pointer to the SVM range structure
 *
 * xe_svm_range_migrate_to_smem() checks whether the range has pages in VRAM
 * and, if so, migrates them to SMEM.
 */
void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
802 {
803 if (xe_svm_range_in_vram(range))
804 drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
805 }
806
807 /**
808 * xe_svm_range_validate() - Check if the SVM range is valid
809 * @vm: xe_vm pointer
810 * @range: Pointer to the SVM range structure
811 * @tile_mask: Mask representing the tiles to be checked
812 * @devmem_preferred : if true range needs to be in devmem
813 *
814 * The xe_svm_range_validate() function checks if a range is
815 * valid and located in the desired memory region.
816 *
817 * Return: true if the range is valid, false otherwise
818 */
bool xe_svm_range_validate(struct xe_vm *vm,
			   struct xe_svm_range *range,
			   u8 tile_mask, bool devmem_preferred)
822 {
823 bool ret;
824
825 xe_svm_notifier_lock(vm);
826
827 ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask &&
828 (devmem_preferred == range->base.pages.flags.has_devmem_pages);
829
830 xe_svm_notifier_unlock(vm);
831
832 return ret;
833 }
834
835 /**
836 * xe_svm_find_vma_start - Find start of CPU VMA
837 * @vm: xe_vm pointer
838 * @start: start address
839 * @end: end address
840 * @vma: Pointer to struct xe_vma
 *
 * This function searches for a CPU VMA within the specified range
 * [start, end] in the given VM. It adjusts the range based on the xe_vma
 * start and end addresses. If no CPU VMA is found, it returns ULONG_MAX.
846 *
847 * Return: The starting address of the VMA within the range,
848 * or ULONG_MAX if no VMA is found
849 */
u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *vma)
851 {
852 return drm_gpusvm_find_vma_start(&vm->svm.gpusvm,
853 max(start, xe_vma_start(vma)),
854 min(end, xe_vma_end(vma)));
855 }
856
857 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
				      unsigned long start, unsigned long end,
				      struct mm_struct *mm,
				      unsigned long timeslice_ms)
862 {
863 struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap);
864 struct xe_device *xe = vr->xe;
865 struct device *dev = xe->drm.dev;
866 struct drm_buddy_block *block;
867 struct xe_validation_ctx vctx;
868 struct list_head *blocks;
869 struct drm_exec exec;
870 struct xe_bo *bo;
871 int err = 0, idx;
872
873 if (!drm_dev_enter(&xe->drm, &idx))
874 return -ENODEV;
875
876 xe_pm_runtime_get(xe);
877
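	/*
	 * Create a VRAM BO covering [start, end), point its buddy blocks at
	 * this VRAM region, and hand it to drm_pagemap to migrate the CPU
	 * range into the newly allocated device pages. The extra BO and pm
	 * references taken here are released from xe_svm_devmem_release()
	 * once all device pages are gone.
	 */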
878 xe_validation_guard(&vctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
879 bo = xe_bo_create_locked(xe, NULL, NULL, end - start,
880 ttm_bo_type_device,
881 (IS_DGFX(xe) ? XE_BO_FLAG_VRAM(vr) : XE_BO_FLAG_SYSTEM) |
882 XE_BO_FLAG_CPU_ADDR_MIRROR, &exec);
883 drm_exec_retry_on_contention(&exec);
884 if (IS_ERR(bo)) {
885 err = PTR_ERR(bo);
886 xe_validation_retry_on_oom(&vctx, &err);
887 break;
888 }
889
890 drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
891 &dpagemap_devmem_ops, dpagemap, end - start);
892
893 blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
894 list_for_each_entry(block, blocks, link)
895 block->private = vr;
896
897 xe_bo_get(bo);
898
899 /* Ensure the device has a pm ref while there are device pages active. */
900 xe_pm_runtime_get_noresume(xe);
901 err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
902 start, end, timeslice_ms,
903 xe_svm_devm_owner(xe));
904 if (err)
905 xe_svm_devmem_release(&bo->devmem_allocation);
906 xe_bo_unlock(bo);
907 xe_bo_put(bo);
908 }
909 xe_pm_runtime_put(xe);
910 drm_dev_exit(idx);
911
912 return err;
913 }
914 #endif
915
static bool supports_4K_migration(struct xe_device *xe)
917 {
918 if (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
919 return false;
920
921 return true;
922 }
923
924 /**
 * xe_svm_range_needs_migrate_to_vram() - Check whether an SVM range needs migration to VRAM
926 * @range: SVM range for which migration needs to be decided
927 * @vma: vma which has range
928 * @preferred_region_is_vram: preferred region for range is vram
929 *
 * Return: True if the range needs migration and migration is supported, false otherwise
931 */
bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
					bool preferred_region_is_vram)
934 {
935 struct xe_vm *vm = range_to_vm(&range->base);
936 u64 range_size = xe_svm_range_size(range);
937
938 if (!range->base.pages.flags.migrate_devmem || !preferred_region_is_vram)
939 return false;
940
941 xe_assert(vm->xe, IS_DGFX(vm->xe));
942
943 if (xe_svm_range_in_vram(range)) {
944 drm_info(&vm->xe->drm, "Range is already in VRAM\n");
945 return false;
946 }
947
948 if (range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
949 drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
950 return false;
951 }
952
953 return true;
954 }
955
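/*
 * DECL_SVM_RANGE_COUNT_STATS(elem, stat) expands to a helper,
 * xe_svm_range_<elem>_count_stats_incr(), which bumps the 4K/64K/2M <stat>
 * counter matching the exact range size (other sizes are not counted).
 */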
956 #define DECL_SVM_RANGE_COUNT_STATS(elem, stat) \
957 static void xe_svm_range_##elem##_count_stats_incr(struct xe_gt *gt, \
958 struct xe_svm_range *range) \
959 { \
960 switch (xe_svm_range_size(range)) { \
961 case SZ_4K: \
962 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_COUNT, 1); \
963 break; \
964 case SZ_64K: \
965 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_COUNT, 1); \
966 break; \
967 case SZ_2M: \
968 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_COUNT, 1); \
969 break; \
970 } \
971 } \
972
DECL_SVM_RANGE_COUNT_STATS(fault, PAGEFAULT)
974 DECL_SVM_RANGE_COUNT_STATS(valid_fault, VALID_PAGEFAULT)
975 DECL_SVM_RANGE_COUNT_STATS(migrate, MIGRATE)
976
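/*
 * DECL_SVM_RANGE_US_STATS(elem, stat) expands to
 * xe_svm_range_<elem>_us_stats_incr(), which adds the microseconds elapsed
 * since @start to the 4K/64K/2M <stat> latency counter matching the range
 * size (the delta is 0 when CONFIG_DEBUG_FS is disabled).
 */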
977 #define DECL_SVM_RANGE_US_STATS(elem, stat) \
978 static void xe_svm_range_##elem##_us_stats_incr(struct xe_gt *gt, \
979 struct xe_svm_range *range, \
980 ktime_t start) \
981 { \
982 s64 us_delta = xe_svm_stats_ktime_us_delta(start); \
983 \
984 switch (xe_svm_range_size(range)) { \
985 case SZ_4K: \
986 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_US, \
987 us_delta); \
988 break; \
989 case SZ_64K: \
990 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_US, \
991 us_delta); \
992 break; \
993 case SZ_2M: \
994 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_US, \
995 us_delta); \
996 break; \
997 } \
998 } \
999
1000 DECL_SVM_RANGE_US_STATS(migrate, MIGRATE)
1001 DECL_SVM_RANGE_US_STATS(get_pages, GET_PAGES)
1002 DECL_SVM_RANGE_US_STATS(bind, BIND)
1003 DECL_SVM_RANGE_US_STATS(fault, PAGEFAULT)
1004
1005 static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
1006 struct xe_gt *gt, u64 fault_addr,
1007 bool need_vram)
1008 {
1009 int devmem_possible = IS_DGFX(vm->xe) &&
1010 IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
1011 struct drm_gpusvm_ctx ctx = {
1012 .read_only = xe_vma_read_only(vma),
1013 .devmem_possible = devmem_possible,
1014 .check_pages_threshold = devmem_possible ? SZ_64K : 0,
1015 .devmem_only = need_vram && devmem_possible,
1016 .timeslice_ms = need_vram && devmem_possible ?
1017 vm->xe->atomic_svm_timeslice_ms : 0,
1018 .device_private_page_owner = xe_svm_devm_owner(vm->xe),
1019 };
1020 struct xe_validation_ctx vctx;
1021 struct drm_exec exec;
1022 struct xe_svm_range *range;
1023 struct dma_fence *fence;
1024 struct drm_pagemap *dpagemap;
1025 struct xe_tile *tile = gt_to_tile(gt);
1026 int migrate_try_count = ctx.devmem_only ? 3 : 1;
1027 ktime_t start = xe_svm_stats_ktime_get(), bind_start, get_pages_start;
1028 int err;
1029
1030 lockdep_assert_held_write(&vm->lock);
1031 xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
1032
1033 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, 1);
1034
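	/*
	 * Overall flow (may loop back via the retry/get_pages labels below):
	 * run the garbage collector, find or insert the SVM range for the
	 * fault, optionally migrate it to VRAM, get and DMA-map its pages,
	 * and finally rebind it, waiting on the bind fence before returning.
	 */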
1035 retry:
	/* Always process UNMAPs first so the view of SVM ranges is current */
1037 err = xe_svm_garbage_collector(vm);
1038 if (err)
1039 return err;
1040
1041 dpagemap = xe_vma_resolve_pagemap(vma, tile);
1042 if (!dpagemap && !ctx.devmem_only)
1043 ctx.device_private_page_owner = NULL;
1044 range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
1045
1046 if (IS_ERR(range))
1047 return PTR_ERR(range);
1048
1049 xe_svm_range_fault_count_stats_incr(gt, range);
1050
1051 if (ctx.devmem_only && !range->base.pages.flags.migrate_devmem) {
1052 err = -EACCES;
1053 goto out;
1054 }
1055
1056 if (xe_svm_range_is_valid(range, tile, ctx.devmem_only)) {
1057 xe_svm_range_valid_fault_count_stats_incr(gt, range);
1058 range_debug(range, "PAGE FAULT - VALID");
1059 goto out;
1060 }
1061
1062 range_debug(range, "PAGE FAULT");
1063
1064 if (--migrate_try_count >= 0 &&
1065 xe_svm_range_needs_migrate_to_vram(range, vma, !!dpagemap || ctx.devmem_only)) {
1066 ktime_t migrate_start = xe_svm_stats_ktime_get();
1067
		/*
		 * TODO: For multi-device, dpagemap will be used to find the
		 * remote tile and remote device. xe_svm_alloc_vram() will need
		 * to be modified to use dpagemap for future multi-device
		 * support.
		 */
1073 xe_svm_range_migrate_count_stats_incr(gt, range);
1074 err = xe_svm_alloc_vram(tile, range, &ctx);
1075 xe_svm_range_migrate_us_stats_incr(gt, range, migrate_start);
1076 ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
1077 if (err) {
1078 if (migrate_try_count || !ctx.devmem_only) {
1079 drm_dbg(&vm->xe->drm,
1080 "VRAM allocation failed, falling back to retrying fault, asid=%u, errno=%pe\n",
1081 vm->usm.asid, ERR_PTR(err));
1082
1083 /*
1084 * In the devmem-only case, mixed mappings may
1085 * be found. The get_pages function will fix
1086 * these up to a single location, allowing the
1087 * page fault handler to make forward progress.
1088 */
1089 if (ctx.devmem_only)
1090 goto get_pages;
1091 else
1092 goto retry;
1093 } else {
1094 drm_err(&vm->xe->drm,
1095 "VRAM allocation failed, retry count exceeded, asid=%u, errno=%pe\n",
1096 vm->usm.asid, ERR_PTR(err));
1097 return err;
1098 }
1099 }
1100 }
1101
1102 get_pages:
1103 get_pages_start = xe_svm_stats_ktime_get();
1104
1105 range_debug(range, "GET PAGES");
1106 err = xe_svm_range_get_pages(vm, range, &ctx);
	/* Corner case where CPU mappings have changed */
1108 if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
1109 ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
1110 if (migrate_try_count > 0 || !ctx.devmem_only) {
1111 drm_dbg(&vm->xe->drm,
1112 "Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
1113 vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
1114 range_debug(range, "PAGE FAULT - RETRY PAGES");
1115 goto retry;
1116 } else {
1117 drm_err(&vm->xe->drm,
1118 "Get pages failed, retry count exceeded, asid=%u, gpusvm=%p, errno=%pe\n",
1119 vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
1120 }
1121 }
1122 if (err) {
1123 range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
1124 goto out;
1125 }
1126
1127 xe_svm_range_get_pages_us_stats_incr(gt, range, get_pages_start);
1128 range_debug(range, "PAGE FAULT - BIND");
1129
1130 bind_start = xe_svm_stats_ktime_get();
1131 xe_validation_guard(&vctx, &vm->xe->val, &exec, (struct xe_val_flags) {}, err) {
1132 err = xe_vm_drm_exec_lock(vm, &exec);
1133 drm_exec_retry_on_contention(&exec);
1134
1135 xe_vm_set_validation_exec(vm, &exec);
1136 fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
1137 xe_vm_set_validation_exec(vm, NULL);
1138 if (IS_ERR(fence)) {
1139 drm_exec_retry_on_contention(&exec);
1140 err = PTR_ERR(fence);
1141 xe_validation_retry_on_oom(&vctx, &err);
1142 xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
1143 break;
1144 }
1145 }
1146 if (err)
1147 goto err_out;
1148
1149 dma_fence_wait(fence, false);
1150 dma_fence_put(fence);
1151 xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
1152
1153 out:
1154 xe_svm_range_fault_us_stats_incr(gt, range, start);
1155 return 0;
1156
1157 err_out:
1158 if (err == -EAGAIN) {
1159 ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
1160 range_debug(range, "PAGE FAULT - RETRY BIND");
1161 goto retry;
1162 }
1163
1164 return err;
1165 }
1166
1167 /**
1168 * xe_svm_handle_pagefault() - SVM handle page fault
1169 * @vm: The VM.
1170 * @vma: The CPU address mirror VMA.
1171 * @gt: The gt upon the fault occurred.
1172 * @fault_addr: The GPU fault address.
1173 * @atomic: The fault atomic access bit.
1174 *
1175 * Create GPU bindings for a SVM page fault. Optionally migrate to device
1176 * memory.
1177 *
1178 * Return: 0 on success, negative error code on error.
1179 */
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic)
1183 {
1184 int need_vram, ret;
1185 retry:
1186 need_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic);
1187 if (need_vram < 0)
1188 return need_vram;
1189
1190 ret = __xe_svm_handle_pagefault(vm, vma, gt, fault_addr,
1191 need_vram ? true : false);
1192 if (ret == -EAGAIN) {
1193 /*
1194 * Retry once on -EAGAIN to re-lookup the VMA, as the original VMA
1195 * may have been split by xe_svm_range_set_default_attr.
1196 */
1197 vma = xe_vm_find_vma_by_addr(vm, fault_addr);
1198 if (!vma)
1199 return -EINVAL;
1200
1201 goto retry;
1202 }
1203 return ret;
1204 }
1205
1206 /**
1207 * xe_svm_has_mapping() - SVM has mappings
1208 * @vm: The VM.
1209 * @start: Start address.
1210 * @end: End address.
1211 *
1212 * Check if an address range has SVM mappings.
1213 *
1214 * Return: True if address range has a SVM mapping, False otherwise
1215 */
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
1217 {
1218 return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
1219 }
1220
1221 /**
1222 * xe_svm_unmap_address_range - UNMAP SVM mappings and ranges
1223 * @vm: The VM
1224 * @start: start addr
1225 * @end: end addr
1226 *
 * This function unmaps SVM ranges if the start or end address lies inside them.
1228 */
void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
1230 {
1231 struct drm_gpusvm_notifier *notifier, *next;
1232
1233 lockdep_assert_held_write(&vm->lock);
1234
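	/*
	 * Only ranges that straddle @start or @end are handled here: they are
	 * evicted from VRAM (if applicable) and unbound/removed immediately,
	 * bypassing the deferred garbage collector.
	 */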
1235 drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) {
1236 struct drm_gpusvm_range *range, *__next;
1237
1238 drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
1239 if (start > drm_gpusvm_range_start(range) ||
1240 end < drm_gpusvm_range_end(range)) {
1241 if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
1242 drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
1243 drm_gpusvm_range_get(range);
1244 __xe_svm_garbage_collector(vm, to_xe_range(range));
1245 if (!list_empty(&to_xe_range(range)->garbage_collector_link)) {
1246 spin_lock(&vm->svm.garbage_collector.lock);
1247 list_del(&to_xe_range(range)->garbage_collector_link);
1248 spin_unlock(&vm->svm.garbage_collector.lock);
1249 }
1250 drm_gpusvm_range_put(range);
1251 }
1252 }
1253 }
1254 }
1255
1256 /**
1257 * xe_svm_bo_evict() - SVM evict BO to system memory
1258 * @bo: BO to evict
1259 *
 * Evict a BO's SVM-backed pages to system memory. The GPU SVM layer ensures
 * all device pages are evicted before returning.
 *
 * Return: 0 on success, standard error code otherwise
1264 */
int xe_svm_bo_evict(struct xe_bo *bo)
1266 {
1267 return drm_pagemap_evict_to_ram(&bo->devmem_allocation);
1268 }
1269
1270 /**
 * xe_svm_range_find_or_insert() - Find or insert GPU SVM range
1272 * @vm: xe_vm pointer
1273 * @addr: address for which range needs to be found/inserted
1274 * @vma: Pointer to struct xe_vma which mirrors CPU
1275 * @ctx: GPU SVM context
1276 *
 * This function finds an existing SVM range or inserts a newly allocated one,
 * based on the address.
1279 *
1280 * Return: Pointer to the SVM range on success, ERR_PTR() on failure.
1281 */
struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
1284 {
1285 struct drm_gpusvm_range *r;
1286
1287 r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
1288 xe_vma_start(vma), xe_vma_end(vma), ctx);
1289 if (IS_ERR(r))
1290 return ERR_CAST(r);
1291
1292 return to_xe_range(r);
1293 }
1294
1295 /**
1296 * xe_svm_range_get_pages() - Get pages for a SVM range
1297 * @vm: Pointer to the struct xe_vm
1298 * @range: Pointer to the xe SVM range structure
1299 * @ctx: GPU SVM context
1300 *
1301 * This function gets pages for a SVM range and ensures they are mapped for
1302 * DMA access. In case of failure with -EOPNOTSUPP, it evicts the range.
1303 *
1304 * Return: 0 on success, negative error code on failure.
1305 */
int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
			   struct drm_gpusvm_ctx *ctx)
1308 {
1309 int err = 0;
1310
1311 err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
1312 if (err == -EOPNOTSUPP) {
1313 range_debug(range, "PAGE FAULT - EVICT PAGES");
1314 drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
1315 }
1316
1317 return err;
1318 }
1319
1320 /**
1321 * xe_svm_ranges_zap_ptes_in_range - clear ptes of svm ranges in input range
1322 * @vm: Pointer to the xe_vm structure
1323 * @start: Start of the input range
1324 * @end: End of the input range
1325 *
 * This function removes the page table entries (PTEs) associated with the
 * SVM ranges within the given input start and end.
 *
 * Return: tile_mask of tiles whose GTs need to be TLB invalidated.
1330 */
u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
1332 {
1333 struct drm_gpusvm_notifier *notifier;
1334 struct xe_svm_range *range;
1335 u64 adj_start, adj_end;
1336 struct xe_tile *tile;
1337 u8 tile_mask = 0;
1338 u8 id;
1339
1340 lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
1341 lockdep_is_held_type(&vm->lock, 0));
1342
1343 drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) {
1344 struct drm_gpusvm_range *r = NULL;
1345
1346 adj_start = max(start, drm_gpusvm_notifier_start(notifier));
1347 adj_end = min(end, drm_gpusvm_notifier_end(notifier));
1348 drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) {
1349 range = to_xe_range(r);
1350 for_each_tile(tile, vm->xe, id) {
1351 if (xe_pt_zap_ptes_range(tile, vm, range)) {
1352 tile_mask |= BIT(id);
1353 /*
1354 * WRITE_ONCE pairs with READ_ONCE in
1355 * xe_vm_has_valid_gpu_mapping().
1356 * Must not fail after setting
1357 * tile_invalidated and before
1358 * TLB invalidation.
1359 */
1360 WRITE_ONCE(range->tile_invalidated,
1361 range->tile_invalidated | BIT(id));
1362 }
1363 }
1364 }
1365 }
1366
1367 return tile_mask;
1368 }
1369
1370 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
1371
static struct drm_pagemap *tile_local_pagemap(struct xe_tile *tile)
1373 {
1374 return &tile->mem.vram->dpagemap;
1375 }
1376
1377 /**
1378 * xe_vma_resolve_pagemap - Resolve the appropriate DRM pagemap for a VMA
1379 * @vma: Pointer to the xe_vma structure containing memory attributes
1380 * @tile: Pointer to the xe_tile structure used as fallback for VRAM mapping
1381 *
1382 * This function determines the correct DRM pagemap to use for a given VMA.
1383 * It first checks if a valid devmem_fd is provided in the VMA's preferred
1384 * location. If the devmem_fd is negative, it returns NULL, indicating no
1385 * pagemap is available and smem to be used as preferred location.
1386 * If the devmem_fd is equal to the default faulting
1387 * GT identifier, it returns the VRAM pagemap associated with the tile.
1388 *
1389 * Future support for multi-device configurations may use drm_pagemap_from_fd()
1390 * to resolve pagemaps from arbitrary file descriptors.
1391 *
1392 * Return: A pointer to the resolved drm_pagemap, or NULL if none is applicable.
1393 */
struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
1395 {
1396 s32 fd = (s32)vma->attr.preferred_loc.devmem_fd;
1397
1398 if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM)
1399 return NULL;
1400
1401 if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE)
1402 return IS_DGFX(tile_to_xe(tile)) ? tile_local_pagemap(tile) : NULL;
1403
1404 /* TODO: Support multi-device with drm_pagemap_from_fd(fd) */
1405 return NULL;
1406 }
1407
1408 /**
 * xe_svm_alloc_vram() - Allocate device memory pages for range,
 * migrating existing data.
1411 * @tile: tile to allocate vram from
1412 * @range: SVM range
1413 * @ctx: DRM GPU SVM context
1414 *
1415 * Return: 0 on success, error code on failure.
1416 */
int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
		      const struct drm_gpusvm_ctx *ctx)
1419 {
1420 struct drm_pagemap *dpagemap;
1421
1422 xe_assert(tile_to_xe(tile), range->base.pages.flags.migrate_devmem);
1423 range_debug(range, "ALLOCATE VRAM");
1424
1425 dpagemap = tile_local_pagemap(tile);
1426 return drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
1427 xe_svm_range_end(range),
1428 range->base.gpusvm->mm,
1429 ctx->timeslice_ms);
1430 }
1431
1432 static struct drm_pagemap_addr
xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
			  struct device *dev,
			  struct page *page,
			  unsigned int order,
			  enum dma_data_direction dir)
1438 {
1439 struct device *pgmap_dev = dpagemap->dev;
1440 enum drm_interconnect_protocol prot;
1441 dma_addr_t addr;
1442
1443 if (pgmap_dev == dev) {
1444 addr = xe_vram_region_page_to_dpa(page_to_vr(page), page);
1445 prot = XE_INTERCONNECT_VRAM;
1446 } else {
1447 addr = DMA_MAPPING_ERROR;
1448 prot = 0;
1449 }
1450
1451 return drm_pagemap_addr_encode(addr, prot, order, dir);
1452 }
1453
1454 static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
1455 .device_map = xe_drm_pagemap_device_map,
1456 .populate_mm = xe_drm_pagemap_populate_mm,
1457 };
1458
1459 /**
 * xe_devm_add() - Remap and provide memmap backing for device memory
 * @tile: tile that the memory region belongs to
 * @vr: vram memory region to remap
 *
 * This remaps device memory into the host physical address space and creates
 * struct pages to back the device memory.
 *
 * Return: 0 on success, standard error code otherwise
1468 */
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
1470 {
1471 struct xe_device *xe = tile_to_xe(tile);
1472 struct device *dev = &to_pci_dev(xe->drm.dev)->dev;
1473 struct resource *res;
1474 void *addr;
1475 int ret;
1476
1477 res = devm_request_free_mem_region(dev, &iomem_resource,
1478 vr->usable_size);
1479 if (IS_ERR(res)) {
1480 ret = PTR_ERR(res);
1481 return ret;
1482 }
1483
1484 vr->pagemap.type = MEMORY_DEVICE_PRIVATE;
1485 vr->pagemap.range.start = res->start;
1486 vr->pagemap.range.end = res->end;
1487 vr->pagemap.nr_range = 1;
1488 vr->pagemap.ops = drm_pagemap_pagemap_ops_get();
1489 vr->pagemap.owner = xe_svm_devm_owner(xe);
1490 addr = devm_memremap_pages(dev, &vr->pagemap);
1491
1492 vr->dpagemap.dev = dev;
1493 vr->dpagemap.ops = &xe_drm_pagemap_ops;
1494
1495 if (IS_ERR(addr)) {
1496 devm_release_mem_region(dev, res->start, resource_size(res));
1497 ret = PTR_ERR(addr);
1498 drm_err(&xe->drm, "Failed to remap tile %d memory, errno %pe\n",
1499 tile->id, ERR_PTR(ret));
1500 return ret;
1501 }
1502 vr->hpa_base = res->start;
1503
1504 drm_dbg(&xe->drm, "Added tile %d memory [%llx-%llx] to devm, remapped to %pr\n",
1505 tile->id, vr->io_start, vr->io_start + vr->usable_size, res);
1506 return 0;
1507 }
1508 #else
int xe_svm_alloc_vram(struct xe_tile *tile,
		      struct xe_svm_range *range,
		      const struct drm_gpusvm_ctx *ctx)
1512 {
1513 return -EOPNOTSUPP;
1514 }
1515
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
1517 {
1518 return 0;
1519 }
1520
struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
1522 {
1523 return NULL;
1524 }
1525 #endif
1526
1527 /**
1528 * xe_svm_flush() - SVM flush
1529 * @vm: The VM.
1530 *
1531 * Flush all SVM actions.
1532 */
void xe_svm_flush(struct xe_vm *vm)
1534 {
1535 if (xe_vm_in_fault_mode(vm))
1536 flush_work(&vm->svm.garbage_collector.work);
1537 }
1538