1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2024 Intel Corporation
4 */
5
6 #include <drm/drm_drv.h>
7
8 #include "xe_bo.h"
9 #include "xe_exec_queue_types.h"
10 #include "xe_gt_stats.h"
11 #include "xe_migrate.h"
12 #include "xe_module.h"
13 #include "xe_pm.h"
14 #include "xe_pt.h"
15 #include "xe_svm.h"
16 #include "xe_tile.h"
17 #include "xe_ttm_vram_mgr.h"
18 #include "xe_vm.h"
19 #include "xe_vm_types.h"
20 #include "xe_vram_types.h"
21
static bool xe_svm_range_in_vram(struct xe_svm_range *range)
{
	/*
	 * Advisory-only check of whether the range is currently backed by
	 * VRAM memory.
	 */
28
29 struct drm_gpusvm_pages_flags flags = {
30 /* Pairs with WRITE_ONCE in drm_gpusvm.c */
31 .__flags = READ_ONCE(range->base.pages.flags.__flags),
32 };
33
34 return flags.has_devmem_pages;
35 }
36
static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
38 {
39 /* Not reliable without notifier lock */
40 return xe_svm_range_in_vram(range) && range->tile_present;
41 }
42
static struct xe_vm *gpusvm_to_vm(struct drm_gpusvm *gpusvm)
44 {
45 return container_of(gpusvm, struct xe_vm, svm.gpusvm);
46 }
47
static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
49 {
50 return gpusvm_to_vm(r->gpusvm);
51 }
52
53 #define range_debug(r__, operation__) \
54 vm_dbg(&range_to_vm(&(r__)->base)->xe->drm, \
55 "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
56 "start=0x%014lx, end=0x%014lx, size=%lu", \
57 (operation__), range_to_vm(&(r__)->base)->usm.asid, \
58 (r__)->base.gpusvm, \
59 xe_svm_range_in_vram((r__)) ? 1 : 0, \
60 xe_svm_range_has_vram_binding((r__)) ? 1 : 0, \
61 (r__)->base.pages.notifier_seq, \
62 xe_svm_range_start((r__)), xe_svm_range_end((r__)), \
63 xe_svm_range_size((r__)))
64
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
66 {
67 range_debug(range, operation);
68 }
69
70 static struct drm_gpusvm_range *
xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
72 {
73 struct xe_svm_range *range;
74
75 range = kzalloc(sizeof(*range), GFP_KERNEL);
76 if (!range)
77 return NULL;
78
79 INIT_LIST_HEAD(&range->garbage_collector_link);
80 xe_vm_get(gpusvm_to_vm(gpusvm));
81
82 return &range->base;
83 }
84
static void xe_svm_range_free(struct drm_gpusvm_range *range)
86 {
87 xe_vm_put(range_to_vm(range));
88 kfree(range);
89 }
90
91 static void
xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
				   const struct mmu_notifier_range *mmu_range)
94 {
95 struct xe_device *xe = vm->xe;
96
97 range_debug(range, "GARBAGE COLLECTOR ADD");
98
99 drm_gpusvm_range_set_unmapped(&range->base, mmu_range);
100
101 spin_lock(&vm->svm.garbage_collector.lock);
102 if (list_empty(&range->garbage_collector_link))
103 list_add_tail(&range->garbage_collector_link,
104 &vm->svm.garbage_collector.range_list);
105 spin_unlock(&vm->svm.garbage_collector.lock);
106
107 queue_work(xe_device_get_root_tile(xe)->primary_gt->usm.pf_wq,
108 &vm->svm.garbage_collector.work);
109 }
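/*
 * Note (design summary, not from the original comments): ranges cannot be
 * unbound directly from within the MMU notifier, so unmapped ranges are
 * queued here and unbound later, either by the worker scheduled above or
 * synchronously via xe_svm_garbage_collector() in the page-fault path.
 */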
110
static void xe_svm_tlb_inval_count_stats_incr(struct xe_gt *gt)
112 {
113 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_COUNT, 1);
114 }
115
116 static u8
xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
				  const struct mmu_notifier_range *mmu_range,
				  u64 *adj_start, u64 *adj_end)
120 {
121 struct xe_svm_range *range = to_xe_range(r);
122 struct xe_device *xe = vm->xe;
123 struct xe_tile *tile;
124 u8 tile_mask = 0;
125 u8 id;
126
127 xe_svm_assert_in_notifier(vm);
128
129 range_debug(range, "NOTIFIER");
130
	/* Skip if already unmapped or if no bindings exist */
132 if (range->base.pages.flags.unmapped || !range->tile_present)
133 return 0;
134
135 range_debug(range, "NOTIFIER - EXECUTE");
136
137 /* Adjust invalidation to range boundaries */
138 *adj_start = min(xe_svm_range_start(range), mmu_range->start);
139 *adj_end = max(xe_svm_range_end(range), mmu_range->end);
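	/*
	 * Illustrative example (hypothetical addresses): if the CPU notifier
	 * reports [0x201000, 0x202000) but the SVM range spans
	 * [0x200000, 0x400000), the invalidation is widened to
	 * [0x200000, 0x400000) so the whole GPU binding for the range is
	 * zapped in one go.
	 */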
140
	/*
	 * XXX: Ideally we would zap PTEs in one shot in xe_svm_invalidate(),
	 * but the invalidation code can't correctly cope with sparse ranges or
	 * invalidations spanning multiple ranges.
	 */
146 for_each_tile(tile, xe, id)
147 if (xe_pt_zap_ptes_range(tile, vm, range)) {
148 /*
149 * WRITE_ONCE pairs with READ_ONCE in
150 * xe_vm_has_valid_gpu_mapping()
151 */
152 WRITE_ONCE(range->tile_invalidated,
153 range->tile_invalidated | BIT(id));
154
155 if (!(tile_mask & BIT(id))) {
156 xe_svm_tlb_inval_count_stats_incr(tile->primary_gt);
157 if (tile->media_gt)
158 xe_svm_tlb_inval_count_stats_incr(tile->media_gt);
159 tile_mask |= BIT(id);
160 }
161 }
162
163 return tile_mask;
164 }
165
166 static void
xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r,
				const struct mmu_notifier_range *mmu_range)
169 {
170 struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
171
172 xe_svm_assert_in_notifier(vm);
173
174 drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx);
175 if (!xe_vm_is_closed(vm) && mmu_range->event == MMU_NOTIFY_UNMAP)
176 xe_svm_garbage_collector_add_range(vm, to_xe_range(r),
177 mmu_range);
178 }
179
static s64 xe_svm_stats_ktime_us_delta(ktime_t start)
181 {
182 return IS_ENABLED(CONFIG_DEBUG_FS) ?
183 ktime_us_delta(ktime_get(), start) : 0;
184 }
185
static void xe_svm_tlb_inval_us_stats_incr(struct xe_gt *gt, ktime_t start)
187 {
188 s64 us_delta = xe_svm_stats_ktime_us_delta(start);
189
190 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_US, us_delta);
191 }
192
static ktime_t xe_svm_stats_ktime_get(void)
194 {
195 return IS_ENABLED(CONFIG_DEBUG_FS) ? ktime_get() : 0;
196 }
197
static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
			      struct drm_gpusvm_notifier *notifier,
			      const struct mmu_notifier_range *mmu_range)
201 {
202 struct xe_vm *vm = gpusvm_to_vm(gpusvm);
203 struct xe_device *xe = vm->xe;
204 struct drm_gpusvm_range *r, *first;
205 struct xe_tile *tile;
206 ktime_t start = xe_svm_stats_ktime_get();
207 u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
208 u8 tile_mask = 0, id;
209 long err;
210
211 xe_svm_assert_in_notifier(vm);
212
213 vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm,
214 "INVALIDATE: asid=%u, gpusvm=%p, seqno=%lu, start=0x%016lx, end=0x%016lx, event=%d",
215 vm->usm.asid, gpusvm, notifier->notifier.invalidate_seq,
216 mmu_range->start, mmu_range->end, mmu_range->event);
217
218 /* Adjust invalidation to notifier boundaries */
219 adj_start = max(drm_gpusvm_notifier_start(notifier), adj_start);
220 adj_end = min(drm_gpusvm_notifier_end(notifier), adj_end);
221
222 first = drm_gpusvm_range_find(notifier, adj_start, adj_end);
223 if (!first)
224 return;
225
	/*
	 * The PTs may be getting destroyed, so it is not safe to touch them,
	 * but they should already be invalidated at this point in time.
	 * Regardless, we still need to ensure any DMA mappings are unmapped
	 * here.
	 */
231 if (xe_vm_is_closed(vm))
232 goto range_notifier_event_end;
233
	/*
	 * XXX: Less than ideal to always wait on the VM's resv slots if an
	 * invalidation is not required. Could walk the range list twice to
	 * figure out if an invalidation is needed, but that is also not ideal.
	 */
239 err = dma_resv_wait_timeout(xe_vm_resv(vm),
240 DMA_RESV_USAGE_BOOKKEEP,
241 false, MAX_SCHEDULE_TIMEOUT);
242 XE_WARN_ON(err <= 0);
243
244 r = first;
245 drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
246 tile_mask |= xe_svm_range_notifier_event_begin(vm, r, mmu_range,
247 &adj_start,
248 &adj_end);
249 if (!tile_mask)
250 goto range_notifier_event_end;
251
252 xe_device_wmb(xe);
253
254 err = xe_vm_range_tilemask_tlb_inval(vm, adj_start, adj_end, tile_mask);
255 WARN_ON_ONCE(err);
256
257 range_notifier_event_end:
258 r = first;
259 drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
260 xe_svm_range_notifier_event_end(vm, r, mmu_range);
261 for_each_tile(tile, xe, id) {
262 if (tile_mask & BIT(id)) {
263 xe_svm_tlb_inval_us_stats_incr(tile->primary_gt, start);
264 if (tile->media_gt)
265 xe_svm_tlb_inval_us_stats_incr(tile->media_gt, start);
266 }
267 }
268 }
269
static int __xe_svm_garbage_collector(struct xe_vm *vm,
				      struct xe_svm_range *range)
272 {
273 struct dma_fence *fence;
274
275 range_debug(range, "GARBAGE COLLECTOR");
276
277 xe_vm_lock(vm, false);
278 fence = xe_vm_range_unbind(vm, range);
279 xe_vm_unlock(vm);
280 if (IS_ERR(fence))
281 return PTR_ERR(fence);
282 dma_fence_put(fence);
283
284 drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);
285
286 return 0;
287 }
288
static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64 range_end)
290 {
291 struct xe_vma *vma;
292 struct xe_vma_mem_attr default_attr = {
293 .preferred_loc = {
294 .devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
295 .migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
296 },
297 .atomic_access = DRM_XE_ATOMIC_UNDEFINED,
298 };
299 int err = 0;
300
301 vma = xe_vm_find_vma_by_addr(vm, range_start);
302 if (!vma)
303 return -EINVAL;
304
305 if (xe_vma_has_default_mem_attrs(vma))
306 return 0;
307
308 vm_dbg(&vm->xe->drm, "Existing VMA start=0x%016llx, vma_end=0x%016llx",
309 xe_vma_start(vma), xe_vma_end(vma));
310
311 if (xe_vma_start(vma) == range_start && xe_vma_end(vma) == range_end) {
312 default_attr.pat_index = vma->attr.default_pat_index;
313 default_attr.default_pat_index = vma->attr.default_pat_index;
314 vma->attr = default_attr;
315 } else {
316 vm_dbg(&vm->xe->drm, "Split VMA start=0x%016llx, vma_end=0x%016llx",
317 range_start, range_end);
318 err = xe_vm_alloc_cpu_addr_mirror_vma(vm, range_start, range_end - range_start);
319 if (err) {
320 drm_warn(&vm->xe->drm, "VMA SPLIT failed: %pe\n", ERR_PTR(err));
321 xe_vm_kill(vm, true);
322 return err;
323 }
324 }
325
	/*
	 * When called from xe_svm_handle_pagefault the original VMA might have
	 * changed; signal this so the caller looks up the VMA again.
	 */
330 return -EAGAIN;
331 }
332
static int xe_svm_garbage_collector(struct xe_vm *vm)
334 {
335 struct xe_svm_range *range;
336 u64 range_start;
337 u64 range_end;
338 int err, ret = 0;
339
340 lockdep_assert_held_write(&vm->lock);
341
342 if (xe_vm_is_closed_or_banned(vm))
343 return -ENOENT;
344
345 for (;;) {
346 spin_lock(&vm->svm.garbage_collector.lock);
347 range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
348 typeof(*range),
349 garbage_collector_link);
350 if (!range)
351 break;
352
353 range_start = xe_svm_range_start(range);
354 range_end = xe_svm_range_end(range);
355
356 list_del(&range->garbage_collector_link);
357 spin_unlock(&vm->svm.garbage_collector.lock);
358
359 err = __xe_svm_garbage_collector(vm, range);
360 if (err) {
361 drm_warn(&vm->xe->drm,
362 "Garbage collection failed: %pe\n",
363 ERR_PTR(err));
364 xe_vm_kill(vm, true);
365 return err;
366 }
367
368 err = xe_svm_range_set_default_attr(vm, range_start, range_end);
369 if (err) {
370 if (err == -EAGAIN)
371 ret = -EAGAIN;
372 else
373 return err;
374 }
375 }
376 spin_unlock(&vm->svm.garbage_collector.lock);
377
378 return ret;
379 }
380
static void xe_svm_garbage_collector_work_func(struct work_struct *w)
382 {
383 struct xe_vm *vm = container_of(w, struct xe_vm,
384 svm.garbage_collector.work);
385
386 down_write(&vm->lock);
387 xe_svm_garbage_collector(vm);
388 up_write(&vm->lock);
389 }
390
391 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
392
static struct xe_vram_region *page_to_vr(struct page *page)
394 {
395 return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
396 }
397
static u64 xe_vram_region_page_to_dpa(struct xe_vram_region *vr,
				      struct page *page)
400 {
401 u64 dpa;
402 u64 pfn = page_to_pfn(page);
403 u64 offset;
404
405 xe_assert(vr->xe, is_device_private_page(page));
406 xe_assert(vr->xe, (pfn << PAGE_SHIFT) >= vr->hpa_base);
407
408 offset = (pfn << PAGE_SHIFT) - vr->hpa_base;
409 dpa = vr->dpa_base + offset;
410
411 return dpa;
412 }
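/*
 * Worked example (illustrative numbers only): with vr->hpa_base =
 * 0x400000000, vr->dpa_base = 0 and a device page at host physical address
 * 0x400010000 (pfn 0x400010 with 4K pages), the offset is 0x10000 and the
 * returned device physical address is dpa = 0 + 0x10000 = 0x10000.
 */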
413
414 enum xe_svm_copy_dir {
415 XE_SVM_COPY_TO_VRAM,
416 XE_SVM_COPY_TO_SRAM,
417 };
418
static void xe_svm_copy_kb_stats_incr(struct xe_gt *gt,
				      const enum xe_svm_copy_dir dir,
				      int kb)
422 {
423 if (dir == XE_SVM_COPY_TO_VRAM)
424 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_KB, kb);
425 else
426 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_KB, kb);
427 }
428
static void xe_svm_copy_us_stats_incr(struct xe_gt *gt,
				      const enum xe_svm_copy_dir dir,
				      unsigned long npages,
				      ktime_t start)
433 {
434 s64 us_delta = xe_svm_stats_ktime_us_delta(start);
435
436 if (dir == XE_SVM_COPY_TO_VRAM) {
437 switch (npages) {
438 case 1:
439 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_DEVICE_COPY_US,
440 us_delta);
441 break;
442 case 16:
443 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_DEVICE_COPY_US,
444 us_delta);
445 break;
446 case 512:
447 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_DEVICE_COPY_US,
448 us_delta);
449 break;
450 }
451 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_US,
452 us_delta);
453 } else {
454 switch (npages) {
455 case 1:
456 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_CPU_COPY_US,
457 us_delta);
458 break;
459 case 16:
460 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_CPU_COPY_US,
461 us_delta);
462 break;
463 case 512:
464 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_CPU_COPY_US,
465 us_delta);
466 break;
467 }
468 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_US,
469 us_delta);
470 }
471 }
472
static int xe_svm_copy(struct page **pages,
		       struct drm_pagemap_addr *pagemap_addr,
		       unsigned long npages, const enum xe_svm_copy_dir dir)
476 {
477 struct xe_vram_region *vr = NULL;
478 struct xe_gt *gt = NULL;
479 struct xe_device *xe;
480 struct dma_fence *fence = NULL;
481 unsigned long i;
482 #define XE_VRAM_ADDR_INVALID ~0x0ull
483 u64 vram_addr = XE_VRAM_ADDR_INVALID;
484 int err = 0, pos = 0;
485 bool sram = dir == XE_SVM_COPY_TO_SRAM;
486 ktime_t start = xe_svm_stats_ktime_get();
487
	/*
	 * This flow is complex: it locates physically contiguous device pages,
	 * derives the starting physical address, and performs a single GPU
	 * copy for every 8M chunk in a DMA address array. Both device pages
	 * and DMA addresses may be sparsely populated. If either is NULL, a
	 * copy is triggered based on the current search state. The last GPU
	 * copy is waited on to ensure all copies are complete.
	 */
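	/*
	 * Worked example (hypothetical layout, 4K pages): starting at pos, a
	 * run of device pages is accumulated until one of the following stops
	 * it:
	 *   - a page whose DPA is not vram_addr + PAGE_SIZE * (i - pos)
	 *     (!match),
	 *   - 2048 pages, i.e. 8M / 4K, have been gathered (chunk), or
	 *   - the last page is reached (last).
	 * At that point a single xe_migrate_to_vram()/xe_migrate_from_vram()
	 * call copies the accumulated run and the search restarts from the
	 * current page.
	 */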
496
497 for (i = 0; i < npages; ++i) {
498 struct page *spage = pages[i];
499 struct dma_fence *__fence;
500 u64 __vram_addr;
501 bool match = false, chunk, last;
502
503 #define XE_MIGRATE_CHUNK_SIZE SZ_8M
504 chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
505 last = (i + 1) == npages;
506
		/* No CPU page and no device pages queued to copy */
508 if (!pagemap_addr[i].addr && vram_addr == XE_VRAM_ADDR_INVALID)
509 continue;
510
511 if (!vr && spage) {
512 vr = page_to_vr(spage);
513 gt = xe_migrate_exec_queue(vr->migrate)->gt;
514 xe = vr->xe;
515 }
516 XE_WARN_ON(spage && page_to_vr(spage) != vr);
517
		/*
		 * CPU page and device page valid, capture physical address on
		 * first device page, check if physically contiguous on
		 * subsequent device pages.
		 */
523 if (pagemap_addr[i].addr && spage) {
524 __vram_addr = xe_vram_region_page_to_dpa(vr, spage);
525 if (vram_addr == XE_VRAM_ADDR_INVALID) {
526 vram_addr = __vram_addr;
527 pos = i;
528 }
529
530 match = vram_addr + PAGE_SIZE * (i - pos) == __vram_addr;
531 /* Expected with contiguous memory */
532 xe_assert(vr->xe, match);
533
534 if (pagemap_addr[i].order) {
535 i += NR_PAGES(pagemap_addr[i].order) - 1;
536 chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
537 last = (i + 1) == npages;
538 }
539 }
540
541 /*
542 * Mismatched physical address, 8M copy chunk, or last page -
543 * trigger a copy.
544 */
545 if (!match || chunk || last) {
546 /*
547 * Extra page for first copy if last page and matching
548 * physical address.
549 */
550 int incr = (match && last) ? 1 : 0;
551
552 if (vram_addr != XE_VRAM_ADDR_INVALID) {
553 xe_svm_copy_kb_stats_incr(gt, dir,
554 (i - pos + incr) *
555 (PAGE_SIZE / SZ_1K));
556 if (sram) {
557 vm_dbg(&xe->drm,
558 "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
559 vram_addr,
560 (u64)pagemap_addr[pos].addr, i - pos + incr);
561 __fence = xe_migrate_from_vram(vr->migrate,
562 i - pos + incr,
563 vram_addr,
564 &pagemap_addr[pos]);
565 } else {
566 vm_dbg(&xe->drm,
567 "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
568 (u64)pagemap_addr[pos].addr, vram_addr,
569 i - pos + incr);
570 __fence = xe_migrate_to_vram(vr->migrate,
571 i - pos + incr,
572 &pagemap_addr[pos],
573 vram_addr);
574 }
575 if (IS_ERR(__fence)) {
576 err = PTR_ERR(__fence);
577 goto err_out;
578 }
579
580 dma_fence_put(fence);
581 fence = __fence;
582 }
583
584 /* Setup physical address of next device page */
585 if (pagemap_addr[i].addr && spage) {
586 vram_addr = __vram_addr;
587 pos = i;
588 } else {
589 vram_addr = XE_VRAM_ADDR_INVALID;
590 }
591
592 /* Extra mismatched device page, copy it */
593 if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
594 xe_svm_copy_kb_stats_incr(gt, dir,
595 (PAGE_SIZE / SZ_1K));
596 if (sram) {
597 vm_dbg(&xe->drm,
598 "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
599 vram_addr, (u64)pagemap_addr[pos].addr, 1);
600 __fence = xe_migrate_from_vram(vr->migrate, 1,
601 vram_addr,
602 &pagemap_addr[pos]);
603 } else {
604 vm_dbg(&xe->drm,
605 "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
606 (u64)pagemap_addr[pos].addr, vram_addr, 1);
607 __fence = xe_migrate_to_vram(vr->migrate, 1,
608 &pagemap_addr[pos],
609 vram_addr);
610 }
611 if (IS_ERR(__fence)) {
612 err = PTR_ERR(__fence);
613 goto err_out;
614 }
615
616 dma_fence_put(fence);
617 fence = __fence;
618 }
619 }
620 }
621
622 err_out:
623 /* Wait for all copies to complete */
624 if (fence) {
625 dma_fence_wait(fence, false);
626 dma_fence_put(fence);
627 }
628
	/*
	 * XXX: We can't derive the GT here (or anywhere in this function), but
	 * compute always uses the primary GT, so accumulate stats on the
	 * likely GT of the fault.
	 */
634 if (gt)
635 xe_svm_copy_us_stats_incr(gt, dir, npages, start);
636
637 return err;
638 #undef XE_MIGRATE_CHUNK_SIZE
639 #undef XE_VRAM_ADDR_INVALID
640 }
641
static int xe_svm_copy_to_devmem(struct page **pages,
				 struct drm_pagemap_addr *pagemap_addr,
				 unsigned long npages)
645 {
646 return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM);
647 }
648
static int xe_svm_copy_to_ram(struct page **pages,
			      struct drm_pagemap_addr *pagemap_addr,
			      unsigned long npages)
652 {
653 return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM);
654 }
655
static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
657 {
658 return container_of(devmem_allocation, struct xe_bo, devmem_allocation);
659 }
660
static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
662 {
663 struct xe_bo *bo = to_xe_bo(devmem_allocation);
664 struct xe_device *xe = xe_bo_device(bo);
665
666 xe_bo_put_async(bo);
667 xe_pm_runtime_put(xe);
668 }
669
static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
671 {
672 return PHYS_PFN(offset + vr->hpa_base);
673 }
674
static struct drm_buddy *vram_to_buddy(struct xe_vram_region *vram)
676 {
677 return &vram->ttm.mm;
678 }
679
static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
				      unsigned long npages, unsigned long *pfn)
682 {
683 struct xe_bo *bo = to_xe_bo(devmem_allocation);
684 struct ttm_resource *res = bo->ttm.resource;
685 struct list_head *blocks = &to_xe_ttm_vram_mgr_resource(res)->blocks;
686 struct drm_buddy_block *block;
687 int j = 0;
688
689 list_for_each_entry(block, blocks, link) {
690 struct xe_vram_region *vr = block->private;
691 struct drm_buddy *buddy = vram_to_buddy(vr);
692 u64 block_pfn = block_offset_to_pfn(vr, drm_buddy_block_offset(block));
693 int i;
694
695 for (i = 0; i < drm_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i)
696 pfn[j++] = block_pfn + i;
697 }
698
699 return 0;
700 }
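/*
 * Example (hypothetical allocation): a 2M buddy block at VRAM offset
 * 0x100000 with vr->hpa_base = 0x400000000 yields a starting pfn of
 * PHYS_PFN(0x400100000) and fills 512 consecutive entries of pfn[], one per
 * 4K device page of the block.
 */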
701
702 static const struct drm_pagemap_devmem_ops dpagemap_devmem_ops = {
703 .devmem_release = xe_svm_devmem_release,
704 .populate_devmem_pfn = xe_svm_populate_devmem_pfn,
705 .copy_to_devmem = xe_svm_copy_to_devmem,
706 .copy_to_ram = xe_svm_copy_to_ram,
707 };
708
709 #endif
710
711 static const struct drm_gpusvm_ops gpusvm_ops = {
712 .range_alloc = xe_svm_range_alloc,
713 .range_free = xe_svm_range_free,
714 .invalidate = xe_svm_invalidate,
715 };
716
717 static const unsigned long fault_chunk_sizes[] = {
718 SZ_2M,
719 SZ_64K,
720 SZ_4K,
721 };
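/*
 * The chunk sizes are listed largest to smallest; roughly speaking, the GPU
 * SVM core picks the largest chunk that fits the faulting address within the
 * CPU VMA (see drm_gpusvm_range_find_or_insert()), which is why the SVM stats
 * in this file track 2M, 64K and 4K ranges separately.
 */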
722
723 /**
724 * xe_svm_init() - SVM initialize
725 * @vm: The VM.
726 *
727 * Initialize SVM state which is embedded within the VM.
728 *
729 * Return: 0 on success, negative error code on error.
730 */
int xe_svm_init(struct xe_vm *vm)
732 {
733 int err;
734
735 if (vm->flags & XE_VM_FLAG_FAULT_MODE) {
736 spin_lock_init(&vm->svm.garbage_collector.lock);
737 INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
738 INIT_WORK(&vm->svm.garbage_collector.work,
739 xe_svm_garbage_collector_work_func);
740
741 err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
742 current->mm, 0, vm->size,
743 xe_modparam.svm_notifier_size * SZ_1M,
744 &gpusvm_ops, fault_chunk_sizes,
745 ARRAY_SIZE(fault_chunk_sizes));
746 drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
747 } else {
748 err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)",
749 &vm->xe->drm, NULL, 0, 0, 0, NULL,
750 NULL, 0);
751 }
752
753 return err;
754 }
755
756 /**
757 * xe_svm_close() - SVM close
758 * @vm: The VM.
759 *
760 * Close SVM state (i.e., stop and flush all SVM actions).
761 */
void xe_svm_close(struct xe_vm *vm)
763 {
764 xe_assert(vm->xe, xe_vm_is_closed(vm));
765 flush_work(&vm->svm.garbage_collector.work);
766 }
767
768 /**
769 * xe_svm_fini() - SVM finalize
770 * @vm: The VM.
771 *
772 * Finalize SVM state which is embedded within the VM.
773 */
void xe_svm_fini(struct xe_vm *vm)
775 {
776 xe_assert(vm->xe, xe_vm_is_closed(vm));
777
778 drm_gpusvm_fini(&vm->svm.gpusvm);
779 }
780
static bool xe_svm_range_is_valid(struct xe_svm_range *range,
				  struct xe_tile *tile,
				  bool devmem_only)
784 {
785 return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
786 range->tile_invalidated) &&
787 (!devmem_only || xe_svm_range_in_vram(range)));
788 }
789
/**
 * xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
 * @vm: xe_vm pointer
 * @range: Pointer to the SVM range structure
 *
 * xe_svm_range_migrate_to_smem() checks whether the range has pages in VRAM
 * and migrates them to SMEM.
 */
void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
798 {
799 if (xe_svm_range_in_vram(range))
800 drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
801 }
802
803 /**
804 * xe_svm_range_validate() - Check if the SVM range is valid
805 * @vm: xe_vm pointer
806 * @range: Pointer to the SVM range structure
807 * @tile_mask: Mask representing the tiles to be checked
 * @devmem_preferred: if true, the range needs to be in devmem
809 *
810 * The xe_svm_range_validate() function checks if a range is
811 * valid and located in the desired memory region.
812 *
813 * Return: true if the range is valid, false otherwise
814 */
bool xe_svm_range_validate(struct xe_vm *vm,
			   struct xe_svm_range *range,
			   u8 tile_mask, bool devmem_preferred)
818 {
819 bool ret;
820
821 xe_svm_notifier_lock(vm);
822
823 ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask &&
824 (devmem_preferred == range->base.pages.flags.has_devmem_pages);
825
826 xe_svm_notifier_unlock(vm);
827
828 return ret;
829 }
830
831 /**
832 * xe_svm_find_vma_start - Find start of CPU VMA
833 * @vm: xe_vm pointer
834 * @start: start address
835 * @end: end address
 * @vma: Pointer to struct xe_vma
 *
 * This function searches for a CPU VMA within the specified range
 * [start, end] in the given VM. It adjusts the range based on the xe_vma
 * start and end addresses. If no CPU VMA is found, it returns ULONG_MAX.
842 *
843 * Return: The starting address of the VMA within the range,
844 * or ULONG_MAX if no VMA is found
845 */
u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *vma)
847 {
848 return drm_gpusvm_find_vma_start(&vm->svm.gpusvm,
849 max(start, xe_vma_start(vma)),
850 min(end, xe_vma_end(vma)));
851 }
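/*
 * Example (hypothetical addresses): for start = 0x1000, end = 0x9000 and a
 * mirror VMA spanning [0x4000, 0x8000), the search window passed to
 * drm_gpusvm_find_vma_start() is clamped to [0x4000, 0x8000).
 */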
852
853 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
				      unsigned long start, unsigned long end,
				      struct mm_struct *mm,
				      unsigned long timeslice_ms)
858 {
859 struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap);
860 struct xe_device *xe = vr->xe;
861 struct device *dev = xe->drm.dev;
862 struct drm_buddy_block *block;
863 struct xe_validation_ctx vctx;
864 struct list_head *blocks;
865 struct drm_exec exec;
866 struct xe_bo *bo;
867 int err = 0, idx;
868
869 if (!drm_dev_enter(&xe->drm, &idx))
870 return -ENODEV;
871
872 xe_pm_runtime_get(xe);
873
874 xe_validation_guard(&vctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
875 bo = xe_bo_create_locked(xe, NULL, NULL, end - start,
876 ttm_bo_type_device,
877 (IS_DGFX(xe) ? XE_BO_FLAG_VRAM(vr) : XE_BO_FLAG_SYSTEM) |
878 XE_BO_FLAG_CPU_ADDR_MIRROR, &exec);
879 drm_exec_retry_on_contention(&exec);
880 if (IS_ERR(bo)) {
881 err = PTR_ERR(bo);
882 xe_validation_retry_on_oom(&vctx, &err);
883 break;
884 }
885
886 drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
887 &dpagemap_devmem_ops, dpagemap, end - start);
888
889 blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
890 list_for_each_entry(block, blocks, link)
891 block->private = vr;
892
893 xe_bo_get(bo);
894
895 /* Ensure the device has a pm ref while there are device pages active. */
896 xe_pm_runtime_get_noresume(xe);
897 err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
898 start, end, timeslice_ms,
899 xe_svm_devm_owner(xe));
900 if (err)
901 xe_svm_devmem_release(&bo->devmem_allocation);
902 xe_bo_unlock(bo);
903 xe_bo_put(bo);
904 }
905 xe_pm_runtime_put(xe);
906 drm_dev_exit(idx);
907
908 return err;
909 }
910 #endif
911
static bool supports_4K_migration(struct xe_device *xe)
913 {
914 if (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
915 return false;
916
917 return true;
918 }
919
920 /**
921 * xe_svm_range_needs_migrate_to_vram() - SVM range needs migrate to VRAM or not
922 * @range: SVM range for which migration needs to be decided
923 * @vma: vma which has range
924 * @preferred_region_is_vram: preferred region for range is vram
925 *
 * Return: True if the range needs migration and migration is supported, else false
927 */
bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
					bool preferred_region_is_vram)
930 {
931 struct xe_vm *vm = range_to_vm(&range->base);
932 u64 range_size = xe_svm_range_size(range);
933
934 if (!range->base.pages.flags.migrate_devmem || !preferred_region_is_vram)
935 return false;
936
937 xe_assert(vm->xe, IS_DGFX(vm->xe));
938
939 if (xe_svm_range_in_vram(range)) {
940 drm_info(&vm->xe->drm, "Range is already in VRAM\n");
941 return false;
942 }
943
944 if (range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
945 drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
946 return false;
947 }
948
949 return true;
950 }
951
952 #define DECL_SVM_RANGE_COUNT_STATS(elem, stat) \
953 static void xe_svm_range_##elem##_count_stats_incr(struct xe_gt *gt, \
954 struct xe_svm_range *range) \
955 { \
956 switch (xe_svm_range_size(range)) { \
957 case SZ_4K: \
958 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_COUNT, 1); \
959 break; \
960 case SZ_64K: \
961 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_COUNT, 1); \
962 break; \
963 case SZ_2M: \
964 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_COUNT, 1); \
965 break; \
966 } \
967 } \
968
DECL_SVM_RANGE_COUNT_STATS(fault, PAGEFAULT)
970 DECL_SVM_RANGE_COUNT_STATS(valid_fault, VALID_PAGEFAULT)
971 DECL_SVM_RANGE_COUNT_STATS(migrate, MIGRATE)
972
973 #define DECL_SVM_RANGE_US_STATS(elem, stat) \
974 static void xe_svm_range_##elem##_us_stats_incr(struct xe_gt *gt, \
975 struct xe_svm_range *range, \
976 ktime_t start) \
977 { \
978 s64 us_delta = xe_svm_stats_ktime_us_delta(start); \
979 \
980 switch (xe_svm_range_size(range)) { \
981 case SZ_4K: \
982 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_US, \
983 us_delta); \
984 break; \
985 case SZ_64K: \
986 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_US, \
987 us_delta); \
988 break; \
989 case SZ_2M: \
990 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_US, \
991 us_delta); \
992 break; \
993 } \
994 } \
995
996 DECL_SVM_RANGE_US_STATS(migrate, MIGRATE)
997 DECL_SVM_RANGE_US_STATS(get_pages, GET_PAGES)
998 DECL_SVM_RANGE_US_STATS(bind, BIND)
999 DECL_SVM_RANGE_US_STATS(fault, PAGEFAULT)
1000
1001 static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
1002 struct xe_gt *gt, u64 fault_addr,
1003 bool need_vram)
1004 {
1005 int devmem_possible = IS_DGFX(vm->xe) &&
1006 IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
1007 struct drm_gpusvm_ctx ctx = {
1008 .read_only = xe_vma_read_only(vma),
1009 .devmem_possible = devmem_possible,
1010 .check_pages_threshold = devmem_possible ? SZ_64K : 0,
1011 .devmem_only = need_vram && devmem_possible,
1012 .timeslice_ms = need_vram && devmem_possible ?
1013 vm->xe->atomic_svm_timeslice_ms : 0,
1014 .device_private_page_owner = xe_svm_devm_owner(vm->xe),
1015 };
1016 struct xe_validation_ctx vctx;
1017 struct drm_exec exec;
1018 struct xe_svm_range *range;
1019 struct dma_fence *fence;
1020 struct drm_pagemap *dpagemap;
1021 struct xe_tile *tile = gt_to_tile(gt);
1022 int migrate_try_count = ctx.devmem_only ? 3 : 1;
1023 ktime_t start = xe_svm_stats_ktime_get(), bind_start, get_pages_start;
1024 int err;
1025
1026 lockdep_assert_held_write(&vm->lock);
1027 xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
1028
1029 xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, 1);
1030
1031 retry:
	/* Always process UNMAPs first so the view of SVM ranges is current */
1033 err = xe_svm_garbage_collector(vm);
1034 if (err)
1035 return err;
1036
1037 range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
1038
1039 if (IS_ERR(range))
1040 return PTR_ERR(range);
1041
1042 xe_svm_range_fault_count_stats_incr(gt, range);
1043
1044 if (ctx.devmem_only && !range->base.pages.flags.migrate_devmem) {
1045 err = -EACCES;
1046 goto out;
1047 }
1048
1049 if (xe_svm_range_is_valid(range, tile, ctx.devmem_only)) {
1050 xe_svm_range_valid_fault_count_stats_incr(gt, range);
1051 range_debug(range, "PAGE FAULT - VALID");
1052 goto out;
1053 }
1054
1055 range_debug(range, "PAGE FAULT");
1056
1057 dpagemap = xe_vma_resolve_pagemap(vma, tile);
1058 if (--migrate_try_count >= 0 &&
1059 xe_svm_range_needs_migrate_to_vram(range, vma, !!dpagemap || ctx.devmem_only)) {
1060 ktime_t migrate_start = xe_svm_stats_ktime_get();
1061
		/*
		 * TODO: For multi-device, dpagemap will be used to find the
		 * remote tile and remote device. Will need to modify
		 * xe_svm_alloc_vram() to use dpagemap for future multi-device
		 * support.
		 */
1067 xe_svm_range_migrate_count_stats_incr(gt, range);
1068 err = xe_svm_alloc_vram(tile, range, &ctx);
1069 xe_svm_range_migrate_us_stats_incr(gt, range, migrate_start);
1070 ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
1071 if (err) {
1072 if (migrate_try_count || !ctx.devmem_only) {
1073 drm_dbg(&vm->xe->drm,
1074 "VRAM allocation failed, falling back to retrying fault, asid=%u, errno=%pe\n",
1075 vm->usm.asid, ERR_PTR(err));
1076 goto retry;
1077 } else {
1078 drm_err(&vm->xe->drm,
1079 "VRAM allocation failed, retry count exceeded, asid=%u, errno=%pe\n",
1080 vm->usm.asid, ERR_PTR(err));
1081 return err;
1082 }
1083 }
1084 }
1085
1086 get_pages_start = xe_svm_stats_ktime_get();
1087
1088 range_debug(range, "GET PAGES");
1089 err = xe_svm_range_get_pages(vm, range, &ctx);
1090 /* Corner where CPU mappings have changed */
1091 if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
1092 ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
1093 if (migrate_try_count > 0 || !ctx.devmem_only) {
1094 drm_dbg(&vm->xe->drm,
1095 "Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
1096 vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
1097 range_debug(range, "PAGE FAULT - RETRY PAGES");
1098 goto retry;
1099 } else {
1100 drm_err(&vm->xe->drm,
1101 "Get pages failed, retry count exceeded, asid=%u, gpusvm=%p, errno=%pe\n",
1102 vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
1103 }
1104 }
1105 if (err) {
1106 range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
1107 goto out;
1108 }
1109
1110 xe_svm_range_get_pages_us_stats_incr(gt, range, get_pages_start);
1111 range_debug(range, "PAGE FAULT - BIND");
1112
1113 bind_start = xe_svm_stats_ktime_get();
1114 xe_validation_guard(&vctx, &vm->xe->val, &exec, (struct xe_val_flags) {}, err) {
1115 err = xe_vm_drm_exec_lock(vm, &exec);
1116 drm_exec_retry_on_contention(&exec);
1117
1118 xe_vm_set_validation_exec(vm, &exec);
1119 fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
1120 xe_vm_set_validation_exec(vm, NULL);
1121 if (IS_ERR(fence)) {
1122 drm_exec_retry_on_contention(&exec);
1123 err = PTR_ERR(fence);
1124 xe_validation_retry_on_oom(&vctx, &err);
1125 xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
1126 break;
1127 }
1128 }
1129 if (err)
1130 goto err_out;
1131
1132 dma_fence_wait(fence, false);
1133 dma_fence_put(fence);
1134 xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
1135
1136 out:
1137 xe_svm_range_fault_us_stats_incr(gt, range, start);
1138 return 0;
1139
1140 err_out:
1141 if (err == -EAGAIN) {
1142 ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
1143 range_debug(range, "PAGE FAULT - RETRY BIND");
1144 goto retry;
1145 }
1146
1147 return err;
1148 }
1149
1150 /**
1151 * xe_svm_handle_pagefault() - SVM handle page fault
1152 * @vm: The VM.
1153 * @vma: The CPU address mirror VMA.
1154 * @gt: The gt upon the fault occurred.
1155 * @fault_addr: The GPU fault address.
1156 * @atomic: The fault atomic access bit.
1157 *
1158 * Create GPU bindings for a SVM page fault. Optionally migrate to device
1159 * memory.
1160 *
1161 * Return: 0 on success, negative error code on error.
1162 */
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic)
1166 {
1167 int need_vram, ret;
1168 retry:
1169 need_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic);
1170 if (need_vram < 0)
1171 return need_vram;
1172
1173 ret = __xe_svm_handle_pagefault(vm, vma, gt, fault_addr,
1174 need_vram ? true : false);
1175 if (ret == -EAGAIN) {
1176 /*
1177 * Retry once on -EAGAIN to re-lookup the VMA, as the original VMA
1178 * may have been split by xe_svm_range_set_default_attr.
1179 */
1180 vma = xe_vm_find_vma_by_addr(vm, fault_addr);
1181 if (!vma)
1182 return -EINVAL;
1183
1184 goto retry;
1185 }
1186 return ret;
1187 }
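/*
 * Usage sketch (simplified, not a verbatim copy of the GT page-fault
 * handler): the caller is expected to hold the VM lock in write mode and to
 * have resolved a CPU-address-mirror VMA for the faulting address:
 *
 *	down_write(&vm->lock);
 *	vma = xe_vm_find_vma_by_addr(vm, fault_addr);
 *	if (vma && xe_vma_is_cpu_addr_mirror(vma))
 *		err = xe_svm_handle_pagefault(vm, vma, gt, fault_addr, atomic);
 *	up_write(&vm->lock);
 */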
1188
1189 /**
1190 * xe_svm_has_mapping() - SVM has mappings
1191 * @vm: The VM.
1192 * @start: Start address.
1193 * @end: End address.
1194 *
1195 * Check if an address range has SVM mappings.
1196 *
1197 * Return: True if address range has a SVM mapping, False otherwise
1198 */
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
1200 {
1201 return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
1202 }
1203
/**
 * xe_svm_unmap_address_range() - Unmap SVM mappings and ranges
 * @vm: The VM
 * @start: start addr
 * @end: end addr
 *
 * This function unmaps SVM ranges if the start or end address is inside them.
 */
void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
1213 {
1214 struct drm_gpusvm_notifier *notifier, *next;
1215
1216 lockdep_assert_held_write(&vm->lock);
1217
1218 drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) {
1219 struct drm_gpusvm_range *range, *__next;
1220
1221 drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
1222 if (start > drm_gpusvm_range_start(range) ||
1223 end < drm_gpusvm_range_end(range)) {
1224 if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
1225 drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
1226 drm_gpusvm_range_get(range);
1227 __xe_svm_garbage_collector(vm, to_xe_range(range));
1228 if (!list_empty(&to_xe_range(range)->garbage_collector_link)) {
1229 spin_lock(&vm->svm.garbage_collector.lock);
1230 list_del(&to_xe_range(range)->garbage_collector_link);
1231 spin_unlock(&vm->svm.garbage_collector.lock);
1232 }
1233 drm_gpusvm_range_put(range);
1234 }
1235 }
1236 }
1237 }
1238
1239 /**
1240 * xe_svm_bo_evict() - SVM evict BO to system memory
1241 * @bo: BO to evict
1242 *
1243 * SVM evict BO to system memory. GPU SVM layer ensures all device pages
1244 * are evicted before returning.
1245 *
 * Return: 0 on success, standard error code otherwise
1247 */
int xe_svm_bo_evict(struct xe_bo *bo)
1249 {
1250 return drm_pagemap_evict_to_ram(&bo->devmem_allocation);
1251 }
1252
1253 /**
 * xe_svm_range_find_or_insert() - Find or insert GPU SVM range
1255 * @vm: xe_vm pointer
1256 * @addr: address for which range needs to be found/inserted
1257 * @vma: Pointer to struct xe_vma which mirrors CPU
1258 * @ctx: GPU SVM context
1259 *
 * This function finds or inserts a newly allocated SVM range based on the
1261 * address.
1262 *
1263 * Return: Pointer to the SVM range on success, ERR_PTR() on failure.
1264 */
struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
1267 {
1268 struct drm_gpusvm_range *r;
1269
1270 r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
1271 xe_vma_start(vma), xe_vma_end(vma), ctx);
1272 if (IS_ERR(r))
1273 return ERR_CAST(r);
1274
1275 return to_xe_range(r);
1276 }
1277
1278 /**
1279 * xe_svm_range_get_pages() - Get pages for a SVM range
1280 * @vm: Pointer to the struct xe_vm
1281 * @range: Pointer to the xe SVM range structure
1282 * @ctx: GPU SVM context
1283 *
1284 * This function gets pages for a SVM range and ensures they are mapped for
1285 * DMA access. In case of failure with -EOPNOTSUPP, it evicts the range.
1286 *
1287 * Return: 0 on success, negative error code on failure.
1288 */
int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
			   struct drm_gpusvm_ctx *ctx)
1291 {
1292 int err = 0;
1293
1294 err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
1295 if (err == -EOPNOTSUPP) {
1296 range_debug(range, "PAGE FAULT - EVICT PAGES");
1297 drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
1298 }
1299
1300 return err;
1301 }
1302
1303 /**
1304 * xe_svm_ranges_zap_ptes_in_range - clear ptes of svm ranges in input range
1305 * @vm: Pointer to the xe_vm structure
1306 * @start: Start of the input range
1307 * @end: End of the input range
1308 *
1309 * This function removes the page table entries (PTEs) associated
 * with the SVM ranges within the given input start and end.
 *
 * Return: tile_mask indicating which GTs need to be TLB invalidated.
1313 */
u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
1315 {
1316 struct drm_gpusvm_notifier *notifier;
1317 struct xe_svm_range *range;
1318 u64 adj_start, adj_end;
1319 struct xe_tile *tile;
1320 u8 tile_mask = 0;
1321 u8 id;
1322
1323 lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
1324 lockdep_is_held_type(&vm->lock, 0));
1325
1326 drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) {
1327 struct drm_gpusvm_range *r = NULL;
1328
1329 adj_start = max(start, drm_gpusvm_notifier_start(notifier));
1330 adj_end = min(end, drm_gpusvm_notifier_end(notifier));
1331 drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) {
1332 range = to_xe_range(r);
1333 for_each_tile(tile, vm->xe, id) {
1334 if (xe_pt_zap_ptes_range(tile, vm, range)) {
1335 tile_mask |= BIT(id);
1336 /*
1337 * WRITE_ONCE pairs with READ_ONCE in
1338 * xe_vm_has_valid_gpu_mapping().
1339 * Must not fail after setting
1340 * tile_invalidated and before
1341 * TLB invalidation.
1342 */
1343 WRITE_ONCE(range->tile_invalidated,
1344 range->tile_invalidated | BIT(id));
1345 }
1346 }
1347 }
1348 }
1349
1350 return tile_mask;
1351 }
1352
1353 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
1354
static struct drm_pagemap *tile_local_pagemap(struct xe_tile *tile)
1356 {
1357 return &tile->mem.vram->dpagemap;
1358 }
1359
1360 /**
1361 * xe_vma_resolve_pagemap - Resolve the appropriate DRM pagemap for a VMA
1362 * @vma: Pointer to the xe_vma structure containing memory attributes
1363 * @tile: Pointer to the xe_tile structure used as fallback for VRAM mapping
1364 *
1365 * This function determines the correct DRM pagemap to use for a given VMA.
1366 * It first checks if a valid devmem_fd is provided in the VMA's preferred
 * location. If the devmem_fd is negative, it returns NULL, indicating that no
 * pagemap is available and that smem is to be used as the preferred location.
 * If the devmem_fd is equal to the default faulting GT identifier, it returns
 * the VRAM pagemap associated with the tile.
1371 *
1372 * Future support for multi-device configurations may use drm_pagemap_from_fd()
1373 * to resolve pagemaps from arbitrary file descriptors.
1374 *
1375 * Return: A pointer to the resolved drm_pagemap, or NULL if none is applicable.
1376 */
struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
1378 {
1379 s32 fd = (s32)vma->attr.preferred_loc.devmem_fd;
1380
1381 if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM)
1382 return NULL;
1383
1384 if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE)
1385 return IS_DGFX(tile_to_xe(tile)) ? tile_local_pagemap(tile) : NULL;
1386
1387 /* TODO: Support multi-device with drm_pagemap_from_fd(fd) */
1388 return NULL;
1389 }
1390
1391 /**
 * xe_svm_alloc_vram() - Allocate device memory pages for range,
1393 * migrating existing data.
1394 * @tile: tile to allocate vram from
1395 * @range: SVM range
1396 * @ctx: DRM GPU SVM context
1397 *
1398 * Return: 0 on success, error code on failure.
1399 */
int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
		      const struct drm_gpusvm_ctx *ctx)
1402 {
1403 struct drm_pagemap *dpagemap;
1404
1405 xe_assert(tile_to_xe(tile), range->base.pages.flags.migrate_devmem);
1406 range_debug(range, "ALLOCATE VRAM");
1407
1408 dpagemap = tile_local_pagemap(tile);
1409 return drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
1410 xe_svm_range_end(range),
1411 range->base.gpusvm->mm,
1412 ctx->timeslice_ms);
1413 }
1414
1415 static struct drm_pagemap_addr
xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
			  struct device *dev,
			  struct page *page,
			  unsigned int order,
			  enum dma_data_direction dir)
1421 {
1422 struct device *pgmap_dev = dpagemap->dev;
1423 enum drm_interconnect_protocol prot;
1424 dma_addr_t addr;
1425
1426 if (pgmap_dev == dev) {
1427 addr = xe_vram_region_page_to_dpa(page_to_vr(page), page);
1428 prot = XE_INTERCONNECT_VRAM;
1429 } else {
1430 addr = DMA_MAPPING_ERROR;
1431 prot = 0;
1432 }
1433
1434 return drm_pagemap_addr_encode(addr, prot, order, dir);
1435 }
1436
1437 static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
1438 .device_map = xe_drm_pagemap_device_map,
1439 .populate_mm = xe_drm_pagemap_populate_mm,
1440 };
1441
1442 /**
 * xe_devm_add() - Remap and provide memmap backing for device memory
 * @tile: tile that the memory region belongs to
 * @vr: vram memory region to remap
 *
 * This remaps device memory to the host physical address space and creates
 * struct pages to back device memory.
 *
 * Return: 0 on success, standard error code otherwise
1451 */
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
1453 {
1454 struct xe_device *xe = tile_to_xe(tile);
1455 struct device *dev = &to_pci_dev(xe->drm.dev)->dev;
1456 struct resource *res;
1457 void *addr;
1458 int ret;
1459
1460 res = devm_request_free_mem_region(dev, &iomem_resource,
1461 vr->usable_size);
1462 if (IS_ERR(res)) {
1463 ret = PTR_ERR(res);
1464 return ret;
1465 }
1466
1467 vr->pagemap.type = MEMORY_DEVICE_PRIVATE;
1468 vr->pagemap.range.start = res->start;
1469 vr->pagemap.range.end = res->end;
1470 vr->pagemap.nr_range = 1;
1471 vr->pagemap.ops = drm_pagemap_pagemap_ops_get();
1472 vr->pagemap.owner = xe_svm_devm_owner(xe);
1473 addr = devm_memremap_pages(dev, &vr->pagemap);
1474
1475 vr->dpagemap.dev = dev;
1476 vr->dpagemap.ops = &xe_drm_pagemap_ops;
1477
1478 if (IS_ERR(addr)) {
1479 devm_release_mem_region(dev, res->start, resource_size(res));
1480 ret = PTR_ERR(addr);
1481 drm_err(&xe->drm, "Failed to remap tile %d memory, errno %pe\n",
1482 tile->id, ERR_PTR(ret));
1483 return ret;
1484 }
1485 vr->hpa_base = res->start;
1486
1487 drm_dbg(&xe->drm, "Added tile %d memory [%llx-%llx] to devm, remapped to %pr\n",
1488 tile->id, vr->io_start, vr->io_start + vr->usable_size, res);
1489 return 0;
1490 }
1491 #else
int xe_svm_alloc_vram(struct xe_tile *tile,
		      struct xe_svm_range *range,
		      const struct drm_gpusvm_ctx *ctx)
1495 {
1496 return -EOPNOTSUPP;
1497 }
1498
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
1500 {
1501 return 0;
1502 }
1503
struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
1505 {
1506 return NULL;
1507 }
1508 #endif
1509
1510 /**
1511 * xe_svm_flush() - SVM flush
1512 * @vm: The VM.
1513 *
1514 * Flush all SVM actions.
1515 */
void xe_svm_flush(struct xe_vm *vm)
1517 {
1518 if (xe_vm_in_fault_mode(vm))
1519 flush_work(&vm->svm.garbage_collector.work);
1520 }
1521