// SPDX-License-Identifier: MIT
/*
 * Copyright © 2024 Intel Corporation
 */

#include <drm/drm_drv.h>

#include "xe_bo.h"
#include "xe_gt_stats.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_module.h"
#include "xe_pm.h"
#include "xe_pt.h"
#include "xe_svm.h"
#include "xe_tile.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vm.h"
#include "xe_vm_types.h"

static bool xe_svm_range_in_vram(struct xe_svm_range *range)
{
	/*
	 * Advisory-only check of whether the range is currently backed by VRAM
	 * memory.
	 */

	struct drm_gpusvm_range_flags flags = {
		/* Pairs with WRITE_ONCE in drm_gpusvm.c */
		.__flags = READ_ONCE(range->base.flags.__flags),
	};

	return flags.has_devmem_pages;
}

static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
{
	/* Not reliable without notifier lock */
	return xe_svm_range_in_vram(range) && range->tile_present;
}

static struct xe_vm *gpusvm_to_vm(struct drm_gpusvm *gpusvm)
{
	return container_of(gpusvm, struct xe_vm, svm.gpusvm);
}

static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
{
	return gpusvm_to_vm(r->gpusvm);
}

#define range_debug(r__, operation__)	\
	vm_dbg(&range_to_vm(&(r__)->base)->xe->drm,	\
	       "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
	       "start=0x%014lx, end=0x%014lx, size=%lu",	\
	       (operation__), range_to_vm(&(r__)->base)->usm.asid,	\
	       (r__)->base.gpusvm,	\
	       xe_svm_range_in_vram((r__)) ? 1 : 0,	\
	       xe_svm_range_has_vram_binding((r__)) ? 1 : 0,	\
	       (r__)->base.notifier_seq,	\
	       xe_svm_range_start((r__)), xe_svm_range_end((r__)),	\
	       xe_svm_range_size((r__)))

void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
	range_debug(range, operation);
}

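/*
 * The struct xe_device pointer doubles as the device-private owner token: it
 * is passed to GPU SVM and used as the pagemap owner so that device pages
 * created by this driver instance can be recognized as Xe-owned.
 */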
static void *xe_svm_devm_owner(struct xe_device *xe)
{
	return xe;
}

static struct drm_gpusvm_range *
xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
{
	struct xe_svm_range *range;

	range = kzalloc(sizeof(*range), GFP_KERNEL);
	if (!range)
		return NULL;

	INIT_LIST_HEAD(&range->garbage_collector_link);
	xe_vm_get(gpusvm_to_vm(gpusvm));

	return &range->base;
}

static void xe_svm_range_free(struct drm_gpusvm_range *range)
{
	xe_vm_put(range_to_vm(range));
	kfree(range);
}

static void
xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
				   const struct mmu_notifier_range *mmu_range)
{
	struct xe_device *xe = vm->xe;

	range_debug(range, "GARBAGE COLLECTOR ADD");

	drm_gpusvm_range_set_unmapped(&range->base, mmu_range);

	spin_lock(&vm->svm.garbage_collector.lock);
	if (list_empty(&range->garbage_collector_link))
		list_add_tail(&range->garbage_collector_link,
			      &vm->svm.garbage_collector.range_list);
	spin_unlock(&vm->svm.garbage_collector.lock);

	queue_work(xe_device_get_root_tile(xe)->primary_gt->usm.pf_wq,
		   &vm->svm.garbage_collector.work);
}

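/*
 * Zap the GPU PTEs backing @r on every tile with a binding and return a mask
 * of the tiles that need a TLB invalidation. The invalidation bounds are
 * widened to cover the whole SVM range.
 */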
static u8
xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
				  const struct mmu_notifier_range *mmu_range,
				  u64 *adj_start, u64 *adj_end)
{
	struct xe_svm_range *range = to_xe_range(r);
	struct xe_device *xe = vm->xe;
	struct xe_tile *tile;
	u8 tile_mask = 0;
	u8 id;

	xe_svm_assert_in_notifier(vm);

	range_debug(range, "NOTIFIER");

	/* Skip if already unmapped or if no bindings exist */
	if (range->base.flags.unmapped || !range->tile_present)
		return 0;

	range_debug(range, "NOTIFIER - EXECUTE");

	/* Adjust invalidation to range boundaries */
	*adj_start = min(xe_svm_range_start(range), mmu_range->start);
	*adj_end = max(xe_svm_range_end(range), mmu_range->end);

	/*
	 * XXX: Ideally would zap PTEs in one shot in xe_svm_invalidate but the
	 * invalidation code can't correctly cope with sparse ranges or
	 * invalidations spanning multiple ranges.
	 */
	for_each_tile(tile, xe, id)
		if (xe_pt_zap_ptes_range(tile, vm, range)) {
			tile_mask |= BIT(id);
			/*
			 * WRITE_ONCE pairs with READ_ONCE in
			 * xe_vm_has_valid_gpu_mapping()
			 */
			WRITE_ONCE(range->tile_invalidated,
				   range->tile_invalidated | BIT(id));
		}

	return tile_mask;
}

static void
xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r,
				const struct mmu_notifier_range *mmu_range)
{
	struct drm_gpusvm_ctx ctx = { .in_notifier = true, };

	xe_svm_assert_in_notifier(vm);

	drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx);
	if (!xe_vm_is_closed(vm) && mmu_range->event == MMU_NOTIFY_UNMAP)
		xe_svm_garbage_collector_add_range(vm, to_xe_range(r),
						   mmu_range);
}

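/*
 * MMU notifier invalidation callback: clamp the invalidation to the notifier
 * boundaries, wait for pending fences on the VM's reservation object, zap PTEs
 * and issue a TLB invalidation for every affected tile, then unmap the dma
 * mappings and, on an unmap event, queue the ranges for garbage collection.
 */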
static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
			      struct drm_gpusvm_notifier *notifier,
			      const struct mmu_notifier_range *mmu_range)
{
	struct xe_vm *vm = gpusvm_to_vm(gpusvm);
	struct xe_device *xe = vm->xe;
	struct drm_gpusvm_range *r, *first;
	u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
	u8 tile_mask = 0;
	long err;

	xe_svm_assert_in_notifier(vm);

	vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm,
	       "INVALIDATE: asid=%u, gpusvm=%p, seqno=%lu, start=0x%016lx, end=0x%016lx, event=%d",
	       vm->usm.asid, gpusvm, notifier->notifier.invalidate_seq,
	       mmu_range->start, mmu_range->end, mmu_range->event);

	/* Adjust invalidation to notifier boundaries */
	adj_start = max(drm_gpusvm_notifier_start(notifier), adj_start);
	adj_end = min(drm_gpusvm_notifier_end(notifier), adj_end);

	first = drm_gpusvm_range_find(notifier, adj_start, adj_end);
	if (!first)
		return;

	/*
	 * PTs may be getting destroyed, so it is not safe to touch them here,
	 * but they should already be invalidated at this point in time.
	 * Regardless, we still need to ensure any dma mappings are unmapped
	 * here.
	 */
	if (xe_vm_is_closed(vm))
		goto range_notifier_event_end;

	/*
	 * XXX: Less than ideal to always wait on VM's resv slots if an
	 * invalidation is not required. Could walk the range list twice to
	 * figure out if an invalidation is needed, but that is also not ideal.
	 */
	err = dma_resv_wait_timeout(xe_vm_resv(vm),
				    DMA_RESV_USAGE_BOOKKEEP,
				    false, MAX_SCHEDULE_TIMEOUT);
	XE_WARN_ON(err <= 0);

	r = first;
	drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
		tile_mask |= xe_svm_range_notifier_event_begin(vm, r, mmu_range,
							       &adj_start,
							       &adj_end);
	if (!tile_mask)
		goto range_notifier_event_end;

	xe_device_wmb(xe);

	err = xe_vm_range_tilemask_tlb_invalidation(vm, adj_start, adj_end, tile_mask);
	WARN_ON_ONCE(err);

range_notifier_event_end:
	r = first;
	drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
		xe_svm_range_notifier_event_end(vm, r, mmu_range);
}

static int __xe_svm_garbage_collector(struct xe_vm *vm,
				      struct xe_svm_range *range)
{
	struct dma_fence *fence;

	range_debug(range, "GARBAGE COLLECTOR");

	xe_vm_lock(vm, false);
	fence = xe_vm_range_unbind(vm, range);
	xe_vm_unlock(vm);
	if (IS_ERR(fence))
		return PTR_ERR(fence);
	dma_fence_put(fence);

	drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);

	return 0;
}

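/*
 * Drain the VM's garbage collector list: unbind and remove every SVM range
 * that was unmapped by the CPU. Requires vm->lock held for write; the VM is
 * killed if an unbind fails.
 */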
static int xe_svm_garbage_collector(struct xe_vm *vm)
{
	struct xe_svm_range *range;
	int err;

	lockdep_assert_held_write(&vm->lock);

	if (xe_vm_is_closed_or_banned(vm))
		return -ENOENT;

	spin_lock(&vm->svm.garbage_collector.lock);
	for (;;) {
		range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
						 typeof(*range),
						 garbage_collector_link);
		if (!range)
			break;

		list_del(&range->garbage_collector_link);
		spin_unlock(&vm->svm.garbage_collector.lock);

		err = __xe_svm_garbage_collector(vm, range);
		if (err) {
			drm_warn(&vm->xe->drm,
				 "Garbage collection failed: %pe\n",
				 ERR_PTR(err));
			xe_vm_kill(vm, true);
			return err;
		}

		spin_lock(&vm->svm.garbage_collector.lock);
	}
	spin_unlock(&vm->svm.garbage_collector.lock);

	return 0;
}

static void xe_svm_garbage_collector_work_func(struct work_struct *w)
{
	struct xe_vm *vm = container_of(w, struct xe_vm,
					svm.garbage_collector.work);

	down_write(&vm->lock);
	xe_svm_garbage_collector(vm);
	up_write(&vm->lock);
}

#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)

static struct xe_vram_region *page_to_vr(struct page *page)
{
	return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
}

static struct xe_tile *vr_to_tile(struct xe_vram_region *vr)
{
	return container_of(vr, struct xe_tile, mem.vram);
}

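/*
 * Translate a device-private page to its device physical address (DPA) by
 * offsetting from the host physical base of the VRAM region.
 */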
static u64 xe_vram_region_page_to_dpa(struct xe_vram_region *vr,
				      struct page *page)
{
	u64 dpa;
	struct xe_tile *tile = vr_to_tile(vr);
	u64 pfn = page_to_pfn(page);
	u64 offset;

	xe_tile_assert(tile, is_device_private_page(page));
	xe_tile_assert(tile, (pfn << PAGE_SHIFT) >= vr->hpa_base);

	offset = (pfn << PAGE_SHIFT) - vr->hpa_base;
	dpa = vr->dpa_base + offset;

	return dpa;
}

enum xe_svm_copy_dir {
	XE_SVM_COPY_TO_VRAM,
	XE_SVM_COPY_TO_SRAM,
};

static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
		       unsigned long npages, const enum xe_svm_copy_dir dir)
{
	struct xe_vram_region *vr = NULL;
	struct xe_tile *tile;
	struct dma_fence *fence = NULL;
	unsigned long i;
#define XE_VRAM_ADDR_INVALID	~0x0ull
	u64 vram_addr = XE_VRAM_ADDR_INVALID;
	int err = 0, pos = 0;
	bool sram = dir == XE_SVM_COPY_TO_SRAM;

	/*
	 * This flow is complex: it locates physically contiguous device pages,
	 * derives the starting physical address, and performs a single GPU copy
	 * for every 8M chunk in a DMA address array. Both device pages and
	 * DMA addresses may be sparsely populated. If either is NULL, a copy is
	 * triggered based on the current search state. The last GPU copy is
	 * waited on to ensure all copies are complete.
	 */
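
	/*
	 * For example, with 4K pages an 8M chunk spans 2048 pages, so a
	 * physically contiguous 10M region produces one 8M copy followed by
	 * one 2M copy, and only the final fence is waited on.
	 */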

	for (i = 0; i < npages; ++i) {
		struct page *spage = pages[i];
		struct dma_fence *__fence;
		u64 __vram_addr;
		bool match = false, chunk, last;

#define XE_MIGRATE_CHUNK_SIZE	SZ_8M
		chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
		last = (i + 1) == npages;

		/* No CPU page and no device pages queued to copy */
		if (!dma_addr[i] && vram_addr == XE_VRAM_ADDR_INVALID)
			continue;

		if (!vr && spage) {
			vr = page_to_vr(spage);
			tile = vr_to_tile(vr);
		}
		XE_WARN_ON(spage && page_to_vr(spage) != vr);

		/*
		 * CPU page and device page valid, capture physical address on
		 * first device page, check if physically contiguous on
		 * subsequent device pages.
		 */
		if (dma_addr[i] && spage) {
			__vram_addr = xe_vram_region_page_to_dpa(vr, spage);
			if (vram_addr == XE_VRAM_ADDR_INVALID) {
				vram_addr = __vram_addr;
				pos = i;
			}

			match = vram_addr + PAGE_SIZE * (i - pos) == __vram_addr;
		}

		/*
		 * Mismatched physical address, 8M copy chunk, or last page -
		 * trigger a copy.
		 */
		if (!match || chunk || last) {
			/*
			 * Extra page for first copy if last page and matching
			 * physical address.
			 */
			int incr = (match && last) ? 1 : 0;

			if (vram_addr != XE_VRAM_ADDR_INVALID) {
				if (sram) {
					vm_dbg(&tile->xe->drm,
					       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
					       vram_addr, (u64)dma_addr[pos], i - pos + incr);
					__fence = xe_migrate_from_vram(tile->migrate,
								       i - pos + incr,
								       vram_addr,
								       dma_addr + pos);
				} else {
					vm_dbg(&tile->xe->drm,
					       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
					       (u64)dma_addr[pos], vram_addr, i - pos + incr);
					__fence = xe_migrate_to_vram(tile->migrate,
								     i - pos + incr,
								     dma_addr + pos,
								     vram_addr);
				}
				if (IS_ERR(__fence)) {
					err = PTR_ERR(__fence);
					goto err_out;
				}

				dma_fence_put(fence);
				fence = __fence;
			}

			/* Setup physical address of next device page */
			if (dma_addr[i] && spage) {
				vram_addr = __vram_addr;
				pos = i;
			} else {
				vram_addr = XE_VRAM_ADDR_INVALID;
			}

			/* Extra mismatched device page, copy it */
			if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
				if (sram) {
					vm_dbg(&tile->xe->drm,
					       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
					       vram_addr, (u64)dma_addr[pos], 1);
					__fence = xe_migrate_from_vram(tile->migrate, 1,
								       vram_addr,
								       dma_addr + pos);
				} else {
					vm_dbg(&tile->xe->drm,
					       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
					       (u64)dma_addr[pos], vram_addr, 1);
					__fence = xe_migrate_to_vram(tile->migrate, 1,
								     dma_addr + pos,
								     vram_addr);
				}
				if (IS_ERR(__fence)) {
					err = PTR_ERR(__fence);
					goto err_out;
				}

				dma_fence_put(fence);
				fence = __fence;
			}
		}
	}

err_out:
	/* Wait for all copies to complete */
	if (fence) {
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
	}

	return err;
#undef XE_MIGRATE_CHUNK_SIZE
#undef XE_VRAM_ADDR_INVALID
}


static int xe_svm_copy_to_devmem(struct page **pages, dma_addr_t *dma_addr,
				 unsigned long npages)
{
	return xe_svm_copy(pages, dma_addr, npages, XE_SVM_COPY_TO_VRAM);
}

static int xe_svm_copy_to_ram(struct page **pages, dma_addr_t *dma_addr,
			      unsigned long npages)
{
	return xe_svm_copy(pages, dma_addr, npages, XE_SVM_COPY_TO_SRAM);
}

static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
{
	return container_of(devmem_allocation, struct xe_bo, devmem_allocation);
}

static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
{
	struct xe_bo *bo = to_xe_bo(devmem_allocation);
	struct xe_device *xe = xe_bo_device(bo);

	xe_bo_put_async(bo);
	xe_pm_runtime_put(xe);
}

static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
{
	return PHYS_PFN(offset + vr->hpa_base);
}

static struct drm_buddy *tile_to_buddy(struct xe_tile *tile)
{
	return &tile->mem.vram.ttm.mm;
}

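/*
 * Fill @pfn with the device-private PFN of every page backing the allocation
 * by walking the buddy blocks of the BO's VRAM resource.
 */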
static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
				      unsigned long npages, unsigned long *pfn)
{
	struct xe_bo *bo = to_xe_bo(devmem_allocation);
	struct ttm_resource *res = bo->ttm.resource;
	struct list_head *blocks = &to_xe_ttm_vram_mgr_resource(res)->blocks;
	struct drm_buddy_block *block;
	int j = 0;

	list_for_each_entry(block, blocks, link) {
		struct xe_vram_region *vr = block->private;
		struct xe_tile *tile = vr_to_tile(vr);
		struct drm_buddy *buddy = tile_to_buddy(tile);
		u64 block_pfn = block_offset_to_pfn(vr, drm_buddy_block_offset(block));
		int i;

		for (i = 0; i < drm_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i)
			pfn[j++] = block_pfn + i;
	}

	return 0;
}

static const struct drm_pagemap_devmem_ops dpagemap_devmem_ops = {
	.devmem_release = xe_svm_devmem_release,
	.populate_devmem_pfn = xe_svm_populate_devmem_pfn,
	.copy_to_devmem = xe_svm_copy_to_devmem,
	.copy_to_ram = xe_svm_copy_to_ram,
};

#endif

static const struct drm_gpusvm_ops gpusvm_ops = {
	.range_alloc = xe_svm_range_alloc,
	.range_free = xe_svm_range_free,
	.invalidate = xe_svm_invalidate,
};

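/* Fault chunk sizes GPU SVM may choose from, ordered largest to smallest. */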
static const unsigned long fault_chunk_sizes[] = {
	SZ_2M,
	SZ_64K,
	SZ_4K,
};

/**
 * xe_svm_init() - SVM initialize
 * @vm: The VM.
 *
 * Initialize SVM state which is embedded within the VM.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_svm_init(struct xe_vm *vm)
{
	int err;

	spin_lock_init(&vm->svm.garbage_collector.lock);
	INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
	INIT_WORK(&vm->svm.garbage_collector.work,
		  xe_svm_garbage_collector_work_func);

	err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
			      current->mm, xe_svm_devm_owner(vm->xe), 0,
			      vm->size, xe_modparam.svm_notifier_size * SZ_1M,
			      &gpusvm_ops, fault_chunk_sizes,
			      ARRAY_SIZE(fault_chunk_sizes));
	if (err)
		return err;

	drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);

	return 0;
}

/**
 * xe_svm_close() - SVM close
 * @vm: The VM.
 *
 * Close SVM state (i.e., stop and flush all SVM actions).
 */
void xe_svm_close(struct xe_vm *vm)
{
	xe_assert(vm->xe, xe_vm_is_closed(vm));
	flush_work(&vm->svm.garbage_collector.work);
}

/**
 * xe_svm_fini() - SVM finalize
 * @vm: The VM.
 *
 * Finalize SVM state which is embedded within the VM.
 */
void xe_svm_fini(struct xe_vm *vm)
{
	xe_assert(vm->xe, xe_vm_is_closed(vm));

	drm_gpusvm_fini(&vm->svm.gpusvm);
}

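/*
 * A range is considered valid for @tile if it has a GPU mapping that has not
 * been invalidated and, when device memory is required, is currently backed
 * by VRAM.
 */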
static bool xe_svm_range_is_valid(struct xe_svm_range *range,
				  struct xe_tile *tile,
				  bool devmem_only)
{
	return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
					    range->tile_invalidated) &&
		(!devmem_only || xe_svm_range_in_vram(range)));
}

/**
 * xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
 * @vm: xe_vm pointer
 * @range: Pointer to the SVM range structure
 *
 * The xe_svm_range_migrate_to_smem() function checks whether the range has
 * pages in VRAM and, if so, migrates them to SMEM.
 */
void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
{
	if (xe_svm_range_in_vram(range))
		drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
}

/**
 * xe_svm_range_validate() - Check if the SVM range is valid
 * @vm: xe_vm pointer
 * @range: Pointer to the SVM range structure
 * @tile_mask: Mask representing the tiles to be checked
 * @devmem_preferred: if true, the range needs to be in devmem
 *
 * The xe_svm_range_validate() function checks if a range is
 * valid and located in the desired memory region.
 *
 * Return: true if the range is valid, false otherwise
 */
bool xe_svm_range_validate(struct xe_vm *vm,
			   struct xe_svm_range *range,
			   u8 tile_mask, bool devmem_preferred)
{
	bool ret;

	xe_svm_notifier_lock(vm);

	ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask &&
	       (devmem_preferred == range->base.flags.has_devmem_pages);

	xe_svm_notifier_unlock(vm);

	return ret;
}

/**
 * xe_svm_find_vma_start - Find start of CPU VMA
 * @vm: xe_vm pointer
 * @start: start address
 * @end: end address
 * @vma: Pointer to struct xe_vma
 *
 * This function searches for a CPU VMA within the specified
 * range [start, end] in the given VM. It adjusts the range based on the
 * xe_vma start and end addresses. If no CPU VMA is found, it returns ULONG_MAX.
 *
 * Return: The starting address of the VMA within the range,
 * or ULONG_MAX if no VMA is found
 */
u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *vma)
{
	return drm_gpusvm_find_vma_start(&vm->svm.gpusvm,
					 max(start, xe_vma_start(vma)),
					 min(end, xe_vma_end(vma)));
}

#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
static struct xe_vram_region *tile_to_vr(struct xe_tile *tile)
{
	return &tile->mem.vram;
}

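/*
 * Back [start, end) with a VRAM BO and migrate the CPU address range into it.
 * An extra pm reference is taken while device pages are active and dropped
 * from xe_svm_devmem_release().
 */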
static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
				      unsigned long start, unsigned long end,
				      struct mm_struct *mm,
				      unsigned long timeslice_ms)
{
	struct xe_tile *tile = container_of(dpagemap, typeof(*tile), mem.vram.dpagemap);
	struct xe_device *xe = tile_to_xe(tile);
	struct device *dev = xe->drm.dev;
	struct xe_vram_region *vr = tile_to_vr(tile);
	struct drm_buddy_block *block;
	struct list_head *blocks;
	struct xe_bo *bo;
	ktime_t time_end = 0;
	int err, idx;

	if (!drm_dev_enter(&xe->drm, &idx))
		return -ENODEV;

	xe_pm_runtime_get(xe);

retry:
	bo = xe_bo_create_locked(tile_to_xe(tile), NULL, NULL, end - start,
				 ttm_bo_type_device,
				 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				 XE_BO_FLAG_CPU_ADDR_MIRROR);
	if (IS_ERR(bo)) {
		err = PTR_ERR(bo);
		if (xe_vm_validate_should_retry(NULL, err, &time_end))
			goto retry;
		goto out_pm_put;
	}

	drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
				&dpagemap_devmem_ops,
				&tile->mem.vram.dpagemap,
				end - start);

	blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
	list_for_each_entry(block, blocks, link)
		block->private = vr;

	xe_bo_get(bo);

	/* Ensure the device has a pm ref while there are device pages active. */
	xe_pm_runtime_get_noresume(xe);
	err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
					    start, end, timeslice_ms,
					    xe_svm_devm_owner(xe));
	if (err)
		xe_svm_devmem_release(&bo->devmem_allocation);

	xe_bo_unlock(bo);
	xe_bo_put(bo);

out_pm_put:
	xe_pm_runtime_put(xe);
	drm_dev_exit(idx);

	return err;
}
#endif

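/* Platforms that require 64K VRAM pages cannot migrate SZ_4K ranges to VRAM. */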
static bool supports_4K_migration(struct xe_device *xe)
{
	if (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
		return false;

	return true;
}

/**
 * xe_svm_range_needs_migrate_to_vram() - Decide whether an SVM range needs migration to VRAM
 * @range: SVM range for which migration needs to be decided
 * @vma: VMA which contains the range
 * @preferred_region_is_vram: preferred region for the range is VRAM
 *
 * Return: True if the range needs migration and migration is supported, false otherwise
 */
bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
					bool preferred_region_is_vram)
{
	struct xe_vm *vm = range_to_vm(&range->base);
	u64 range_size = xe_svm_range_size(range);

	if (!range->base.flags.migrate_devmem || !preferred_region_is_vram)
		return false;

	xe_assert(vm->xe, IS_DGFX(vm->xe));

	if (preferred_region_is_vram && xe_svm_range_in_vram(range)) {
		drm_info(&vm->xe->drm, "Range is already in VRAM\n");
		return false;
	}

	if (preferred_region_is_vram && range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
		drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
		return false;
	}

	return true;
}

/**
 * xe_svm_handle_pagefault() - SVM handle page fault
 * @vm: The VM.
 * @vma: The CPU address mirror VMA.
 * @gt: The gt upon which the fault occurred.
 * @fault_addr: The GPU fault address.
 * @atomic: The fault atomic access bit.
 *
 * Create GPU bindings for a SVM page fault. Optionally migrate to device
 * memory.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic)
{
	struct drm_gpusvm_ctx ctx = {
		.read_only = xe_vma_read_only(vma),
		.devmem_possible = IS_DGFX(vm->xe) &&
			IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
		.check_pages_threshold = IS_DGFX(vm->xe) &&
			IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ? SZ_64K : 0,
		.devmem_only = atomic && IS_DGFX(vm->xe) &&
			IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
		.timeslice_ms = atomic && IS_DGFX(vm->xe) &&
			IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ?
			vm->xe->atomic_svm_timeslice_ms : 0,
	};
	struct xe_svm_range *range;
	struct dma_fence *fence;
	struct xe_tile *tile = gt_to_tile(gt);
	int migrate_try_count = ctx.devmem_only ? 3 : 1;
	ktime_t end = 0;
	int err;

	lockdep_assert_held_write(&vm->lock);
	xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));

	xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, 1);

retry:
	/* Always process UNMAPs first so the view of SVM ranges is current */
	err = xe_svm_garbage_collector(vm);
	if (err)
		return err;

	range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);

	if (IS_ERR(range))
		return PTR_ERR(range);

	if (ctx.devmem_only && !range->base.flags.migrate_devmem)
		return -EACCES;

	if (xe_svm_range_is_valid(range, tile, ctx.devmem_only))
		return 0;

	range_debug(range, "PAGE FAULT");

	if (--migrate_try_count >= 0 &&
	    xe_svm_range_needs_migrate_to_vram(range, vma, IS_DGFX(vm->xe))) {
		err = xe_svm_alloc_vram(tile, range, &ctx);
		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
		if (err) {
			if (migrate_try_count || !ctx.devmem_only) {
				drm_dbg(&vm->xe->drm,
					"VRAM allocation failed, falling back to retrying fault, asid=%u, errno=%pe\n",
					vm->usm.asid, ERR_PTR(err));
				goto retry;
			} else {
				drm_err(&vm->xe->drm,
					"VRAM allocation failed, retry count exceeded, asid=%u, errno=%pe\n",
					vm->usm.asid, ERR_PTR(err));
				return err;
			}
		}
	}

	range_debug(range, "GET PAGES");
	err = xe_svm_range_get_pages(vm, range, &ctx);
	/* Corner case where CPU mappings have changed */
	if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
		if (migrate_try_count > 0 || !ctx.devmem_only) {
			drm_dbg(&vm->xe->drm,
				"Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
			range_debug(range, "PAGE FAULT - RETRY PAGES");
			goto retry;
		} else {
			drm_err(&vm->xe->drm,
				"Get pages failed, retry count exceeded, asid=%u, gpusvm=%p, errno=%pe\n",
				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
		}
	}
	if (err) {
		range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
		goto err_out;
	}

	range_debug(range, "PAGE FAULT - BIND");

retry_bind:
	xe_vm_lock(vm, false);
	fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
	if (IS_ERR(fence)) {
		xe_vm_unlock(vm);
		err = PTR_ERR(fence);
		if (err == -EAGAIN) {
			ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
			range_debug(range, "PAGE FAULT - RETRY BIND");
			goto retry;
		}
		if (xe_vm_validate_should_retry(NULL, err, &end))
			goto retry_bind;
		goto err_out;
	}
	xe_vm_unlock(vm);

	dma_fence_wait(fence, false);
	dma_fence_put(fence);

err_out:

	return err;
}

/**
 * xe_svm_has_mapping() - SVM has mappings
 * @vm: The VM.
 * @start: Start address.
 * @end: End address.
 *
 * Check if an address range has SVM mappings.
 *
 * Return: True if address range has a SVM mapping, False otherwise
 */
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
{
	return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
}

/**
 * xe_svm_bo_evict() - SVM evict BO to system memory
 * @bo: BO to evict
 *
 * SVM evict BO to system memory. GPU SVM layer ensures all device pages
 * are evicted before returning.
 *
 * Return: 0 on success, standard error code otherwise
 */
int xe_svm_bo_evict(struct xe_bo *bo)
{
	return drm_pagemap_evict_to_ram(&bo->devmem_allocation);
}

/**
 * xe_svm_range_find_or_insert() - Find or insert GPU SVM range
 * @vm: xe_vm pointer
 * @addr: address for which range needs to be found/inserted
 * @vma: Pointer to the CPU address mirror struct xe_vma
 * @ctx: GPU SVM context
 *
 * This function finds or inserts a newly allocated SVM range based on the
 * address.
 *
 * Return: Pointer to the SVM range on success, ERR_PTR() on failure.
 */
struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
{
	struct drm_gpusvm_range *r;

	r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
					    xe_vma_start(vma), xe_vma_end(vma), ctx);
	if (IS_ERR(r))
		return ERR_PTR(PTR_ERR(r));

	return to_xe_range(r);
}

/**
 * xe_svm_range_get_pages() - Get pages for a SVM range
 * @vm: Pointer to the struct xe_vm
 * @range: Pointer to the xe SVM range structure
 * @ctx: GPU SVM context
 *
 * This function gets pages for a SVM range and ensures they are mapped for
 * DMA access. In case of failure with -EOPNOTSUPP, it evicts the range.
 *
 * Return: 0 on success, negative error code on failure.
 */
int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
			   struct drm_gpusvm_ctx *ctx)
{
	int err = 0;

	err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
	if (err == -EOPNOTSUPP) {
		range_debug(range, "PAGE FAULT - EVICT PAGES");
		drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
	}

	return err;
}

#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)

/**
 * xe_svm_alloc_vram() - Allocate device memory pages for range,
 * migrating existing data.
 * @tile: tile to allocate vram from
 * @range: SVM range
 * @ctx: DRM GPU SVM context
 *
 * Return: 0 on success, error code on failure.
 */
int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
		      const struct drm_gpusvm_ctx *ctx)
{
	struct drm_pagemap *dpagemap;

	xe_assert(tile_to_xe(tile), range->base.flags.migrate_devmem);
	range_debug(range, "ALLOCATE VRAM");

	dpagemap = xe_tile_local_pagemap(tile);
	return drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
				       xe_svm_range_end(range),
				       range->base.gpusvm->mm,
				       ctx->timeslice_ms);
}

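/*
 * Map a device page for access by @dev. Only local access from the owning
 * device is supported; any other device gets DMA_MAPPING_ERROR.
 */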
static struct drm_pagemap_device_addr
xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
			  struct device *dev,
			  struct page *page,
			  unsigned int order,
			  enum dma_data_direction dir)
{
	struct device *pgmap_dev = dpagemap->dev;
	enum drm_interconnect_protocol prot;
	dma_addr_t addr;

	if (pgmap_dev == dev) {
		addr = xe_vram_region_page_to_dpa(page_to_vr(page), page);
		prot = XE_INTERCONNECT_VRAM;
	} else {
		addr = DMA_MAPPING_ERROR;
		prot = 0;
	}

	return drm_pagemap_device_addr_encode(addr, prot, order, dir);
}

static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
	.device_map = xe_drm_pagemap_device_map,
	.populate_mm = xe_drm_pagemap_populate_mm,
};

/**
 * xe_devm_add() - Remap and provide memmap backing for device memory
 * @tile: tile that the memory region belongs to
 * @vr: vram memory region to remap
 *
 * This remaps device memory to the host physical address space and creates
 * struct pages to back the device memory.
 *
 * Return: 0 on success, standard error code otherwise
 */
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct device *dev = &to_pci_dev(xe->drm.dev)->dev;
	struct resource *res;
	void *addr;
	int ret;

	res = devm_request_free_mem_region(dev, &iomem_resource,
					   vr->usable_size);
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		return ret;
	}

	vr->pagemap.type = MEMORY_DEVICE_PRIVATE;
	vr->pagemap.range.start = res->start;
	vr->pagemap.range.end = res->end;
	vr->pagemap.nr_range = 1;
	vr->pagemap.ops = drm_pagemap_pagemap_ops_get();
	vr->pagemap.owner = xe_svm_devm_owner(xe);
	addr = devm_memremap_pages(dev, &vr->pagemap);

	vr->dpagemap.dev = dev;
	vr->dpagemap.ops = &xe_drm_pagemap_ops;

	if (IS_ERR(addr)) {
		devm_release_mem_region(dev, res->start, resource_size(res));
		ret = PTR_ERR(addr);
		drm_err(&xe->drm, "Failed to remap tile %d memory, errno %pe\n",
			tile->id, ERR_PTR(ret));
		return ret;
	}
	vr->hpa_base = res->start;

	drm_dbg(&xe->drm, "Added tile %d memory [%llx-%llx] to devm, remapped to %pr\n",
		tile->id, vr->io_start, vr->io_start + vr->usable_size, res);
	return 0;
}
#else
int xe_svm_alloc_vram(struct xe_tile *tile,
		      struct xe_svm_range *range,
		      const struct drm_gpusvm_ctx *ctx)
{
	return -EOPNOTSUPP;
}

int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
	return 0;
}
#endif

/**
 * xe_svm_flush() - SVM flush
 * @vm: The VM.
 *
 * Flush all SVM actions.
 */
void xe_svm_flush(struct xe_vm *vm)
{
	if (xe_vm_in_fault_mode(vm))
		flush_work(&vm->svm.garbage_collector.work);
}