xref: /linux/drivers/gpu/drm/xe/xe_svm.c (revision 9b043680446067358913edc2e9dd71bf8ffae208)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2024 Intel Corporation
4  */
5 
6 #include <drm/drm_drv.h>
7 
8 #include "xe_bo.h"
9 #include "xe_exec_queue_types.h"
10 #include "xe_gt_stats.h"
11 #include "xe_migrate.h"
12 #include "xe_module.h"
13 #include "xe_pm.h"
14 #include "xe_pt.h"
15 #include "xe_svm.h"
16 #include "xe_tile.h"
17 #include "xe_ttm_vram_mgr.h"
18 #include "xe_vm.h"
19 #include "xe_vm_types.h"
20 #include "xe_vram_types.h"
21 
22 static bool xe_svm_range_in_vram(struct xe_svm_range *range)
23 {
24 	/*
25 	 * Advisory only check whether the range is currently backed by VRAM
26 	 * memory.
27 	 */
28 
29 	struct drm_gpusvm_pages_flags flags = {
30 		/* Pairs with WRITE_ONCE in drm_gpusvm.c */
31 		.__flags = READ_ONCE(range->base.pages.flags.__flags),
32 	};
33 
34 	return flags.has_devmem_pages;
35 }
36 
37 static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
38 {
39 	/* Not reliable without notifier lock */
40 	return xe_svm_range_in_vram(range) && range->tile_present;
41 }
42 
43 static struct xe_vm *gpusvm_to_vm(struct drm_gpusvm *gpusvm)
44 {
45 	return container_of(gpusvm, struct xe_vm, svm.gpusvm);
46 }
47 
48 static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
49 {
50 	return gpusvm_to_vm(r->gpusvm);
51 }
52 
53 #define range_debug(r__, operation__)					\
54 	vm_dbg(&range_to_vm(&(r__)->base)->xe->drm,			\
55 	       "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
56 	       "start=0x%014lx, end=0x%014lx, size=%lu",		\
57 	       (operation__), range_to_vm(&(r__)->base)->usm.asid,	\
58 	       (r__)->base.gpusvm,					\
59 	       xe_svm_range_in_vram((r__)) ? 1 : 0,			\
60 	       xe_svm_range_has_vram_binding((r__)) ? 1 : 0,		\
61 	       (r__)->base.pages.notifier_seq,				\
62 	       xe_svm_range_start((r__)), xe_svm_range_end((r__)),	\
63 	       xe_svm_range_size((r__)))
64 
65 void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
66 {
67 	range_debug(range, operation);
68 }
69 
70 static struct drm_gpusvm_range *
71 xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
72 {
73 	struct xe_svm_range *range;
74 
75 	range = kzalloc(sizeof(*range), GFP_KERNEL);
76 	if (!range)
77 		return NULL;
78 
79 	INIT_LIST_HEAD(&range->garbage_collector_link);
80 	xe_vm_get(gpusvm_to_vm(gpusvm));
81 
82 	return &range->base;
83 }
84 
85 static void xe_svm_range_free(struct drm_gpusvm_range *range)
86 {
87 	xe_vm_put(range_to_vm(range));
88 	kfree(range);
89 }
90 
91 static void
92 xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
93 				   const struct mmu_notifier_range *mmu_range)
94 {
95 	struct xe_device *xe = vm->xe;
96 
97 	range_debug(range, "GARBAGE COLLECTOR ADD");
98 
99 	drm_gpusvm_range_set_unmapped(&range->base, mmu_range);
100 
101 	spin_lock(&vm->svm.garbage_collector.lock);
102 	if (list_empty(&range->garbage_collector_link))
103 		list_add_tail(&range->garbage_collector_link,
104 			      &vm->svm.garbage_collector.range_list);
105 	spin_unlock(&vm->svm.garbage_collector.lock);
106 
107 	queue_work(xe->usm.pf_wq, &vm->svm.garbage_collector.work);
108 }
109 
110 static void xe_svm_tlb_inval_count_stats_incr(struct xe_gt *gt)
111 {
112 	xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_COUNT, 1);
113 }
114 
115 static u8
116 xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
117 				  const struct mmu_notifier_range *mmu_range,
118 				  u64 *adj_start, u64 *adj_end)
119 {
120 	struct xe_svm_range *range = to_xe_range(r);
121 	struct xe_device *xe = vm->xe;
122 	struct xe_tile *tile;
123 	u8 tile_mask = 0;
124 	u8 id;
125 
126 	xe_svm_assert_in_notifier(vm);
127 
128 	range_debug(range, "NOTIFIER");
129 
130 	/* Skip if already unmapped or if no binding exists */
131 	if (range->base.pages.flags.unmapped || !range->tile_present)
132 		return 0;
133 
134 	range_debug(range, "NOTIFIER - EXECUTE");
135 
136 	/* Adjust invalidation to range boundaries */
137 	*adj_start = min(xe_svm_range_start(range), mmu_range->start);
138 	*adj_end = max(xe_svm_range_end(range), mmu_range->end);
139 
140 	/*
141 	 * XXX: Ideally would zap PTEs in one shot in xe_svm_invalidate but the
142 	 * invalidation code can't correctly cope with sparse ranges or
143 	 * invalidations spanning multiple ranges.
144 	 */
145 	for_each_tile(tile, xe, id)
146 		if (xe_pt_zap_ptes_range(tile, vm, range)) {
147 			/*
148 			 * WRITE_ONCE pairs with READ_ONCE in
149 			 * xe_vm_has_valid_gpu_mapping()
150 			 */
151 			WRITE_ONCE(range->tile_invalidated,
152 				   range->tile_invalidated | BIT(id));
153 
154 			if (!(tile_mask & BIT(id))) {
155 				xe_svm_tlb_inval_count_stats_incr(tile->primary_gt);
156 				if (tile->media_gt)
157 					xe_svm_tlb_inval_count_stats_incr(tile->media_gt);
158 				tile_mask |= BIT(id);
159 			}
160 		}
161 
162 	return tile_mask;
163 }
164 
165 static void
166 xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r,
167 				const struct mmu_notifier_range *mmu_range)
168 {
169 	struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
170 
171 	xe_svm_assert_in_notifier(vm);
172 
173 	drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx);
174 	if (!xe_vm_is_closed(vm) && mmu_range->event == MMU_NOTIFY_UNMAP)
175 		xe_svm_garbage_collector_add_range(vm, to_xe_range(r),
176 						   mmu_range);
177 }
178 
179 static s64 xe_svm_stats_ktime_us_delta(ktime_t start)
180 {
181 	return IS_ENABLED(CONFIG_DEBUG_FS) ?
182 		ktime_us_delta(ktime_get(), start) : 0;
183 }
184 
185 static void xe_svm_tlb_inval_us_stats_incr(struct xe_gt *gt, ktime_t start)
186 {
187 	s64 us_delta = xe_svm_stats_ktime_us_delta(start);
188 
189 	xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_US, us_delta);
190 }
191 
192 static ktime_t xe_svm_stats_ktime_get(void)
193 {
194 	return IS_ENABLED(CONFIG_DEBUG_FS) ? ktime_get() : 0;
195 }
196 
197 static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
198 			      struct drm_gpusvm_notifier *notifier,
199 			      const struct mmu_notifier_range *mmu_range)
200 {
201 	struct xe_vm *vm = gpusvm_to_vm(gpusvm);
202 	struct xe_device *xe = vm->xe;
203 	struct drm_gpusvm_range *r, *first;
204 	struct xe_tile *tile;
205 	ktime_t start = xe_svm_stats_ktime_get();
206 	u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
207 	u8 tile_mask = 0, id;
208 	long err;
209 
210 	xe_svm_assert_in_notifier(vm);
211 
212 	vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm,
213 	       "INVALIDATE: asid=%u, gpusvm=%p, seqno=%lu, start=0x%016lx, end=0x%016lx, event=%d",
214 	       vm->usm.asid, gpusvm, notifier->notifier.invalidate_seq,
215 	       mmu_range->start, mmu_range->end, mmu_range->event);
216 
217 	/* Adjust invalidation to notifier boundaries */
218 	adj_start = max(drm_gpusvm_notifier_start(notifier), adj_start);
219 	adj_end = min(drm_gpusvm_notifier_end(notifier), adj_end);
220 
221 	first = drm_gpusvm_range_find(notifier, adj_start, adj_end);
222 	if (!first)
223 		return;
224 
225 	/*
226 	 * PTs may be getting destroyed, so it is not safe to touch them, but the
227 	 * PTs should be invalidated at this point in time. Regardless, we still
228 	 * need to ensure any DMA mappings are unmapped here.
229 	 */
230 	if (xe_vm_is_closed(vm))
231 		goto range_notifier_event_end;
232 
233 	/*
234 	 * XXX: Less than ideal to always wait on VM's resv slots if an
235 	 * invalidation is not required. Could walk the range list twice to figure
236 	 * out if an invalidation is needed, but that is also not ideal.
237 	 */
238 	err = dma_resv_wait_timeout(xe_vm_resv(vm),
239 				    DMA_RESV_USAGE_BOOKKEEP,
240 				    false, MAX_SCHEDULE_TIMEOUT);
241 	XE_WARN_ON(err <= 0);
242 
243 	r = first;
244 	drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
245 		tile_mask |= xe_svm_range_notifier_event_begin(vm, r, mmu_range,
246 							       &adj_start,
247 							       &adj_end);
248 	if (!tile_mask)
249 		goto range_notifier_event_end;
250 
251 	xe_device_wmb(xe);
252 
253 	err = xe_vm_range_tilemask_tlb_inval(vm, adj_start, adj_end, tile_mask);
254 	WARN_ON_ONCE(err);
255 
256 range_notifier_event_end:
257 	r = first;
258 	drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
259 		xe_svm_range_notifier_event_end(vm, r, mmu_range);
260 	for_each_tile(tile, xe, id) {
261 		if (tile_mask & BIT(id)) {
262 			xe_svm_tlb_inval_us_stats_incr(tile->primary_gt, start);
263 			if (tile->media_gt)
264 				xe_svm_tlb_inval_us_stats_incr(tile->media_gt, start);
265 		}
266 	}
267 }
268 
269 static int __xe_svm_garbage_collector(struct xe_vm *vm,
270 				      struct xe_svm_range *range)
271 {
272 	struct dma_fence *fence;
273 
274 	range_debug(range, "GARBAGE COLLECTOR");
275 
276 	xe_vm_lock(vm, false);
277 	fence = xe_vm_range_unbind(vm, range);
278 	xe_vm_unlock(vm);
279 	if (IS_ERR(fence))
280 		return PTR_ERR(fence);
281 	dma_fence_put(fence);
282 
283 	drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);
284 
285 	return 0;
286 }
287 
288 static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64 range_end)
289 {
290 	struct xe_vma *vma;
291 	struct xe_vma_mem_attr default_attr = {
292 		.preferred_loc = {
293 			.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
294 			.migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
295 		},
296 		.atomic_access = DRM_XE_ATOMIC_UNDEFINED,
297 	};
298 	int err = 0;
299 
300 	vma = xe_vm_find_vma_by_addr(vm, range_start);
301 	if (!vma)
302 		return -EINVAL;
303 
304 	if (!(vma->gpuva.flags & XE_VMA_MADV_AUTORESET)) {
305 		drm_dbg(&vm->xe->drm, "Skipping madvise reset for vma.\n");
306 		return 0;
307 	}
308 
309 	if (xe_vma_has_default_mem_attrs(vma))
310 		return 0;
311 
312 	vm_dbg(&vm->xe->drm, "Existing VMA start=0x%016llx, vma_end=0x%016llx",
313 	       xe_vma_start(vma), xe_vma_end(vma));
314 
315 	if (xe_vma_start(vma) == range_start && xe_vma_end(vma) == range_end) {
316 		default_attr.pat_index = vma->attr.default_pat_index;
317 		default_attr.default_pat_index  = vma->attr.default_pat_index;
318 		vma->attr = default_attr;
319 	} else {
320 		vm_dbg(&vm->xe->drm, "Split VMA start=0x%016llx, vma_end=0x%016llx",
321 		       range_start, range_end);
322 		err = xe_vm_alloc_cpu_addr_mirror_vma(vm, range_start, range_end - range_start);
323 		if (err) {
324 			drm_warn(&vm->xe->drm, "VMA SPLIT failed: %pe\n", ERR_PTR(err));
325 			xe_vm_kill(vm, true);
326 			return err;
327 		}
328 	}
329 
330 	/*
331 	 * On a call from xe_svm_handle_pagefault() the original VMA might have
332 	 * changed; signal this so the caller looks up the VMA again.
333 	 */
334 	return -EAGAIN;
335 }
336 
337 static int xe_svm_garbage_collector(struct xe_vm *vm)
338 {
339 	struct xe_svm_range *range;
340 	u64 range_start;
341 	u64 range_end;
342 	int err, ret = 0;
343 
344 	lockdep_assert_held_write(&vm->lock);
345 
346 	if (xe_vm_is_closed_or_banned(vm))
347 		return -ENOENT;
348 
349 	for (;;) {
350 		spin_lock(&vm->svm.garbage_collector.lock);
351 		range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
352 						 typeof(*range),
353 						 garbage_collector_link);
354 		if (!range)
355 			break;
356 
357 		range_start = xe_svm_range_start(range);
358 		range_end = xe_svm_range_end(range);
359 
360 		list_del(&range->garbage_collector_link);
361 		spin_unlock(&vm->svm.garbage_collector.lock);
362 
363 		err = __xe_svm_garbage_collector(vm, range);
364 		if (err) {
365 			drm_warn(&vm->xe->drm,
366 				 "Garbage collection failed: %pe\n",
367 				 ERR_PTR(err));
368 			xe_vm_kill(vm, true);
369 			return err;
370 		}
371 
372 		err = xe_svm_range_set_default_attr(vm, range_start, range_end);
373 		if (err) {
374 			if (err == -EAGAIN)
375 				ret = -EAGAIN;
376 			else
377 				return err;
378 		}
379 	}
380 	spin_unlock(&vm->svm.garbage_collector.lock);
381 
382 	return ret;
383 }
384 
385 static void xe_svm_garbage_collector_work_func(struct work_struct *w)
386 {
387 	struct xe_vm *vm = container_of(w, struct xe_vm,
388 					svm.garbage_collector.work);
389 
390 	down_write(&vm->lock);
391 	xe_svm_garbage_collector(vm);
392 	up_write(&vm->lock);
393 }
394 
395 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
396 
397 static struct xe_vram_region *page_to_vr(struct page *page)
398 {
399 	return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
400 }
401 
402 static u64 xe_vram_region_page_to_dpa(struct xe_vram_region *vr,
403 				      struct page *page)
404 {
405 	u64 dpa;
406 	u64 pfn = page_to_pfn(page);
407 	u64 offset;
408 
409 	xe_assert(vr->xe, is_device_private_page(page));
410 	xe_assert(vr->xe, (pfn << PAGE_SHIFT) >= vr->hpa_base);
411 
412 	offset = (pfn << PAGE_SHIFT) - vr->hpa_base;
413 	dpa = vr->dpa_base + offset;
414 
415 	return dpa;
416 }
417 
418 enum xe_svm_copy_dir {
419 	XE_SVM_COPY_TO_VRAM,
420 	XE_SVM_COPY_TO_SRAM,
421 };
422 
423 static void xe_svm_copy_kb_stats_incr(struct xe_gt *gt,
424 				      const enum xe_svm_copy_dir dir,
425 				      int kb)
426 {
427 	if (dir == XE_SVM_COPY_TO_VRAM)
428 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_KB, kb);
429 	else
430 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_KB, kb);
431 }
432 
433 static void xe_svm_copy_us_stats_incr(struct xe_gt *gt,
434 				      const enum xe_svm_copy_dir dir,
435 				      unsigned long npages,
436 				      ktime_t start)
437 {
438 	s64 us_delta = xe_svm_stats_ktime_us_delta(start);
439 
440 	if (dir == XE_SVM_COPY_TO_VRAM) {
441 		switch (npages) {
442 		case 1:
443 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_DEVICE_COPY_US,
444 					 us_delta);
445 			break;
446 		case 16:
447 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_DEVICE_COPY_US,
448 					 us_delta);
449 			break;
450 		case 512:
451 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_DEVICE_COPY_US,
452 					 us_delta);
453 			break;
454 		}
455 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_US,
456 				 us_delta);
457 	} else {
458 		switch (npages) {
459 		case 1:
460 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_CPU_COPY_US,
461 					 us_delta);
462 			break;
463 		case 16:
464 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_CPU_COPY_US,
465 					 us_delta);
466 			break;
467 		case 512:
468 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_CPU_COPY_US,
469 					 us_delta);
470 			break;
471 		}
472 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_US,
473 				 us_delta);
474 	}
475 }
476 
477 static int xe_svm_copy(struct page **pages,
478 		       struct drm_pagemap_addr *pagemap_addr,
479 		       unsigned long npages, const enum xe_svm_copy_dir dir,
480 		       struct dma_fence *pre_migrate_fence)
481 {
482 	struct xe_vram_region *vr = NULL;
483 	struct xe_gt *gt = NULL;
484 	struct xe_device *xe;
485 	struct dma_fence *fence = NULL;
486 	unsigned long i;
487 #define XE_VRAM_ADDR_INVALID	~0x0ull
488 	u64 vram_addr = XE_VRAM_ADDR_INVALID;
489 	int err = 0, pos = 0;
490 	bool sram = dir == XE_SVM_COPY_TO_SRAM;
491 	ktime_t start = xe_svm_stats_ktime_get();
492 
493 	/*
494 	 * This flow is complex: it locates physically contiguous device pages,
495 	 * derives the starting physical address, and performs a single GPU copy
496 	 * for every 8M chunk in a DMA address array. Both device pages and
497 	 * DMA addresses may be sparsely populated. If either is NULL, a copy is
498 	 * triggered based on the current search state. The last GPU copy is
499 	 * waited on to ensure all copies are complete.
500 	 */
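	/*
	 * Worked example (editor's illustration, assuming a 4K PAGE_SIZE): with
	 * XE_MIGRATE_CHUNK_SIZE = SZ_8M, a copy is flushed every
	 * SZ_8M / SZ_4K = 2048 contiguous pages. For 5000 contiguous pages this
	 * yields copies at i = 2048 and i = 4096, plus a final copy of the
	 * remaining 904 pages when 'last' is set.
	 */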
501 
502 	for (i = 0; i < npages; ++i) {
503 		struct page *spage = pages[i];
504 		struct dma_fence *__fence;
505 		u64 __vram_addr;
506 		bool match = false, chunk, last;
507 
508 #define XE_MIGRATE_CHUNK_SIZE	SZ_8M
509 		chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
510 		last = (i + 1) == npages;
511 
512 		/* No CPU page and no device pages queued to copy */
513 		if (!pagemap_addr[i].addr && vram_addr == XE_VRAM_ADDR_INVALID)
514 			continue;
515 
516 		if (!vr && spage) {
517 			vr = page_to_vr(spage);
518 			gt = xe_migrate_exec_queue(vr->migrate)->gt;
519 			xe = vr->xe;
520 		}
521 		XE_WARN_ON(spage && page_to_vr(spage) != vr);
522 
523 		/*
524 		 * CPU page and device page valid, capture physical address on
525 		 * first device page, check if physical contiguous on subsequent
526 		 * device pages.
527 		 */
528 		if (pagemap_addr[i].addr && spage) {
529 			__vram_addr = xe_vram_region_page_to_dpa(vr, spage);
530 			if (vram_addr == XE_VRAM_ADDR_INVALID) {
531 				vram_addr = __vram_addr;
532 				pos = i;
533 			}
534 
535 			match = vram_addr + PAGE_SIZE * (i - pos) == __vram_addr;
536 			/* Expected with contiguous memory */
537 			xe_assert(vr->xe, match);
538 
539 			if (pagemap_addr[i].order) {
540 				i += NR_PAGES(pagemap_addr[i].order) - 1;
541 				chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
542 				last = (i + 1) == npages;
543 			}
544 		}
545 
546 		/*
547 		 * Mismatched physical address, 8M copy chunk, or last page -
548 		 * trigger a copy.
549 		 */
550 		if (!match || chunk || last) {
551 			/*
552 			 * Extra page for first copy if last page and matching
553 			 * physical address.
554 			 */
555 			int incr = (match && last) ? 1 : 0;
556 
557 			if (vram_addr != XE_VRAM_ADDR_INVALID) {
558 				xe_svm_copy_kb_stats_incr(gt, dir,
559 							  (i - pos + incr) *
560 							  (PAGE_SIZE / SZ_1K));
561 				if (sram) {
562 					vm_dbg(&xe->drm,
563 					       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
564 					       vram_addr,
565 					       (u64)pagemap_addr[pos].addr, i - pos + incr);
566 					__fence = xe_migrate_from_vram(vr->migrate,
567 								       i - pos + incr,
568 								       vram_addr,
569 								       &pagemap_addr[pos],
570 								       pre_migrate_fence);
571 				} else {
572 					vm_dbg(&xe->drm,
573 					       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
574 					       (u64)pagemap_addr[pos].addr, vram_addr,
575 					       i - pos + incr);
576 					__fence = xe_migrate_to_vram(vr->migrate,
577 								     i - pos + incr,
578 								     &pagemap_addr[pos],
579 								     vram_addr,
580 								     pre_migrate_fence);
581 				}
582 				if (IS_ERR(__fence)) {
583 					err = PTR_ERR(__fence);
584 					goto err_out;
585 				}
586 				pre_migrate_fence = NULL;
587 				dma_fence_put(fence);
588 				fence = __fence;
589 			}
590 
591 			/* Setup physical address of next device page */
592 			if (pagemap_addr[i].addr && spage) {
593 				vram_addr = __vram_addr;
594 				pos = i;
595 			} else {
596 				vram_addr = XE_VRAM_ADDR_INVALID;
597 			}
598 
599 			/* Extra mismatched device page, copy it */
600 			if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
601 				xe_svm_copy_kb_stats_incr(gt, dir,
602 							  (PAGE_SIZE / SZ_1K));
603 				if (sram) {
604 					vm_dbg(&xe->drm,
605 					       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
606 					       vram_addr, (u64)pagemap_addr[pos].addr, 1);
607 					__fence = xe_migrate_from_vram(vr->migrate, 1,
608 								       vram_addr,
609 								       &pagemap_addr[pos],
610 								       pre_migrate_fence);
611 				} else {
612 					vm_dbg(&xe->drm,
613 					       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
614 					       (u64)pagemap_addr[pos].addr, vram_addr, 1);
615 					__fence = xe_migrate_to_vram(vr->migrate, 1,
616 								     &pagemap_addr[pos],
617 								     vram_addr,
618 								     pre_migrate_fence);
619 				}
620 				if (IS_ERR(__fence)) {
621 					err = PTR_ERR(__fence);
622 					goto err_out;
623 				}
624 				pre_migrate_fence = NULL;
625 				dma_fence_put(fence);
626 				fence = __fence;
627 			}
628 		}
629 	}
630 
631 err_out:
632 	/* Wait for all copies to complete */
633 	if (fence) {
634 		dma_fence_wait(fence, false);
635 		dma_fence_put(fence);
636 	}
637 	if (pre_migrate_fence)
638 		dma_fence_wait(pre_migrate_fence, false);
639 
640 	/*
641 	 * XXX: We can't derive the GT here (or anywhere in this function), but
642 	 * compute always uses the primary GT, so accumulate stats on the likely
643 	 * GT of the fault.
644 	 */
645 	if (gt)
646 		xe_svm_copy_us_stats_incr(gt, dir, npages, start);
647 
648 	return err;
649 #undef XE_MIGRATE_CHUNK_SIZE
650 #undef XE_VRAM_ADDR_INVALID
651 }
652 
653 static int xe_svm_copy_to_devmem(struct page **pages,
654 				 struct drm_pagemap_addr *pagemap_addr,
655 				 unsigned long npages,
656 				 struct dma_fence *pre_migrate_fence)
657 {
658 	return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM,
659 			   pre_migrate_fence);
660 }
661 
662 static int xe_svm_copy_to_ram(struct page **pages,
663 			      struct drm_pagemap_addr *pagemap_addr,
664 			      unsigned long npages,
665 			      struct dma_fence *pre_migrate_fence)
666 {
667 	return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM,
668 			   pre_migrate_fence);
669 }
670 
671 static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
672 {
673 	return container_of(devmem_allocation, struct xe_bo, devmem_allocation);
674 }
675 
676 static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
677 {
678 	struct xe_bo *bo = to_xe_bo(devmem_allocation);
679 	struct xe_device *xe = xe_bo_device(bo);
680 
681 	dma_fence_put(devmem_allocation->pre_migrate_fence);
682 	xe_bo_put_async(bo);
683 	xe_pm_runtime_put(xe);
684 }
685 
686 static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
687 {
688 	return PHYS_PFN(offset + vr->hpa_base);
689 }
690 
691 static struct drm_buddy *vram_to_buddy(struct xe_vram_region *vram)
692 {
693 	return &vram->ttm.mm;
694 }
695 
696 static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
697 				      unsigned long npages, unsigned long *pfn)
698 {
699 	struct xe_bo *bo = to_xe_bo(devmem_allocation);
700 	struct ttm_resource *res = bo->ttm.resource;
701 	struct list_head *blocks = &to_xe_ttm_vram_mgr_resource(res)->blocks;
702 	struct drm_buddy_block *block;
703 	int j = 0;
704 
705 	list_for_each_entry(block, blocks, link) {
706 		struct xe_vram_region *vr = block->private;
707 		struct drm_buddy *buddy = vram_to_buddy(vr);
708 		u64 block_pfn = block_offset_to_pfn(vr, drm_buddy_block_offset(block));
709 		int i;
710 
711 		for (i = 0; i < drm_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i)
712 			pfn[j++] = block_pfn + i;
713 	}
714 
715 	return 0;
716 }
717 
718 static const struct drm_pagemap_devmem_ops dpagemap_devmem_ops = {
719 	.devmem_release = xe_svm_devmem_release,
720 	.populate_devmem_pfn = xe_svm_populate_devmem_pfn,
721 	.copy_to_devmem = xe_svm_copy_to_devmem,
722 	.copy_to_ram = xe_svm_copy_to_ram,
723 };
724 
725 #endif
726 
727 static const struct drm_gpusvm_ops gpusvm_ops = {
728 	.range_alloc = xe_svm_range_alloc,
729 	.range_free = xe_svm_range_free,
730 	.invalidate = xe_svm_invalidate,
731 };
732 
733 static const unsigned long fault_chunk_sizes[] = {
734 	SZ_2M,
735 	SZ_64K,
736 	SZ_4K,
737 };
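
/*
 * Editor's note (illustrative): drm_gpusvm picks the largest chunk from the
 * array above that is aligned to, and fits within, the faulting CPU VMA. For
 * example, a fault inside a 1M VMA is typically served with a 64K range,
 * while a 2M-aligned fault inside a large VMA gets a 2M range.
 */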
738 
739 /**
740  * xe_svm_init() - SVM initialize
741  * @vm: The VM.
742  *
743  * Initialize SVM state which is embedded within the VM.
744  *
745  * Return: 0 on success, negative error code on error.
746  */
747 int xe_svm_init(struct xe_vm *vm)
748 {
749 	int err;
750 
751 	if (vm->flags & XE_VM_FLAG_FAULT_MODE) {
752 		spin_lock_init(&vm->svm.garbage_collector.lock);
753 		INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
754 		INIT_WORK(&vm->svm.garbage_collector.work,
755 			  xe_svm_garbage_collector_work_func);
756 
757 		err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
758 				      current->mm, 0, vm->size,
759 				      xe_modparam.svm_notifier_size * SZ_1M,
760 				      &gpusvm_ops, fault_chunk_sizes,
761 				      ARRAY_SIZE(fault_chunk_sizes));
762 		drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
763 	} else {
764 		err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)",
765 				      &vm->xe->drm, NULL, 0, 0, 0, NULL,
766 				      NULL, 0);
767 	}
768 
769 	return err;
770 }
771 
772 /**
773  * xe_svm_close() - SVM close
774  * @vm: The VM.
775  *
776  * Close SVM state (i.e., stop and flush all SVM actions).
777  */
778 void xe_svm_close(struct xe_vm *vm)
779 {
780 	xe_assert(vm->xe, xe_vm_is_closed(vm));
781 	flush_work(&vm->svm.garbage_collector.work);
782 }
783 
784 /**
785  * xe_svm_fini() - SVM finalize
786  * @vm: The VM.
787  *
788  * Finalize SVM state which is embedded within the VM.
789  */
790 void xe_svm_fini(struct xe_vm *vm)
791 {
792 	xe_assert(vm->xe, xe_vm_is_closed(vm));
793 
794 	drm_gpusvm_fini(&vm->svm.gpusvm);
795 }
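
/*
 * Editor's sketch (illustrative, not taken verbatim from the VM code): a
 * faulting VM is expected to pair the lifecycle calls roughly as follows.
 *
 *	err = xe_svm_init(vm);		// at VM creation
 *	...
 *	xe_svm_close(vm);		// once the VM is closed; stops/flushes SVM work
 *	xe_svm_fini(vm);		// at VM destruction, after close
 */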
796 
797 static bool xe_svm_range_is_valid(struct xe_svm_range *range,
798 				  struct xe_tile *tile,
799 				  bool devmem_only)
800 {
801 	return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
802 					    range->tile_invalidated) &&
803 		(!devmem_only || xe_svm_range_in_vram(range)));
804 }
805 
806 /** xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
807  * @vm: xe_vm pointer
808  * @range: Pointer to the SVM range structure
809  *
810  * xe_svm_range_migrate_to_smem() checks whether the range has pages in
811  * VRAM and, if so, migrates them to SMEM.
812  */
813 void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
814 {
815 	if (xe_svm_range_in_vram(range))
816 		drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
817 }
818 
819 /**
820  * xe_svm_range_validate() - Check if the SVM range is valid
821  * @vm: xe_vm pointer
822  * @range: Pointer to the SVM range structure
823  * @tile_mask: Mask representing the tiles to be checked
824  * @devmem_preferred: if true, the range needs to be in devmem
825  *
826  * The xe_svm_range_validate() function checks if a range is
827  * valid and located in the desired memory region.
828  *
829  * Return: true if the range is valid, false otherwise
830  */
831 bool xe_svm_range_validate(struct xe_vm *vm,
832 			   struct xe_svm_range *range,
833 			   u8 tile_mask, bool devmem_preferred)
834 {
835 	bool ret;
836 
837 	xe_svm_notifier_lock(vm);
838 
839 	ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask &&
840 	       (devmem_preferred == range->base.pages.flags.has_devmem_pages);
841 
842 	xe_svm_notifier_unlock(vm);
843 
844 	return ret;
845 }
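
/*
 * Illustrative use (editor's sketch): a prefetch path can skip work for
 * ranges that are already mapped in the desired placement, e.g.:
 *
 *	if (xe_svm_range_validate(vm, range, BIT(tile->id), want_vram))
 *		continue;	// nothing to do for this range
 *
 * where 'want_vram' stands in for a flag derived from the preferred location.
 */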
846 
847 /**
848  * xe_svm_find_vma_start() - Find start of CPU VMA
849  * @vm: xe_vm pointer
850  * @start: start address
851  * @end: end address
852  * @vma: Pointer to struct xe_vma
853  *
854  *
855  * This function searches for a CPU VMA within the specified
856  * range [start, end] in the given VM. It adjusts the range based on the
857  * xe_vma start and end addresses. If no CPU VMA is found, it returns ULONG_MAX.
858  *
859  * Return: The starting address of the VMA within the range,
860  * or ULONG_MAX if no VMA is found
861  */
862 u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *vma)
863 {
864 	return drm_gpusvm_find_vma_start(&vm->svm.gpusvm,
865 					 max(start, xe_vma_start(vma)),
866 					 min(end, xe_vma_end(vma)));
867 }
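
/*
 * Illustrative use (editor's sketch): bounding a walk of a user-supplied
 * region to the portion actually backed by a CPU VMA, where ULONG_MAX
 * terminates the walk:
 *
 *	addr = xe_svm_find_vma_start(vm, start, end, vma);
 *	if (addr == ULONG_MAX)
 *		return 0;	// no CPU VMA backs this region
 */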
868 
869 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
870 static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
871 				      unsigned long start, unsigned long end,
872 				      struct mm_struct *mm,
873 				      unsigned long timeslice_ms)
874 {
875 	struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap);
876 	struct dma_fence *pre_migrate_fence = NULL;
877 	struct xe_device *xe = vr->xe;
878 	struct device *dev = xe->drm.dev;
879 	struct drm_buddy_block *block;
880 	struct xe_validation_ctx vctx;
881 	struct list_head *blocks;
882 	struct drm_exec exec;
883 	struct xe_bo *bo;
884 	int err = 0, idx;
885 
886 	if (!drm_dev_enter(&xe->drm, &idx))
887 		return -ENODEV;
888 
889 	xe_pm_runtime_get(xe);
890 
891 	xe_validation_guard(&vctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
892 		bo = xe_bo_create_locked(xe, NULL, NULL, end - start,
893 					 ttm_bo_type_device,
894 					 (IS_DGFX(xe) ? XE_BO_FLAG_VRAM(vr) : XE_BO_FLAG_SYSTEM) |
895 					 XE_BO_FLAG_CPU_ADDR_MIRROR, &exec);
896 		drm_exec_retry_on_contention(&exec);
897 		if (IS_ERR(bo)) {
898 			err = PTR_ERR(bo);
899 			xe_validation_retry_on_oom(&vctx, &err);
900 			break;
901 		}
902 
903 		/* Ensure that any clearing or async eviction will complete before migration. */
904 		if (!dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL)) {
905 			err = dma_resv_get_singleton(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
906 						     &pre_migrate_fence);
907 			if (err)
908 				dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
909 						      false, MAX_SCHEDULE_TIMEOUT);
910 			else if (pre_migrate_fence)
911 				dma_fence_enable_sw_signaling(pre_migrate_fence);
912 		}
913 
914 		drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
915 					&dpagemap_devmem_ops, dpagemap, end - start,
916 					pre_migrate_fence);
917 
918 		blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
919 		list_for_each_entry(block, blocks, link)
920 			block->private = vr;
921 
922 		xe_bo_get(bo);
923 
924 		/* Ensure the device has a pm ref while there are device pages active. */
925 		xe_pm_runtime_get_noresume(xe);
926 		err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
927 						    start, end, timeslice_ms,
928 						    xe_svm_devm_owner(xe));
929 		if (err)
930 			xe_svm_devmem_release(&bo->devmem_allocation);
931 		xe_bo_unlock(bo);
932 		xe_bo_put(bo);
933 	}
934 	xe_pm_runtime_put(xe);
935 	drm_dev_exit(idx);
936 
937 	return err;
938 }
939 #endif
940 
941 static bool supports_4K_migration(struct xe_device *xe)
942 {
943 	if (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
944 		return false;
945 
946 	return true;
947 }
948 
949 /**
950  * xe_svm_range_needs_migrate_to_vram() - Check if SVM range needs to be migrated to VRAM
951  * @range: SVM range for which migration needs to be decided
952  * @vma: vma which has range
953  * @preferred_region_is_vram: preferred region for range is vram
954  *
955  * Return: True if the range needs migration and migration is supported, false otherwise
956  */
957 bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
958 					bool preferred_region_is_vram)
959 {
960 	struct xe_vm *vm = range_to_vm(&range->base);
961 	u64 range_size = xe_svm_range_size(range);
962 
963 	if (!range->base.pages.flags.migrate_devmem || !preferred_region_is_vram)
964 		return false;
965 
966 	xe_assert(vm->xe, IS_DGFX(vm->xe));
967 
968 	if (xe_svm_range_in_vram(range)) {
969 		drm_dbg(&vm->xe->drm, "Range is already in VRAM\n");
970 		return false;
971 	}
972 
973 	if (range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
974 		drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
975 		return false;
976 	}
977 
978 	return true;
979 }
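
/*
 * Illustrative use (editor's sketch), mirroring the fault handler below:
 *
 *	if (xe_svm_range_needs_migrate_to_vram(range, vma, want_vram)) {
 *		err = xe_svm_alloc_vram(tile, range, &ctx);
 *		if (err)
 *			goto retry;	// fall back to mapping SRAM pages
 *	}
 */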
980 
981 #define DECL_SVM_RANGE_COUNT_STATS(elem, stat) \
982 static void xe_svm_range_##elem##_count_stats_incr(struct xe_gt *gt, \
983 						   struct xe_svm_range *range) \
984 { \
985 	switch (xe_svm_range_size(range)) { \
986 	case SZ_4K: \
987 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_COUNT, 1); \
988 		break; \
989 	case SZ_64K: \
990 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_COUNT, 1); \
991 		break; \
992 	case SZ_2M: \
993 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_COUNT, 1); \
994 		break; \
995 	} \
996 } \
997 
998 DECL_SVM_RANGE_COUNT_STATS(fault, PAGEFAULT)
999 DECL_SVM_RANGE_COUNT_STATS(valid_fault, VALID_PAGEFAULT)
1000 DECL_SVM_RANGE_COUNT_STATS(migrate, MIGRATE)
1001 
1002 #define DECL_SVM_RANGE_US_STATS(elem, stat) \
1003 static void xe_svm_range_##elem##_us_stats_incr(struct xe_gt *gt, \
1004 						struct xe_svm_range *range, \
1005 						ktime_t start) \
1006 { \
1007 	s64 us_delta = xe_svm_stats_ktime_us_delta(start); \
1008 \
1009 	switch (xe_svm_range_size(range)) { \
1010 	case SZ_4K: \
1011 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_US, \
1012 				 us_delta); \
1013 		break; \
1014 	case SZ_64K: \
1015 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_US, \
1016 				 us_delta); \
1017 		break; \
1018 	case SZ_2M: \
1019 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_US, \
1020 				 us_delta); \
1021 		break; \
1022 	} \
1023 } \
1024 
1025 DECL_SVM_RANGE_US_STATS(migrate, MIGRATE)
1026 DECL_SVM_RANGE_US_STATS(get_pages, GET_PAGES)
1027 DECL_SVM_RANGE_US_STATS(bind, BIND)
1028 DECL_SVM_RANGE_US_STATS(fault, PAGEFAULT)
1029 
1030 static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
1031 				     struct xe_gt *gt, u64 fault_addr,
1032 				     bool need_vram)
1033 {
1034 	int devmem_possible = IS_DGFX(vm->xe) &&
1035 		IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
1036 	struct drm_gpusvm_ctx ctx = {
1037 		.read_only = xe_vma_read_only(vma),
1038 		.devmem_possible = devmem_possible,
1039 		.check_pages_threshold = devmem_possible ? SZ_64K : 0,
1040 		.devmem_only = need_vram && devmem_possible,
1041 		.timeslice_ms = need_vram && devmem_possible ?
1042 			vm->xe->atomic_svm_timeslice_ms : 0,
1043 		.device_private_page_owner = xe_svm_devm_owner(vm->xe),
1044 	};
1045 	struct xe_validation_ctx vctx;
1046 	struct drm_exec exec;
1047 	struct xe_svm_range *range;
1048 	struct dma_fence *fence;
1049 	struct drm_pagemap *dpagemap;
1050 	struct xe_tile *tile = gt_to_tile(gt);
1051 	int migrate_try_count = ctx.devmem_only ? 3 : 1;
1052 	ktime_t start = xe_svm_stats_ktime_get(), bind_start, get_pages_start;
1053 	int err;
1054 
1055 	lockdep_assert_held_write(&vm->lock);
1056 	xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
1057 
1058 	xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, 1);
1059 
1060 retry:
1061 	/* Always process UNMAPs first so the view of SVM ranges is current */
1062 	err = xe_svm_garbage_collector(vm);
1063 	if (err)
1064 		return err;
1065 
1066 	dpagemap = xe_vma_resolve_pagemap(vma, tile);
1067 	if (!dpagemap && !ctx.devmem_only)
1068 		ctx.device_private_page_owner = NULL;
1069 	range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
1070 
1071 	if (IS_ERR(range))
1072 		return PTR_ERR(range);
1073 
1074 	xe_svm_range_fault_count_stats_incr(gt, range);
1075 
1076 	if (ctx.devmem_only && !range->base.pages.flags.migrate_devmem) {
1077 		err = -EACCES;
1078 		goto out;
1079 	}
1080 
1081 	if (xe_svm_range_is_valid(range, tile, ctx.devmem_only)) {
1082 		xe_svm_range_valid_fault_count_stats_incr(gt, range);
1083 		range_debug(range, "PAGE FAULT - VALID");
1084 		goto out;
1085 	}
1086 
1087 	range_debug(range, "PAGE FAULT");
1088 
1089 	if (--migrate_try_count >= 0 &&
1090 	    xe_svm_range_needs_migrate_to_vram(range, vma, !!dpagemap || ctx.devmem_only)) {
1091 		ktime_t migrate_start = xe_svm_stats_ktime_get();
1092 
1093 		/* TODO: For multi-device, dpagemap will be used to find the
1094 		 * remote tile and remote device. Will need to modify
1095 		 * xe_svm_alloc_vram to use dpagemap for future multi-device
1096 		 * support.
1097 		 */
1098 		xe_svm_range_migrate_count_stats_incr(gt, range);
1099 		err = xe_svm_alloc_vram(tile, range, &ctx);
1100 		xe_svm_range_migrate_us_stats_incr(gt, range, migrate_start);
1101 		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
1102 		if (err) {
1103 			if (migrate_try_count || !ctx.devmem_only) {
1104 				drm_dbg(&vm->xe->drm,
1105 					"VRAM allocation failed, falling back to retrying fault, asid=%u, errno=%pe\n",
1106 					vm->usm.asid, ERR_PTR(err));
1107 
1108 				/*
1109 				 * In the devmem-only case, mixed mappings may
1110 				 * be found. The get_pages function will fix
1111 				 * these up to a single location, allowing the
1112 				 * page fault handler to make forward progress.
1113 				 */
1114 				if (ctx.devmem_only)
1115 					goto get_pages;
1116 				else
1117 					goto retry;
1118 			} else {
1119 				drm_err(&vm->xe->drm,
1120 					"VRAM allocation failed, retry count exceeded, asid=%u, errno=%pe\n",
1121 					vm->usm.asid, ERR_PTR(err));
1122 				return err;
1123 			}
1124 		}
1125 	}
1126 
1127 get_pages:
1128 	get_pages_start = xe_svm_stats_ktime_get();
1129 
1130 	range_debug(range, "GET PAGES");
1131 	err = xe_svm_range_get_pages(vm, range, &ctx);
1132 	/* Corner case where CPU mappings have changed */
1133 	if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
1134 		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
1135 		if (migrate_try_count > 0 || !ctx.devmem_only) {
1136 			drm_dbg(&vm->xe->drm,
1137 				"Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
1138 				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
1139 			range_debug(range, "PAGE FAULT - RETRY PAGES");
1140 			goto retry;
1141 		} else {
1142 			drm_err(&vm->xe->drm,
1143 				"Get pages failed, retry count exceeded, asid=%u, gpusvm=%p, errno=%pe\n",
1144 				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
1145 		}
1146 	}
1147 	if (err) {
1148 		range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
1149 		goto out;
1150 	}
1151 
1152 	xe_svm_range_get_pages_us_stats_incr(gt, range, get_pages_start);
1153 	range_debug(range, "PAGE FAULT - BIND");
1154 
1155 	bind_start = xe_svm_stats_ktime_get();
1156 	xe_validation_guard(&vctx, &vm->xe->val, &exec, (struct xe_val_flags) {}, err) {
1157 		err = xe_vm_drm_exec_lock(vm, &exec);
1158 		drm_exec_retry_on_contention(&exec);
1159 
1160 		xe_vm_set_validation_exec(vm, &exec);
1161 		fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
1162 		xe_vm_set_validation_exec(vm, NULL);
1163 		if (IS_ERR(fence)) {
1164 			drm_exec_retry_on_contention(&exec);
1165 			err = PTR_ERR(fence);
1166 			xe_validation_retry_on_oom(&vctx, &err);
1167 			xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
1168 			break;
1169 		}
1170 	}
1171 	if (err)
1172 		goto err_out;
1173 
1174 	dma_fence_wait(fence, false);
1175 	dma_fence_put(fence);
1176 	xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
1177 
1178 out:
1179 	xe_svm_range_fault_us_stats_incr(gt, range, start);
1180 	return 0;
1181 
1182 err_out:
1183 	if (err == -EAGAIN) {
1184 		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
1185 		range_debug(range, "PAGE FAULT - RETRY BIND");
1186 		goto retry;
1187 	}
1188 
1189 	return err;
1190 }
1191 
1192 /**
1193  * xe_svm_handle_pagefault() - SVM handle page fault
1194  * @vm: The VM.
1195  * @vma: The CPU address mirror VMA.
1196  * @gt: The gt upon the fault occurred.
1197  * @fault_addr: The GPU fault address.
1198  * @atomic: The fault atomic access bit.
1199  *
1200  * Create GPU bindings for a SVM page fault. Optionally migrate to device
1201  * memory.
1202  *
1203  * Return: 0 on success, negative error code on error.
1204  */
1205 int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
1206 			    struct xe_gt *gt, u64 fault_addr,
1207 			    bool atomic)
1208 {
1209 	int need_vram, ret;
1210 retry:
1211 	need_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic);
1212 	if (need_vram < 0)
1213 		return need_vram;
1214 
1215 	ret =  __xe_svm_handle_pagefault(vm, vma, gt, fault_addr,
1216 					 need_vram ? true : false);
1217 	if (ret == -EAGAIN) {
1218 		/*
1219 		 * Retry once on -EAGAIN to re-lookup the VMA, as the original VMA
1220 		 * may have been split by xe_svm_range_set_default_attr.
1221 		 */
1222 		vma = xe_vm_find_vma_by_addr(vm, fault_addr);
1223 		if (!vma)
1224 			return -EINVAL;
1225 
1226 		goto retry;
1227 	}
1228 	return ret;
1229 }
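
/*
 * Illustrative call site (editor's sketch): a GT page-fault handler resolves
 * the faulting VMA and, for CPU address mirror VMAs, hands the fault here:
 *
 *	if (xe_vma_is_cpu_addr_mirror(vma))
 *		err = xe_svm_handle_pagefault(vm, vma, gt, fault_addr,
 *					      is_atomic_fault);
 *
 * where 'is_atomic_fault' stands in for the decoded fault's atomic access bit.
 */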
1230 
1231 /**
1232  * xe_svm_has_mapping() - SVM has mappings
1233  * @vm: The VM.
1234  * @start: Start address.
1235  * @end: End address.
1236  *
1237  * Check if an address range has SVM mappings.
1238  *
1239  * Return: True if address range has a SVM mapping, False otherwise
1240  */
1241 bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
1242 {
1243 	return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
1244 }
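
/*
 * Illustrative use (editor's sketch): an unbind/unmap path can refuse to tear
 * down an address range that still has SVM mappings:
 *
 *	if (xe_svm_has_mapping(vm, start, start + size))
 *		return -EBUSY;
 */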
1245 
1246 /**
1247  * xe_svm_unmap_address_range() - Unmap SVM mappings and ranges
1248  * @vm: The VM
1249  * @start: start addr
1250  * @end: end addr
1251  *
1252  * This function unmaps SVM ranges if the start or end address falls inside them.
1253  */
1254 void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
1255 {
1256 	struct drm_gpusvm_notifier *notifier, *next;
1257 
1258 	lockdep_assert_held_write(&vm->lock);
1259 
1260 	drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) {
1261 		struct drm_gpusvm_range *range, *__next;
1262 
1263 		drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
1264 			if (start > drm_gpusvm_range_start(range) ||
1265 			    end < drm_gpusvm_range_end(range)) {
1266 				if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
1267 					drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
1268 				drm_gpusvm_range_get(range);
1269 				__xe_svm_garbage_collector(vm, to_xe_range(range));
1270 				if (!list_empty(&to_xe_range(range)->garbage_collector_link)) {
1271 					spin_lock(&vm->svm.garbage_collector.lock);
1272 					list_del(&to_xe_range(range)->garbage_collector_link);
1273 					spin_unlock(&vm->svm.garbage_collector.lock);
1274 				}
1275 				drm_gpusvm_range_put(range);
1276 			}
1277 		}
1278 	}
1279 }
1280 
1281 /**
1282  * xe_svm_bo_evict() - SVM evict BO to system memory
1283  * @bo: BO to evict
1284  *
1285  * SVM evict BO to system memory. GPU SVM layer ensures all device pages
1286  * are evicted before returning.
1287  *
1288  * Return: 0 on success, standard error code otherwise
1289  */
1290 int xe_svm_bo_evict(struct xe_bo *bo)
1291 {
1292 	return drm_pagemap_evict_to_ram(&bo->devmem_allocation);
1293 }
1294 
1295 /**
1296  * xe_svm_range_find_or_insert() - Find or insert GPU SVM range
1297  * @vm: xe_vm pointer
1298  * @addr: address for which range needs to be found/inserted
1299  * @vma:  Pointer to struct xe_vma which mirrors CPU
1300  * @ctx: GPU SVM context
1301  *
1302  * This function finds or inserts a newly allocated SVM range based on the
1303  * address.
1304  *
1305  * Return: Pointer to the SVM range on success, ERR_PTR() on failure.
1306  */
1307 struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
1308 						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
1309 {
1310 	struct drm_gpusvm_range *r;
1311 
1312 	r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
1313 					    xe_vma_start(vma), xe_vma_end(vma), ctx);
1314 	if (IS_ERR(r))
1315 		return ERR_CAST(r);
1316 
1317 	return to_xe_range(r);
1318 }
1319 
1320 /**
1321  * xe_svm_range_get_pages() - Get pages for a SVM range
1322  * @vm: Pointer to the struct xe_vm
1323  * @range: Pointer to the xe SVM range structure
1324  * @ctx: GPU SVM context
1325  *
1326  * This function gets pages for a SVM range and ensures they are mapped for
1327  * DMA access. In case of failure with -EOPNOTSUPP, it evicts the range.
1328  *
1329  * Return: 0 on success, negative error code on failure.
1330  */
1331 int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
1332 			   struct drm_gpusvm_ctx *ctx)
1333 {
1334 	int err = 0;
1335 
1336 	err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
1337 	if (err == -EOPNOTSUPP) {
1338 		range_debug(range, "PAGE FAULT - EVICT PAGES");
1339 		drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
1340 	}
1341 
1342 	return err;
1343 }
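
/*
 * Illustrative fault flow (editor's sketch) built from the two helpers above,
 * mirroring the structure of __xe_svm_handle_pagefault():
 *
 *	range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
 *	if (IS_ERR(range))
 *		return PTR_ERR(range);
 *
 *	err = xe_svm_range_get_pages(vm, range, &ctx);
 *	if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM)
 *		goto retry;	// CPU mappings changed, retry the fault
 */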
1344 
1345 /**
1346  * xe_svm_ranges_zap_ptes_in_range() - Clear PTEs of SVM ranges in the input range
1347  * @vm: Pointer to the xe_vm structure
1348  * @start: Start of the input range
1349  * @end: End of the input range
1350  *
1351  * This function removes the page table entries (PTEs) associated
1352  * with the SVM ranges within the given input start and end.
1353  *
1354  * Return: tile_mask indicating which tiles' GTs need to be TLB invalidated.
1355  */
1356 u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
1357 {
1358 	struct drm_gpusvm_notifier *notifier;
1359 	struct xe_svm_range *range;
1360 	u64 adj_start, adj_end;
1361 	struct xe_tile *tile;
1362 	u8 tile_mask = 0;
1363 	u8 id;
1364 
1365 	lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
1366 		       lockdep_is_held_type(&vm->lock, 0));
1367 
1368 	drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) {
1369 		struct drm_gpusvm_range *r = NULL;
1370 
1371 		adj_start = max(start, drm_gpusvm_notifier_start(notifier));
1372 		adj_end = min(end, drm_gpusvm_notifier_end(notifier));
1373 		drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) {
1374 			range = to_xe_range(r);
1375 			for_each_tile(tile, vm->xe, id) {
1376 				if (xe_pt_zap_ptes_range(tile, vm, range)) {
1377 					tile_mask |= BIT(id);
1378 					/*
1379 					 * WRITE_ONCE pairs with READ_ONCE in
1380 					 * xe_vm_has_valid_gpu_mapping().
1381 					 * Must not fail after setting
1382 					 * tile_invalidated and before
1383 					 * TLB invalidation.
1384 					 */
1385 					WRITE_ONCE(range->tile_invalidated,
1386 						   range->tile_invalidated | BIT(id));
1387 				}
1388 			}
1389 		}
1390 	}
1391 
1392 	return tile_mask;
1393 }
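
/*
 * Illustrative use (editor's sketch): with vm->lock held for write and the
 * gpusvm notifier lock held, zap the PTEs and then issue a TLB invalidation
 * for the returned mask:
 *
 *	xe_svm_notifier_lock(vm);
 *	tile_mask = xe_svm_ranges_zap_ptes_in_range(vm, start, end);
 *	if (tile_mask)
 *		err = xe_vm_range_tilemask_tlb_inval(vm, start, end, tile_mask);
 *	xe_svm_notifier_unlock(vm);
 */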
1394 
1395 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
1396 
1397 static struct drm_pagemap *tile_local_pagemap(struct xe_tile *tile)
1398 {
1399 	return &tile->mem.vram->dpagemap;
1400 }
1401 
1402 /**
1403  * xe_vma_resolve_pagemap - Resolve the appropriate DRM pagemap for a VMA
1404  * @vma: Pointer to the xe_vma structure containing memory attributes
1405  * @tile: Pointer to the xe_tile structure used as fallback for VRAM mapping
1406  *
1407  * This function determines the correct DRM pagemap to use for a given VMA.
1408  * It first checks if a valid devmem_fd is provided in the VMA's preferred
1409  * location. If the devmem_fd is negative, it returns NULL, indicating no
1410  * pagemap is available and SMEM is to be used as the preferred location.
1411  * If the devmem_fd is equal to the default faulting
1412  * GT identifier, it returns the VRAM pagemap associated with the tile.
1413  *
1414  * Future support for multi-device configurations may use drm_pagemap_from_fd()
1415  * to resolve pagemaps from arbitrary file descriptors.
1416  *
1417  * Return: A pointer to the resolved drm_pagemap, or NULL if none is applicable.
1418  */
1419 struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
1420 {
1421 	s32 fd = (s32)vma->attr.preferred_loc.devmem_fd;
1422 
1423 	if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM)
1424 		return NULL;
1425 
1426 	if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE)
1427 		return IS_DGFX(tile_to_xe(tile)) ? tile_local_pagemap(tile) : NULL;
1428 
1429 	/* TODO: Support multi-device with drm_pagemap_from_fd(fd) */
1430 	return NULL;
1431 }
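
/*
 * Illustrative use (editor's sketch), as in __xe_svm_handle_pagefault() above:
 *
 *	dpagemap = xe_vma_resolve_pagemap(vma, tile);
 *	if (!dpagemap && !ctx.devmem_only)
 *		ctx.device_private_page_owner = NULL;	// fault serviced from SRAM
 */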
1432 
1433 /**
1434  * xe_svm_alloc_vram() - Allocate device memory pages for range,
1435  * migrating existing data.
1436  * @tile: tile to allocate vram from
1437  * @range: SVM range
1438  * @ctx: DRM GPU SVM context
1439  *
1440  * Return: 0 on success, error code on failure.
1441  */
1442 int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
1443 		      const struct drm_gpusvm_ctx *ctx)
1444 {
1445 	struct drm_pagemap *dpagemap;
1446 
1447 	xe_assert(tile_to_xe(tile), range->base.pages.flags.migrate_devmem);
1448 	range_debug(range, "ALLOCATE VRAM");
1449 
1450 	dpagemap = tile_local_pagemap(tile);
1451 	return drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
1452 				       xe_svm_range_end(range),
1453 				       range->base.gpusvm->mm,
1454 				       ctx->timeslice_ms);
1455 }
1456 
1457 static struct drm_pagemap_addr
1458 xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
1459 			  struct device *dev,
1460 			  struct page *page,
1461 			  unsigned int order,
1462 			  enum dma_data_direction dir)
1463 {
1464 	struct device *pgmap_dev = dpagemap->dev;
1465 	enum drm_interconnect_protocol prot;
1466 	dma_addr_t addr;
1467 
1468 	if (pgmap_dev == dev) {
1469 		addr = xe_vram_region_page_to_dpa(page_to_vr(page), page);
1470 		prot = XE_INTERCONNECT_VRAM;
1471 	} else {
1472 		addr = DMA_MAPPING_ERROR;
1473 		prot = 0;
1474 	}
1475 
1476 	return drm_pagemap_addr_encode(addr, prot, order, dir);
1477 }
1478 
1479 static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
1480 	.device_map = xe_drm_pagemap_device_map,
1481 	.populate_mm = xe_drm_pagemap_populate_mm,
1482 };
1483 
1484 /**
1485  * xe_devm_add() - Remap and provide memmap backing for device memory
1486  * @tile: tile that the memory region belongs to
1487  * @vr: vram memory region to remap
1488  *
1489  * This remaps device memory to the host physical address space and creates
1490  * struct pages to back the device memory.
1491  *
1492  * Return: 0 on success, standard error code otherwise
1493  */
1494 int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
1495 {
1496 	struct xe_device *xe = tile_to_xe(tile);
1497 	struct device *dev = &to_pci_dev(xe->drm.dev)->dev;
1498 	struct resource *res;
1499 	void *addr;
1500 	int ret;
1501 
1502 	res = devm_request_free_mem_region(dev, &iomem_resource,
1503 					   vr->usable_size);
1504 	if (IS_ERR(res)) {
1505 		ret = PTR_ERR(res);
1506 		return ret;
1507 	}
1508 
1509 	vr->pagemap.type = MEMORY_DEVICE_PRIVATE;
1510 	vr->pagemap.range.start = res->start;
1511 	vr->pagemap.range.end = res->end;
1512 	vr->pagemap.nr_range = 1;
1513 	vr->pagemap.ops = drm_pagemap_pagemap_ops_get();
1514 	vr->pagemap.owner = xe_svm_devm_owner(xe);
1515 	addr = devm_memremap_pages(dev, &vr->pagemap);
1516 
1517 	vr->dpagemap.dev = dev;
1518 	vr->dpagemap.ops = &xe_drm_pagemap_ops;
1519 
1520 	if (IS_ERR(addr)) {
1521 		devm_release_mem_region(dev, res->start, resource_size(res));
1522 		ret = PTR_ERR(addr);
1523 		drm_err(&xe->drm, "Failed to remap tile %d memory, errno %pe\n",
1524 			tile->id, ERR_PTR(ret));
1525 		return ret;
1526 	}
1527 	vr->hpa_base = res->start;
1528 
1529 	drm_dbg(&xe->drm, "Added tile %d memory [%llx-%llx] to devm, remapped to %pr\n",
1530 		tile->id, vr->io_start, vr->io_start + vr->usable_size, res);
1531 	return 0;
1532 }
1533 #else
1534 int xe_svm_alloc_vram(struct xe_tile *tile,
1535 		      struct xe_svm_range *range,
1536 		      const struct drm_gpusvm_ctx *ctx)
1537 {
1538 	return -EOPNOTSUPP;
1539 }
1540 
1541 int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
1542 {
1543 	return 0;
1544 }
1545 
1546 struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
1547 {
1548 	return NULL;
1549 }
1550 #endif
1551 
1552 /**
1553  * xe_svm_flush() - SVM flush
1554  * @vm: The VM.
1555  *
1556  * Flush all SVM actions.
1557  */
1558 void xe_svm_flush(struct xe_vm *vm)
1559 {
1560 	if (xe_vm_in_fault_mode(vm))
1561 		flush_work(&vm->svm.garbage_collector.work);
1562 }
1563