xref: /linux/drivers/gpu/drm/xe/xe_svm.c (revision 390db60f8e2bd21fae544917eb3a8618265c058c)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2024 Intel Corporation
4  */
5 
6 #include <drm/drm_drv.h>
7 
8 #include "xe_bo.h"
9 #include "xe_exec_queue_types.h"
10 #include "xe_gt_stats.h"
11 #include "xe_migrate.h"
12 #include "xe_module.h"
13 #include "xe_pm.h"
14 #include "xe_pt.h"
15 #include "xe_svm.h"
16 #include "xe_tile.h"
17 #include "xe_ttm_vram_mgr.h"
18 #include "xe_vm.h"
19 #include "xe_vm_types.h"
20 #include "xe_vram_types.h"
21 
22 static bool xe_svm_range_in_vram(struct xe_svm_range *range)
23 {
24 	/*
25 	 * Advisory-only check of whether the range is currently backed by VRAM
26 	 * memory.
27 	 */
28 
29 	struct drm_gpusvm_pages_flags flags = {
30 		/* Pairs with WRITE_ONCE in drm_gpusvm.c */
31 		.__flags = READ_ONCE(range->base.pages.flags.__flags),
32 	};
33 
34 	return flags.has_devmem_pages;
35 }
36 
37 static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
38 {
39 	/* Not reliable without notifier lock */
40 	return xe_svm_range_in_vram(range) && range->tile_present;
41 }
42 
43 static struct xe_vm *gpusvm_to_vm(struct drm_gpusvm *gpusvm)
44 {
45 	return container_of(gpusvm, struct xe_vm, svm.gpusvm);
46 }
47 
48 static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
49 {
50 	return gpusvm_to_vm(r->gpusvm);
51 }
52 
53 #define range_debug(r__, operation__)					\
54 	vm_dbg(&range_to_vm(&(r__)->base)->xe->drm,			\
55 	       "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
56 	       "start=0x%014lx, end=0x%014lx, size=%lu",		\
57 	       (operation__), range_to_vm(&(r__)->base)->usm.asid,	\
58 	       (r__)->base.gpusvm,					\
59 	       xe_svm_range_in_vram((r__)) ? 1 : 0,			\
60 	       xe_svm_range_has_vram_binding((r__)) ? 1 : 0,		\
61 	       (r__)->base.pages.notifier_seq,				\
62 	       xe_svm_range_start((r__)), xe_svm_range_end((r__)),	\
63 	       xe_svm_range_size((r__)))
64 
65 void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
66 {
67 	range_debug(range, operation);
68 }
69 
70 static struct drm_gpusvm_range *
71 xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
72 {
73 	struct xe_svm_range *range;
74 
75 	range = kzalloc(sizeof(*range), GFP_KERNEL);
76 	if (!range)
77 		return NULL;
78 
79 	INIT_LIST_HEAD(&range->garbage_collector_link);
80 	xe_vm_get(gpusvm_to_vm(gpusvm));
81 
82 	return &range->base;
83 }
84 
85 static void xe_svm_range_free(struct drm_gpusvm_range *range)
86 {
87 	xe_vm_put(range_to_vm(range));
88 	kfree(range);
89 }
90 
91 static void
92 xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
93 				   const struct mmu_notifier_range *mmu_range)
94 {
95 	struct xe_device *xe = vm->xe;
96 
97 	range_debug(range, "GARBAGE COLLECTOR ADD");
98 
99 	drm_gpusvm_range_set_unmapped(&range->base, mmu_range);
100 
101 	spin_lock(&vm->svm.garbage_collector.lock);
102 	if (list_empty(&range->garbage_collector_link))
103 		list_add_tail(&range->garbage_collector_link,
104 			      &vm->svm.garbage_collector.range_list);
105 	spin_unlock(&vm->svm.garbage_collector.lock);
106 
107 	queue_work(xe_device_get_root_tile(xe)->primary_gt->usm.pf_wq,
108 		   &vm->svm.garbage_collector.work);
109 }
110 
111 static void xe_svm_tlb_inval_count_stats_incr(struct xe_gt *gt)
112 {
113 	xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_COUNT, 1);
114 }
115 
116 static u8
117 xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
118 				  const struct mmu_notifier_range *mmu_range,
119 				  u64 *adj_start, u64 *adj_end)
120 {
121 	struct xe_svm_range *range = to_xe_range(r);
122 	struct xe_device *xe = vm->xe;
123 	struct xe_tile *tile;
124 	u8 tile_mask = 0;
125 	u8 id;
126 
127 	xe_svm_assert_in_notifier(vm);
128 
129 	range_debug(range, "NOTIFIER");
130 
131 	/* Skip if already unmapped or if no bindings exist */
132 	if (range->base.pages.flags.unmapped || !range->tile_present)
133 		return 0;
134 
135 	range_debug(range, "NOTIFIER - EXECUTE");
136 
137 	/* Adjust invalidation to range boundaries */
138 	*adj_start = min(xe_svm_range_start(range), mmu_range->start);
139 	*adj_end = max(xe_svm_range_end(range), mmu_range->end);
140 
141 	/*
142 	 * XXX: Ideally would zap PTEs in one shot in xe_svm_invalidate but the
143 	 * invalidation code can't correctly cope with sparse ranges or
144 	 * invalidations spanning multiple ranges.
145 	 */
146 	for_each_tile(tile, xe, id)
147 		if (xe_pt_zap_ptes_range(tile, vm, range)) {
148 			/*
149 			 * WRITE_ONCE pairs with READ_ONCE in
150 			 * xe_vm_has_valid_gpu_mapping()
151 			 */
152 			WRITE_ONCE(range->tile_invalidated,
153 				   range->tile_invalidated | BIT(id));
154 
155 			if (!(tile_mask & BIT(id))) {
156 				xe_svm_tlb_inval_count_stats_incr(tile->primary_gt);
157 				if (tile->media_gt)
158 					xe_svm_tlb_inval_count_stats_incr(tile->media_gt);
159 				tile_mask |= BIT(id);
160 			}
161 		}
162 
163 	return tile_mask;
164 }
165 
166 static void
167 xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r,
168 				const struct mmu_notifier_range *mmu_range)
169 {
170 	struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
171 
172 	xe_svm_assert_in_notifier(vm);
173 
174 	drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx);
175 	if (!xe_vm_is_closed(vm) && mmu_range->event == MMU_NOTIFY_UNMAP)
176 		xe_svm_garbage_collector_add_range(vm, to_xe_range(r),
177 						   mmu_range);
178 }
179 
180 static s64 xe_svm_stats_ktime_us_delta(ktime_t start)
181 {
182 	return IS_ENABLED(CONFIG_DEBUG_FS) ?
183 		ktime_us_delta(ktime_get(), start) : 0;
184 }
185 
186 static void xe_svm_tlb_inval_us_stats_incr(struct xe_gt *gt, ktime_t start)
187 {
188 	s64 us_delta = xe_svm_stats_ktime_us_delta(start);
189 
190 	xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_US, us_delta);
191 }
192 
193 static ktime_t xe_svm_stats_ktime_get(void)
194 {
195 	return IS_ENABLED(CONFIG_DEBUG_FS) ? ktime_get() : 0;
196 }
197 
198 static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
199 			      struct drm_gpusvm_notifier *notifier,
200 			      const struct mmu_notifier_range *mmu_range)
201 {
202 	struct xe_vm *vm = gpusvm_to_vm(gpusvm);
203 	struct xe_device *xe = vm->xe;
204 	struct drm_gpusvm_range *r, *first;
205 	struct xe_tile *tile;
206 	ktime_t start = xe_svm_stats_ktime_get();
207 	u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
208 	u8 tile_mask = 0, id;
209 	long err;
210 
211 	xe_svm_assert_in_notifier(vm);
212 
213 	vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm,
214 	       "INVALIDATE: asid=%u, gpusvm=%p, seqno=%lu, start=0x%016lx, end=0x%016lx, event=%d",
215 	       vm->usm.asid, gpusvm, notifier->notifier.invalidate_seq,
216 	       mmu_range->start, mmu_range->end, mmu_range->event);
217 
218 	/* Adjust invalidation to notifier boundaries */
219 	adj_start = max(drm_gpusvm_notifier_start(notifier), adj_start);
220 	adj_end = min(drm_gpusvm_notifier_end(notifier), adj_end);
221 
222 	first = drm_gpusvm_range_find(notifier, adj_start, adj_end);
223 	if (!first)
224 		return;
225 
226 	/*
227 	 * PTs may be getting destroyed so it is not safe to touch them, but the
228 	 * PTs should already be invalidated at this point in time. Regardless we
229 	 * still need to ensure any DMA mappings are unmapped here.
230 	 */
231 	if (xe_vm_is_closed(vm))
232 		goto range_notifier_event_end;
233 
234 	/*
235 	 * XXX: Less than ideal to always wait on VM's resv slots if an
236 	 * invalidation is not required. Could walk range list twice to figure
237 	 * out if an invalidation is needed, but that is also not ideal.
238 	 */
239 	err = dma_resv_wait_timeout(xe_vm_resv(vm),
240 				    DMA_RESV_USAGE_BOOKKEEP,
241 				    false, MAX_SCHEDULE_TIMEOUT);
242 	XE_WARN_ON(err <= 0);
243 
244 	r = first;
245 	drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
246 		tile_mask |= xe_svm_range_notifier_event_begin(vm, r, mmu_range,
247 							       &adj_start,
248 							       &adj_end);
249 	if (!tile_mask)
250 		goto range_notifier_event_end;
251 
252 	xe_device_wmb(xe);
253 
254 	err = xe_vm_range_tilemask_tlb_inval(vm, adj_start, adj_end, tile_mask);
255 	WARN_ON_ONCE(err);
256 
257 range_notifier_event_end:
258 	r = first;
259 	drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
260 		xe_svm_range_notifier_event_end(vm, r, mmu_range);
261 	for_each_tile(tile, xe, id) {
262 		if (tile_mask & BIT(id)) {
263 			xe_svm_tlb_inval_us_stats_incr(tile->primary_gt, start);
264 			if (tile->media_gt)
265 				xe_svm_tlb_inval_us_stats_incr(tile->media_gt, start);
266 		}
267 	}
268 }
269 
270 static int __xe_svm_garbage_collector(struct xe_vm *vm,
271 				      struct xe_svm_range *range)
272 {
273 	struct dma_fence *fence;
274 
275 	range_debug(range, "GARBAGE COLLECTOR");
276 
277 	xe_vm_lock(vm, false);
278 	fence = xe_vm_range_unbind(vm, range);
279 	xe_vm_unlock(vm);
280 	if (IS_ERR(fence))
281 		return PTR_ERR(fence);
282 	dma_fence_put(fence);
283 
284 	drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);
285 
286 	return 0;
287 }
288 
289 static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64 range_end)
290 {
291 	struct xe_vma *vma;
292 	struct xe_vma_mem_attr default_attr = {
293 		.preferred_loc = {
294 			.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
295 			.migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
296 		},
297 		.atomic_access = DRM_XE_ATOMIC_UNDEFINED,
298 	};
299 	int err = 0;
300 
301 	vma = xe_vm_find_vma_by_addr(vm, range_start);
302 	if (!vma)
303 		return -EINVAL;
304 
305 	if (!(vma->gpuva.flags & XE_VMA_MADV_AUTORESET)) {
306 		drm_dbg(&vm->xe->drm, "Skipping madvise reset for vma.\n");
307 		return 0;
308 	}
309 
310 	if (xe_vma_has_default_mem_attrs(vma))
311 		return 0;
312 
313 	vm_dbg(&vm->xe->drm, "Existing VMA start=0x%016llx, vma_end=0x%016llx",
314 	       xe_vma_start(vma), xe_vma_end(vma));
315 
316 	if (xe_vma_start(vma) == range_start && xe_vma_end(vma) == range_end) {
317 		default_attr.pat_index = vma->attr.default_pat_index;
318 		default_attr.default_pat_index  = vma->attr.default_pat_index;
319 		vma->attr = default_attr;
320 	} else {
321 		vm_dbg(&vm->xe->drm, "Split VMA start=0x%016llx, vma_end=0x%016llx",
322 		       range_start, range_end);
323 		err = xe_vm_alloc_cpu_addr_mirror_vma(vm, range_start, range_end - range_start);
324 		if (err) {
325 			drm_warn(&vm->xe->drm, "VMA SPLIT failed: %pe\n", ERR_PTR(err));
326 			xe_vm_kill(vm, true);
327 			return err;
328 		}
329 	}
330 
331 	/*
332 	 * When called from xe_svm_handle_pagefault the original VMA might have
333 	 * changed; signal this so the caller looks up the VMA again.
334 	 */
335 	return -EAGAIN;
336 }
337 
338 static int xe_svm_garbage_collector(struct xe_vm *vm)
339 {
340 	struct xe_svm_range *range;
341 	u64 range_start;
342 	u64 range_end;
343 	int err, ret = 0;
344 
345 	lockdep_assert_held_write(&vm->lock);
346 
347 	if (xe_vm_is_closed_or_banned(vm))
348 		return -ENOENT;
349 
350 	for (;;) {
351 		spin_lock(&vm->svm.garbage_collector.lock);
352 		range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
353 						 typeof(*range),
354 						 garbage_collector_link);
355 		if (!range)
356 			break;
357 
358 		range_start = xe_svm_range_start(range);
359 		range_end = xe_svm_range_end(range);
360 
361 		list_del(&range->garbage_collector_link);
362 		spin_unlock(&vm->svm.garbage_collector.lock);
363 
364 		err = __xe_svm_garbage_collector(vm, range);
365 		if (err) {
366 			drm_warn(&vm->xe->drm,
367 				 "Garbage collection failed: %pe\n",
368 				 ERR_PTR(err));
369 			xe_vm_kill(vm, true);
370 			return err;
371 		}
372 
373 		err = xe_svm_range_set_default_attr(vm, range_start, range_end);
374 		if (err) {
375 			if (err == -EAGAIN)
376 				ret = -EAGAIN;
377 			else
378 				return err;
379 		}
380 	}
381 	spin_unlock(&vm->svm.garbage_collector.lock);
382 
383 	return ret;
384 }
385 
386 static void xe_svm_garbage_collector_work_func(struct work_struct *w)
387 {
388 	struct xe_vm *vm = container_of(w, struct xe_vm,
389 					svm.garbage_collector.work);
390 
391 	down_write(&vm->lock);
392 	xe_svm_garbage_collector(vm);
393 	up_write(&vm->lock);
394 }
395 
396 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
397 
398 static struct xe_vram_region *page_to_vr(struct page *page)
399 {
400 	return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
401 }
402 
403 static u64 xe_vram_region_page_to_dpa(struct xe_vram_region *vr,
404 				      struct page *page)
405 {
406 	u64 dpa;
407 	u64 pfn = page_to_pfn(page);
408 	u64 offset;
409 
410 	xe_assert(vr->xe, is_device_private_page(page));
411 	xe_assert(vr->xe, (pfn << PAGE_SHIFT) >= vr->hpa_base);
412 
413 	offset = (pfn << PAGE_SHIFT) - vr->hpa_base;
414 	dpa = vr->dpa_base + offset;
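	/*
	 * Illustrative example with hypothetical values: if vr->hpa_base is
	 * 0x100000000 and vr->dpa_base is 0, a device page at host physical
	 * address 0x100200000 gives offset = 0x200000 and dpa = 0x200000.
	 */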
415 
416 	return dpa;
417 }
418 
419 enum xe_svm_copy_dir {
420 	XE_SVM_COPY_TO_VRAM,
421 	XE_SVM_COPY_TO_SRAM,
422 };
423 
424 static void xe_svm_copy_kb_stats_incr(struct xe_gt *gt,
425 				      const enum xe_svm_copy_dir dir,
426 				      int kb)
427 {
428 	if (dir == XE_SVM_COPY_TO_VRAM)
429 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_KB, kb);
430 	else
431 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_KB, kb);
432 }
433 
434 static void xe_svm_copy_us_stats_incr(struct xe_gt *gt,
435 				      const enum xe_svm_copy_dir dir,
436 				      unsigned long npages,
437 				      ktime_t start)
438 {
439 	s64 us_delta = xe_svm_stats_ktime_us_delta(start);
440 
441 	if (dir == XE_SVM_COPY_TO_VRAM) {
442 		switch (npages) {
443 		case 1:
444 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_DEVICE_COPY_US,
445 					 us_delta);
446 			break;
447 		case 16:
448 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_DEVICE_COPY_US,
449 					 us_delta);
450 			break;
451 		case 512:
452 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_DEVICE_COPY_US,
453 					 us_delta);
454 			break;
455 		}
456 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_US,
457 				 us_delta);
458 	} else {
459 		switch (npages) {
460 		case 1:
461 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_CPU_COPY_US,
462 					 us_delta);
463 			break;
464 		case 16:
465 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_CPU_COPY_US,
466 					 us_delta);
467 			break;
468 		case 512:
469 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_CPU_COPY_US,
470 					 us_delta);
471 			break;
472 		}
473 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_US,
474 				 us_delta);
475 	}
476 }
477 
478 static int xe_svm_copy(struct page **pages,
479 		       struct drm_pagemap_addr *pagemap_addr,
480 		       unsigned long npages, const enum xe_svm_copy_dir dir)
481 {
482 	struct xe_vram_region *vr = NULL;
483 	struct xe_gt *gt = NULL;
484 	struct xe_device *xe;
485 	struct dma_fence *fence = NULL;
486 	unsigned long i;
487 #define XE_VRAM_ADDR_INVALID	~0x0ull
488 	u64 vram_addr = XE_VRAM_ADDR_INVALID;
489 	int err = 0, pos = 0;
490 	bool sram = dir == XE_SVM_COPY_TO_SRAM;
491 	ktime_t start = xe_svm_stats_ktime_get();
492 
493 	/*
494 	 * This flow is complex: it locates physically contiguous device pages,
495 	 * derives the starting physical address, and performs a single GPU copy
496 	 * for every 8M chunk in a DMA address array. Both device pages and
497 	 * DMA addresses may be sparsely populated. If either is NULL, a copy is
498 	 * triggered based on the current search state. The last GPU copy is
499 	 * waited on to ensure all copies are complete.
500 	 */
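	/*
	 * Illustrative example, assuming 4K pages: a fully populated,
	 * physically contiguous 2M range (512 pages) stays below the 8M chunk
	 * limit and is issued as a single copy once the last page is reached,
	 * while a contiguous 16M run is split at the 8M boundary into two
	 * copies.
	 */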
501 
502 	for (i = 0; i < npages; ++i) {
503 		struct page *spage = pages[i];
504 		struct dma_fence *__fence;
505 		u64 __vram_addr;
506 		bool match = false, chunk, last;
507 
508 #define XE_MIGRATE_CHUNK_SIZE	SZ_8M
509 		chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
510 		last = (i + 1) == npages;
511 
512 		/* No CPU page and no device pages queued to copy */
513 		if (!pagemap_addr[i].addr && vram_addr == XE_VRAM_ADDR_INVALID)
514 			continue;
515 
516 		if (!vr && spage) {
517 			vr = page_to_vr(spage);
518 			gt = xe_migrate_exec_queue(vr->migrate)->gt;
519 			xe = vr->xe;
520 		}
521 		XE_WARN_ON(spage && page_to_vr(spage) != vr);
522 
523 		/*
524 		 * CPU page and device page valid, capture physical address on
525 		 * first device page, check if physical contiguous on subsequent
526 		 * device pages.
527 		 */
528 		if (pagemap_addr[i].addr && spage) {
529 			__vram_addr = xe_vram_region_page_to_dpa(vr, spage);
530 			if (vram_addr == XE_VRAM_ADDR_INVALID) {
531 				vram_addr = __vram_addr;
532 				pos = i;
533 			}
534 
535 			match = vram_addr + PAGE_SIZE * (i - pos) == __vram_addr;
536 			/* Expected with contiguous memory */
537 			xe_assert(vr->xe, match);
538 
539 			if (pagemap_addr[i].order) {
540 				i += NR_PAGES(pagemap_addr[i].order) - 1;
541 				chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
542 				last = (i + 1) == npages;
543 			}
544 		}
545 
546 		/*
547 		 * Mismatched physical address, 8M copy chunk, or last page -
548 		 * trigger a copy.
549 		 */
550 		if (!match || chunk || last) {
551 			/*
552 			 * Extra page for first copy if last page and matching
553 			 * physical address.
554 			 */
555 			int incr = (match && last) ? 1 : 0;
556 
557 			if (vram_addr != XE_VRAM_ADDR_INVALID) {
558 				xe_svm_copy_kb_stats_incr(gt, dir,
559 							  (i - pos + incr) *
560 							  (PAGE_SIZE / SZ_1K));
561 				if (sram) {
562 					vm_dbg(&xe->drm,
563 					       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
564 					       vram_addr,
565 					       (u64)pagemap_addr[pos].addr, i - pos + incr);
566 					__fence = xe_migrate_from_vram(vr->migrate,
567 								       i - pos + incr,
568 								       vram_addr,
569 								       &pagemap_addr[pos]);
570 				} else {
571 					vm_dbg(&xe->drm,
572 					       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
573 					       (u64)pagemap_addr[pos].addr, vram_addr,
574 					       i - pos + incr);
575 					__fence = xe_migrate_to_vram(vr->migrate,
576 								     i - pos + incr,
577 								     &pagemap_addr[pos],
578 								     vram_addr);
579 				}
580 				if (IS_ERR(__fence)) {
581 					err = PTR_ERR(__fence);
582 					goto err_out;
583 				}
584 
585 				dma_fence_put(fence);
586 				fence = __fence;
587 			}
588 
589 			/* Setup physical address of next device page */
590 			if (pagemap_addr[i].addr && spage) {
591 				vram_addr = __vram_addr;
592 				pos = i;
593 			} else {
594 				vram_addr = XE_VRAM_ADDR_INVALID;
595 			}
596 
597 			/* Extra mismatched device page, copy it */
598 			if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
599 				xe_svm_copy_kb_stats_incr(gt, dir,
600 							  (PAGE_SIZE / SZ_1K));
601 				if (sram) {
602 					vm_dbg(&xe->drm,
603 					       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
604 					       vram_addr, (u64)pagemap_addr[pos].addr, 1);
605 					__fence = xe_migrate_from_vram(vr->migrate, 1,
606 								       vram_addr,
607 								       &pagemap_addr[pos]);
608 				} else {
609 					vm_dbg(&xe->drm,
610 					       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
611 					       (u64)pagemap_addr[pos].addr, vram_addr, 1);
612 					__fence = xe_migrate_to_vram(vr->migrate, 1,
613 								     &pagemap_addr[pos],
614 								     vram_addr);
615 				}
616 				if (IS_ERR(__fence)) {
617 					err = PTR_ERR(__fence);
618 					goto err_out;
619 				}
620 
621 				dma_fence_put(fence);
622 				fence = __fence;
623 			}
624 		}
625 	}
626 
627 err_out:
628 	/* Wait for all copies to complete */
629 	if (fence) {
630 		dma_fence_wait(fence, false);
631 		dma_fence_put(fence);
632 	}
633 
634 	/*
635 	 * XXX: We can't derive the GT here (or anywhere in this function), but
636 	 * compute always uses the primary GT so accumulate stats on the likely
637 	 * GT of the fault.
638 	 */
639 	if (gt)
640 		xe_svm_copy_us_stats_incr(gt, dir, npages, start);
641 
642 	return err;
643 #undef XE_MIGRATE_CHUNK_SIZE
644 #undef XE_VRAM_ADDR_INVALID
645 }
646 
647 static int xe_svm_copy_to_devmem(struct page **pages,
648 				 struct drm_pagemap_addr *pagemap_addr,
649 				 unsigned long npages)
650 {
651 	return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM);
652 }
653 
654 static int xe_svm_copy_to_ram(struct page **pages,
655 			      struct drm_pagemap_addr *pagemap_addr,
656 			      unsigned long npages)
657 {
658 	return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM);
659 }
660 
661 static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
662 {
663 	return container_of(devmem_allocation, struct xe_bo, devmem_allocation);
664 }
665 
666 static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
667 {
668 	struct xe_bo *bo = to_xe_bo(devmem_allocation);
669 	struct xe_device *xe = xe_bo_device(bo);
670 
671 	xe_bo_put_async(bo);
672 	xe_pm_runtime_put(xe);
673 }
674 
675 static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
676 {
677 	return PHYS_PFN(offset + vr->hpa_base);
678 }
679 
680 static struct drm_buddy *vram_to_buddy(struct xe_vram_region *vram)
681 {
682 	return &vram->ttm.mm;
683 }
684 
685 static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
686 				      unsigned long npages, unsigned long *pfn)
687 {
688 	struct xe_bo *bo = to_xe_bo(devmem_allocation);
689 	struct ttm_resource *res = bo->ttm.resource;
690 	struct list_head *blocks = &to_xe_ttm_vram_mgr_resource(res)->blocks;
691 	struct drm_buddy_block *block;
692 	int j = 0;
693 
694 	list_for_each_entry(block, blocks, link) {
695 		struct xe_vram_region *vr = block->private;
696 		struct drm_buddy *buddy = vram_to_buddy(vr);
697 		u64 block_pfn = block_offset_to_pfn(vr, drm_buddy_block_offset(block));
698 		int i;
699 
700 		for (i = 0; i < drm_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i)
701 			pfn[j++] = block_pfn + i;
702 	}
703 
704 	return 0;
705 }
706 
707 static const struct drm_pagemap_devmem_ops dpagemap_devmem_ops = {
708 	.devmem_release = xe_svm_devmem_release,
709 	.populate_devmem_pfn = xe_svm_populate_devmem_pfn,
710 	.copy_to_devmem = xe_svm_copy_to_devmem,
711 	.copy_to_ram = xe_svm_copy_to_ram,
712 };
713 
714 #endif
715 
716 static const struct drm_gpusvm_ops gpusvm_ops = {
717 	.range_alloc = xe_svm_range_alloc,
718 	.range_free = xe_svm_range_free,
719 	.invalidate = xe_svm_invalidate,
720 };
721 
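/*
 * Chunk sizes tried when sizing an SVM range around a fault address;
 * drm_gpusvm walks this list largest-first and, roughly, uses the largest
 * size that fits the faulting CPU VMA (selection logic lives in
 * drm_gpusvm.c, this note is illustrative only).
 */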
722 static const unsigned long fault_chunk_sizes[] = {
723 	SZ_2M,
724 	SZ_64K,
725 	SZ_4K,
726 };
727 
728 /**
729  * xe_svm_init() - SVM initialize
730  * @vm: The VM.
731  *
732  * Initialize SVM state which is embedded within the VM.
733  *
734  * Return: 0 on success, negative error code on error.
735  */
736 int xe_svm_init(struct xe_vm *vm)
737 {
738 	int err;
739 
740 	if (vm->flags & XE_VM_FLAG_FAULT_MODE) {
741 		spin_lock_init(&vm->svm.garbage_collector.lock);
742 		INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
743 		INIT_WORK(&vm->svm.garbage_collector.work,
744 			  xe_svm_garbage_collector_work_func);
745 
746 		err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
747 				      current->mm, 0, vm->size,
748 				      xe_modparam.svm_notifier_size * SZ_1M,
749 				      &gpusvm_ops, fault_chunk_sizes,
750 				      ARRAY_SIZE(fault_chunk_sizes));
751 		drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
752 	} else {
753 		err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)",
754 				      &vm->xe->drm, NULL, 0, 0, 0, NULL,
755 				      NULL, 0);
756 	}
757 
758 	return err;
759 }
760 
761 /**
762  * xe_svm_close() - SVM close
763  * @vm: The VM.
764  *
765  * Close SVM state (i.e., stop and flush all SVM actions).
766  */
767 void xe_svm_close(struct xe_vm *vm)
768 {
769 	xe_assert(vm->xe, xe_vm_is_closed(vm));
770 	flush_work(&vm->svm.garbage_collector.work);
771 }
772 
773 /**
774  * xe_svm_fini() - SVM finalize
775  * @vm: The VM.
776  *
777  * Finalize SVM state which is embedded within the VM.
778  */
779 void xe_svm_fini(struct xe_vm *vm)
780 {
781 	xe_assert(vm->xe, xe_vm_is_closed(vm));
782 
783 	drm_gpusvm_fini(&vm->svm.gpusvm);
784 }
785 
786 static bool xe_svm_range_is_valid(struct xe_svm_range *range,
787 				  struct xe_tile *tile,
788 				  bool devmem_only)
789 {
790 	return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
791 					    range->tile_invalidated) &&
792 		(!devmem_only || xe_svm_range_in_vram(range)));
793 }
794 
795 /** xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
796  * @vm: xe_vm pointer
797  * @range: Pointer to the SVM range structure
798  *
799  * xe_svm_range_migrate_to_smem() checks whether the range has pages in
800  * VRAM and, if so, migrates them to SMEM.
801  */
802 void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
803 {
804 	if (xe_svm_range_in_vram(range))
805 		drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
806 }
807 
808 /**
809  * xe_svm_range_validate() - Check if the SVM range is valid
810  * @vm: xe_vm pointer
811  * @range: Pointer to the SVM range structure
812  * @tile_mask: Mask representing the tiles to be checked
813  * @devmem_preferred : if true range needs to be in devmem
814  *
815  * The xe_svm_range_validate() function checks if a range is
816  * valid and located in the desired memory region.
817  *
818  * Return: true if the range is valid, false otherwise
819  */
820 bool xe_svm_range_validate(struct xe_vm *vm,
821 			   struct xe_svm_range *range,
822 			   u8 tile_mask, bool devmem_preferred)
823 {
824 	bool ret;
825 
826 	xe_svm_notifier_lock(vm);
827 
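	/*
	 * Valid when every tile in tile_mask has a present, non-invalidated
	 * binding and the backing memory matches the preferred placement.
	 */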
828 	ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask &&
829 	       (devmem_preferred == range->base.pages.flags.has_devmem_pages);
830 
831 	xe_svm_notifier_unlock(vm);
832 
833 	return ret;
834 }
835 
836 /**
837  * xe_svm_find_vma_start - Find start of CPU VMA
838  * @vm: xe_vm pointer
839  * @start: start address
840  * @end: end address
841  * @vma: Pointer to struct xe_vma
842  *
843  *
844  * This function searches for a CPU VMA within the specified
845  * range [start, end] in the given VM. It adjusts the range based on the
846  * xe_vma start and end addresses. If no CPU VMA is found, it returns ULONG_MAX.
847  *
848  * Return: The starting address of the VMA within the range,
849  * or ULONG_MAX if no VMA is found
850  */
851 u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *vma)
852 {
853 	return drm_gpusvm_find_vma_start(&vm->svm.gpusvm,
854 					 max(start, xe_vma_start(vma)),
855 					 min(end, xe_vma_end(vma)));
856 }
857 
858 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
859 static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
860 				      unsigned long start, unsigned long end,
861 				      struct mm_struct *mm,
862 				      unsigned long timeslice_ms)
863 {
864 	struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap);
865 	struct xe_device *xe = vr->xe;
866 	struct device *dev = xe->drm.dev;
867 	struct drm_buddy_block *block;
868 	struct xe_validation_ctx vctx;
869 	struct list_head *blocks;
870 	struct drm_exec exec;
871 	struct xe_bo *bo;
872 	int err = 0, idx;
873 
874 	if (!drm_dev_enter(&xe->drm, &idx))
875 		return -ENODEV;
876 
877 	xe_pm_runtime_get(xe);
878 
879 	xe_validation_guard(&vctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
880 		bo = xe_bo_create_locked(xe, NULL, NULL, end - start,
881 					 ttm_bo_type_device,
882 					 (IS_DGFX(xe) ? XE_BO_FLAG_VRAM(vr) : XE_BO_FLAG_SYSTEM) |
883 					 XE_BO_FLAG_CPU_ADDR_MIRROR, &exec);
884 		drm_exec_retry_on_contention(&exec);
885 		if (IS_ERR(bo)) {
886 			err = PTR_ERR(bo);
887 			xe_validation_retry_on_oom(&vctx, &err);
888 			break;
889 		}
890 
891 		drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
892 					&dpagemap_devmem_ops, dpagemap, end - start);
893 
894 		blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
895 		list_for_each_entry(block, blocks, link)
896 			block->private = vr;
897 
898 		xe_bo_get(bo);
899 
900 		/* Ensure the device has a pm ref while there are device pages active. */
901 		xe_pm_runtime_get_noresume(xe);
902 		err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
903 						    start, end, timeslice_ms,
904 						    xe_svm_devm_owner(xe));
905 		if (err)
906 			xe_svm_devmem_release(&bo->devmem_allocation);
907 		xe_bo_unlock(bo);
908 		xe_bo_put(bo);
909 	}
910 	xe_pm_runtime_put(xe);
911 	drm_dev_exit(idx);
912 
913 	return err;
914 }
915 #endif
916 
917 static bool supports_4K_migration(struct xe_device *xe)
918 {
919 	if (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
920 		return false;
921 
922 	return true;
923 }
924 
925 /**
926  * xe_svm_range_needs_migrate_to_vram() - Whether an SVM range needs migration to VRAM
927  * @range: SVM range for which migration needs to be decided
928  * @vma: VMA which contains the range
929  * @preferred_region_is_vram: preferred region for the range is VRAM
930  *
931  * Return: True if the range needs migration and migration is supported, else false
932  */
933 bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
934 					bool preferred_region_is_vram)
935 {
936 	struct xe_vm *vm = range_to_vm(&range->base);
937 	u64 range_size = xe_svm_range_size(range);
938 
939 	if (!range->base.pages.flags.migrate_devmem || !preferred_region_is_vram)
940 		return false;
941 
942 	xe_assert(vm->xe, IS_DGFX(vm->xe));
943 
944 	if (xe_svm_range_in_vram(range)) {
945 		drm_info(&vm->xe->drm, "Range is already in VRAM\n");
946 		return false;
947 	}
948 
949 	if (range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
950 		drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
951 		return false;
952 	}
953 
954 	return true;
955 }
956 
957 #define DECL_SVM_RANGE_COUNT_STATS(elem, stat) \
958 static void xe_svm_range_##elem##_count_stats_incr(struct xe_gt *gt, \
959 						   struct xe_svm_range *range) \
960 { \
961 	switch (xe_svm_range_size(range)) { \
962 	case SZ_4K: \
963 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_COUNT, 1); \
964 		break; \
965 	case SZ_64K: \
966 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_COUNT, 1); \
967 		break; \
968 	case SZ_2M: \
969 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_COUNT, 1); \
970 		break; \
971 	} \
972 } \
973 
974 DECL_SVM_RANGE_COUNT_STATS(fault, PAGEFAULT)
975 DECL_SVM_RANGE_COUNT_STATS(valid_fault, VALID_PAGEFAULT)
976 DECL_SVM_RANGE_COUNT_STATS(migrate, MIGRATE)
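/*
 * For example, DECL_SVM_RANGE_COUNT_STATS(fault, PAGEFAULT) expands to
 * xe_svm_range_fault_count_stats_incr(), which bumps the
 * XE_GT_STATS_ID_SVM_{4K,64K,2M}_PAGEFAULT_COUNT counter matching the
 * range size.
 */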
977 
978 #define DECL_SVM_RANGE_US_STATS(elem, stat) \
979 static void xe_svm_range_##elem##_us_stats_incr(struct xe_gt *gt, \
980 						struct xe_svm_range *range, \
981 						ktime_t start) \
982 { \
983 	s64 us_delta = xe_svm_stats_ktime_us_delta(start); \
984 \
985 	switch (xe_svm_range_size(range)) { \
986 	case SZ_4K: \
987 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_US, \
988 				 us_delta); \
989 		break; \
990 	case SZ_64K: \
991 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_US, \
992 				 us_delta); \
993 		break; \
994 	case SZ_2M: \
995 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_US, \
996 				 us_delta); \
997 		break; \
998 	} \
999 } \
1000 
1001 DECL_SVM_RANGE_US_STATS(migrate, MIGRATE)
1002 DECL_SVM_RANGE_US_STATS(get_pages, GET_PAGES)
1003 DECL_SVM_RANGE_US_STATS(bind, BIND)
1004 DECL_SVM_RANGE_US_STATS(fault, PAGEFAULT)
1005 
1006 static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
1007 				     struct xe_gt *gt, u64 fault_addr,
1008 				     bool need_vram)
1009 {
1010 	int devmem_possible = IS_DGFX(vm->xe) &&
1011 		IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
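	/*
	 * devmem_only (set below) forces VRAM-only page collection when the
	 * fault requires VRAM, e.g. atomics on dGPU; timeslice_ms gives
	 * freshly migrated pages some time in VRAM before they may be
	 * migrated back, limiting migration ping-pong.
	 */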
1012 	struct drm_gpusvm_ctx ctx = {
1013 		.read_only = xe_vma_read_only(vma),
1014 		.devmem_possible = devmem_possible,
1015 		.check_pages_threshold = devmem_possible ? SZ_64K : 0,
1016 		.devmem_only = need_vram && devmem_possible,
1017 		.timeslice_ms = need_vram && devmem_possible ?
1018 			vm->xe->atomic_svm_timeslice_ms : 0,
1019 		.device_private_page_owner = xe_svm_devm_owner(vm->xe),
1020 	};
1021 	struct xe_validation_ctx vctx;
1022 	struct drm_exec exec;
1023 	struct xe_svm_range *range;
1024 	struct dma_fence *fence;
1025 	struct drm_pagemap *dpagemap;
1026 	struct xe_tile *tile = gt_to_tile(gt);
1027 	int migrate_try_count = ctx.devmem_only ? 3 : 1;
1028 	ktime_t start = xe_svm_stats_ktime_get(), bind_start, get_pages_start;
1029 	int err;
1030 
1031 	lockdep_assert_held_write(&vm->lock);
1032 	xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
1033 
1034 	xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, 1);
1035 
1036 retry:
1037 	/* Always process UNMAPs first so the view of SVM ranges is current */
1038 	err = xe_svm_garbage_collector(vm);
1039 	if (err)
1040 		return err;
1041 
1042 	dpagemap = xe_vma_resolve_pagemap(vma, tile);
1043 	if (!dpagemap && !ctx.devmem_only)
1044 		ctx.device_private_page_owner = NULL;
1045 	range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
1046 
1047 	if (IS_ERR(range))
1048 		return PTR_ERR(range);
1049 
1050 	xe_svm_range_fault_count_stats_incr(gt, range);
1051 
1052 	if (ctx.devmem_only && !range->base.pages.flags.migrate_devmem) {
1053 		err = -EACCES;
1054 		goto out;
1055 	}
1056 
1057 	if (xe_svm_range_is_valid(range, tile, ctx.devmem_only)) {
1058 		xe_svm_range_valid_fault_count_stats_incr(gt, range);
1059 		range_debug(range, "PAGE FAULT - VALID");
1060 		goto out;
1061 	}
1062 
1063 	range_debug(range, "PAGE FAULT");
1064 
1065 	if (--migrate_try_count >= 0 &&
1066 	    xe_svm_range_needs_migrate_to_vram(range, vma, !!dpagemap || ctx.devmem_only)) {
1067 		ktime_t migrate_start = xe_svm_stats_ktime_get();
1068 
1069 		/* TODO: For multi-device, dpagemap will be used to find the
1070 		 * remote tile and remote device. Will need to modify
1071 		 * xe_svm_alloc_vram to use dpagemap for future multi-device
1072 		 * support.
1073 		 */
1074 		xe_svm_range_migrate_count_stats_incr(gt, range);
1075 		err = xe_svm_alloc_vram(tile, range, &ctx);
1076 		xe_svm_range_migrate_us_stats_incr(gt, range, migrate_start);
1077 		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
1078 		if (err) {
1079 			if (migrate_try_count || !ctx.devmem_only) {
1080 				drm_dbg(&vm->xe->drm,
1081 					"VRAM allocation failed, falling back to retrying fault, asid=%u, errno=%pe\n",
1082 					vm->usm.asid, ERR_PTR(err));
1083 
1084 				/*
1085 				 * In the devmem-only case, mixed mappings may
1086 				 * be found. The get_pages function will fix
1087 				 * these up to a single location, allowing the
1088 				 * page fault handler to make forward progress.
1089 				 */
1090 				if (ctx.devmem_only)
1091 					goto get_pages;
1092 				else
1093 					goto retry;
1094 			} else {
1095 				drm_err(&vm->xe->drm,
1096 					"VRAM allocation failed, retry count exceeded, asid=%u, errno=%pe\n",
1097 					vm->usm.asid, ERR_PTR(err));
1098 				return err;
1099 			}
1100 		}
1101 	}
1102 
1103 get_pages:
1104 	get_pages_start = xe_svm_stats_ktime_get();
1105 
1106 	range_debug(range, "GET PAGES");
1107 	err = xe_svm_range_get_pages(vm, range, &ctx);
1108 	/* Corner case where CPU mappings have changed */
1109 	if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
1110 		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
1111 		if (migrate_try_count > 0 || !ctx.devmem_only) {
1112 			drm_dbg(&vm->xe->drm,
1113 				"Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
1114 				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
1115 			range_debug(range, "PAGE FAULT - RETRY PAGES");
1116 			goto retry;
1117 		} else {
1118 			drm_err(&vm->xe->drm,
1119 				"Get pages failed, retry count exceeded, asid=%u, gpusvm=%p, errno=%pe\n",
1120 				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
1121 		}
1122 	}
1123 	if (err) {
1124 		range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
1125 		goto out;
1126 	}
1127 
1128 	xe_svm_range_get_pages_us_stats_incr(gt, range, get_pages_start);
1129 	range_debug(range, "PAGE FAULT - BIND");
1130 
1131 	bind_start = xe_svm_stats_ktime_get();
1132 	xe_validation_guard(&vctx, &vm->xe->val, &exec, (struct xe_val_flags) {}, err) {
1133 		err = xe_vm_drm_exec_lock(vm, &exec);
1134 		drm_exec_retry_on_contention(&exec);
1135 
1136 		xe_vm_set_validation_exec(vm, &exec);
1137 		fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
1138 		xe_vm_set_validation_exec(vm, NULL);
1139 		if (IS_ERR(fence)) {
1140 			drm_exec_retry_on_contention(&exec);
1141 			err = PTR_ERR(fence);
1142 			xe_validation_retry_on_oom(&vctx, &err);
1143 			xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
1144 			break;
1145 		}
1146 	}
1147 	if (err)
1148 		goto err_out;
1149 
1150 	dma_fence_wait(fence, false);
1151 	dma_fence_put(fence);
1152 	xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
1153 
1154 out:
1155 	xe_svm_range_fault_us_stats_incr(gt, range, start);
1156 	return 0;
1157 
1158 err_out:
1159 	if (err == -EAGAIN) {
1160 		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
1161 		range_debug(range, "PAGE FAULT - RETRY BIND");
1162 		goto retry;
1163 	}
1164 
1165 	return err;
1166 }
1167 
1168 /**
1169  * xe_svm_handle_pagefault() - SVM handle page fault
1170  * @vm: The VM.
1171  * @vma: The CPU address mirror VMA.
1172  * @gt: The gt upon the fault occurred.
1173  * @fault_addr: The GPU fault address.
1174  * @atomic: The fault atomic access bit.
1175  *
1176  * Create GPU bindings for a SVM page fault. Optionally migrate to device
1177  * memory.
1178  *
1179  * Return: 0 on success, negative error code on error.
1180  */
1181 int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
1182 			    struct xe_gt *gt, u64 fault_addr,
1183 			    bool atomic)
1184 {
1185 	int need_vram, ret;
1186 retry:
1187 	need_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic);
1188 	if (need_vram < 0)
1189 		return need_vram;
1190 
1191 	ret =  __xe_svm_handle_pagefault(vm, vma, gt, fault_addr,
1192 					 need_vram ? true : false);
1193 	if (ret == -EAGAIN) {
1194 		/*
1195 		 * Retry once on -EAGAIN to re-lookup the VMA, as the original VMA
1196 		 * may have been split by xe_svm_range_set_default_attr.
1197 		 */
1198 		vma = xe_vm_find_vma_by_addr(vm, fault_addr);
1199 		if (!vma)
1200 			return -EINVAL;
1201 
1202 		goto retry;
1203 	}
1204 	return ret;
1205 }
1206 
1207 /**
1208  * xe_svm_has_mapping() - SVM has mappings
1209  * @vm: The VM.
1210  * @start: Start address.
1211  * @end: End address.
1212  *
1213  * Check if an address range has SVM mappings.
1214  *
1215  * Return: True if address range has a SVM mapping, False otherwise
1216  */
1217 bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
1218 {
1219 	return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
1220 }
1221 
1222 /**
1223  * xe_svm_unmap_address_range - UNMAP SVM mappings and ranges
1224  * @vm: The VM
1225  * @start: start addr
1226  * @end: end addr
1227  *
1228  * This function UNMAPS svm ranges if start or end address are inside them.
1229  * This function UNMAPs SVM ranges if the start or end address falls inside them.
1230 void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
1231 {
1232 	struct drm_gpusvm_notifier *notifier, *next;
1233 
1234 	lockdep_assert_held_write(&vm->lock);
1235 
1236 	drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) {
1237 		struct drm_gpusvm_range *range, *__next;
1238 
1239 		drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
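			/* Only tear down ranges that straddle the start or end boundary */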
1240 			if (start > drm_gpusvm_range_start(range) ||
1241 			    end < drm_gpusvm_range_end(range)) {
1242 				if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
1243 					drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
1244 				drm_gpusvm_range_get(range);
1245 				__xe_svm_garbage_collector(vm, to_xe_range(range));
1246 				if (!list_empty(&to_xe_range(range)->garbage_collector_link)) {
1247 					spin_lock(&vm->svm.garbage_collector.lock);
1248 					list_del(&to_xe_range(range)->garbage_collector_link);
1249 					spin_unlock(&vm->svm.garbage_collector.lock);
1250 				}
1251 				drm_gpusvm_range_put(range);
1252 			}
1253 		}
1254 	}
1255 }
1256 
1257 /**
1258  * xe_svm_bo_evict() - SVM evict BO to system memory
1259  * @bo: BO to evict
1260  *
1261  * SVM evict BO to system memory. GPU SVM layer ensures all device pages
1262  * are evicted before returning.
1263  *
1264  * Return: 0 on success, standard error code otherwise
1265  */
1266 int xe_svm_bo_evict(struct xe_bo *bo)
1267 {
1268 	return drm_pagemap_evict_to_ram(&bo->devmem_allocation);
1269 }
1270 
1271 /**
1272  * xe_svm_range_find_or_insert() - Find or insert GPU SVM range
1273  * @vm: xe_vm pointer
1274  * @addr: address for which range needs to be found/inserted
1275  * @vma: Pointer to struct xe_vma which mirrors the CPU address space
1276  * @ctx: GPU SVM context
1277  *
1278  * This function finds or inserts a newly allocated a SVM range based on the
1279  * This function finds or inserts a newly allocated SVM range based on the
1280  *
1281  * Return: Pointer to the SVM range on success, ERR_PTR() on failure.
1282  */
1283 struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
1284 						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
1285 {
1286 	struct drm_gpusvm_range *r;
1287 
1288 	r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
1289 					    xe_vma_start(vma), xe_vma_end(vma), ctx);
1290 	if (IS_ERR(r))
1291 		return ERR_CAST(r);
1292 
1293 	return to_xe_range(r);
1294 }
1295 
1296 /**
1297  * xe_svm_range_get_pages() - Get pages for a SVM range
1298  * @vm: Pointer to the struct xe_vm
1299  * @range: Pointer to the xe SVM range structure
1300  * @ctx: GPU SVM context
1301  *
1302  * This function gets pages for a SVM range and ensures they are mapped for
1303  * DMA access. In case of failure with -EOPNOTSUPP, it evicts the range.
1304  *
1305  * Return: 0 on success, negative error code on failure.
1306  */
1307 int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
1308 			   struct drm_gpusvm_ctx *ctx)
1309 {
1310 	int err = 0;
1311 
1312 	err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
1313 	if (err == -EOPNOTSUPP) {
1314 		range_debug(range, "PAGE FAULT - EVICT PAGES");
1315 		drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
1316 	}
1317 
1318 	return err;
1319 }
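/*
 * Typical usage sketch, mirroring __xe_svm_handle_pagefault() above with
 * locking, stats and error handling elided:
 *
 *	range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
 *	err = xe_svm_range_get_pages(vm, range, &ctx);
 *	if (!err)
 *		fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
 *
 * Callers hold vm->lock for write and retry on the transient errors
 * (-EOPNOTSUPP, -EFAULT, -EPERM) handled in the fault path.
 */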
1320 
1321 /**
1322  * xe_svm_ranges_zap_ptes_in_range - clear ptes of svm ranges in input range
1323  * @vm: Pointer to the xe_vm structure
1324  * @start: Start of the input range
1325  * @end: End of the input range
1326  *
1327  * This function removes the page table entries (PTEs) associated
1328  * with the svm ranges within the given input start and end
1329  *
1330  * Return: tile_mask of tiles whose GTs need TLB invalidation.
1331  */
1332 u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
1333 {
1334 	struct drm_gpusvm_notifier *notifier;
1335 	struct xe_svm_range *range;
1336 	u64 adj_start, adj_end;
1337 	struct xe_tile *tile;
1338 	u8 tile_mask = 0;
1339 	u8 id;
1340 
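	/* Requires the GPU SVM notifier lock held for read and vm->lock held for write */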
1341 	lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
1342 		       lockdep_is_held_type(&vm->lock, 0));
1343 
1344 	drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) {
1345 		struct drm_gpusvm_range *r = NULL;
1346 
1347 		adj_start = max(start, drm_gpusvm_notifier_start(notifier));
1348 		adj_end = min(end, drm_gpusvm_notifier_end(notifier));
1349 		drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) {
1350 			range = to_xe_range(r);
1351 			for_each_tile(tile, vm->xe, id) {
1352 				if (xe_pt_zap_ptes_range(tile, vm, range)) {
1353 					tile_mask |= BIT(id);
1354 					/*
1355 					 * WRITE_ONCE pairs with READ_ONCE in
1356 					 * xe_vm_has_valid_gpu_mapping().
1357 					 * Must not fail after setting
1358 					 * tile_invalidated and before
1359 					 * TLB invalidation.
1360 					 */
1361 					WRITE_ONCE(range->tile_invalidated,
1362 						   range->tile_invalidated | BIT(id));
1363 				}
1364 			}
1365 		}
1366 	}
1367 
1368 	return tile_mask;
1369 }
1370 
1371 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
1372 
1373 static struct drm_pagemap *tile_local_pagemap(struct xe_tile *tile)
1374 {
1375 	return &tile->mem.vram->dpagemap;
1376 }
1377 
1378 /**
1379  * xe_vma_resolve_pagemap - Resolve the appropriate DRM pagemap for a VMA
1380  * @vma: Pointer to the xe_vma structure containing memory attributes
1381  * @tile: Pointer to the xe_tile structure used as fallback for VRAM mapping
1382  *
1383  * This function determines the correct DRM pagemap to use for a given VMA.
1384  * It first checks if a valid devmem_fd is provided in the VMA's preferred
1385  * location. If the devmem_fd is negative, it returns NULL, indicating no
1386  * pagemap is available and SMEM is to be used as the preferred location.
1387  * If the devmem_fd is equal to the default faulting
1388  * GT identifier, it returns the VRAM pagemap associated with the tile.
1389  *
1390  * Future support for multi-device configurations may use drm_pagemap_from_fd()
1391  * to resolve pagemaps from arbitrary file descriptors.
1392  *
1393  * Return: A pointer to the resolved drm_pagemap, or NULL if none is applicable.
1394  */
1395 struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
1396 {
1397 	s32 fd = (s32)vma->attr.preferred_loc.devmem_fd;
1398 
1399 	if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM)
1400 		return NULL;
1401 
1402 	if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE)
1403 		return IS_DGFX(tile_to_xe(tile)) ? tile_local_pagemap(tile) : NULL;
1404 
1405 	/* TODO: Support multi-device with drm_pagemap_from_fd(fd) */
1406 	return NULL;
1407 }
1408 
1409 /**
1410  * xe_svm_alloc_vram() - Allocate device memory pages for range,
1411  * migrating existing data.
1412  * @tile: tile to allocate vram from
1413  * @range: SVM range
1414  * @ctx: DRM GPU SVM context
1415  *
1416  * Return: 0 on success, error code on failure.
1417  */
1418 int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
1419 		      const struct drm_gpusvm_ctx *ctx)
1420 {
1421 	struct drm_pagemap *dpagemap;
1422 
1423 	xe_assert(tile_to_xe(tile), range->base.pages.flags.migrate_devmem);
1424 	range_debug(range, "ALLOCATE VRAM");
1425 
1426 	dpagemap = tile_local_pagemap(tile);
1427 	return drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
1428 				       xe_svm_range_end(range),
1429 				       range->base.gpusvm->mm,
1430 				       ctx->timeslice_ms);
1431 }
1432 
1433 static struct drm_pagemap_addr
1434 xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
1435 			  struct device *dev,
1436 			  struct page *page,
1437 			  unsigned int order,
1438 			  enum dma_data_direction dir)
1439 {
1440 	struct device *pgmap_dev = dpagemap->dev;
1441 	enum drm_interconnect_protocol prot;
1442 	dma_addr_t addr;
1443 
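	/*
	 * Pages owned by this device's pagemap can be addressed directly by
	 * their device physical address; peer devices' pages are not mappable
	 * here yet, so report a DMA mapping error instead.
	 */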
1444 	if (pgmap_dev == dev) {
1445 		addr = xe_vram_region_page_to_dpa(page_to_vr(page), page);
1446 		prot = XE_INTERCONNECT_VRAM;
1447 	} else {
1448 		addr = DMA_MAPPING_ERROR;
1449 		prot = 0;
1450 	}
1451 
1452 	return drm_pagemap_addr_encode(addr, prot, order, dir);
1453 }
1454 
1455 static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
1456 	.device_map = xe_drm_pagemap_device_map,
1457 	.populate_mm = xe_drm_pagemap_populate_mm,
1458 };
1459 
1460 /**
1461  * xe_devm_add: Remap and provide memmap backing for device memory
1462  * @tile: tile that the memory region belongs to
1463  * @vr: vram memory region to remap
1464  *
1465  * This remaps device memory into the host physical address space and creates
1466  * struct pages to back the device memory.
1467  *
1468  * Return: 0 on success, standard error code otherwise
1469  */
1470 int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
1471 {
1472 	struct xe_device *xe = tile_to_xe(tile);
1473 	struct device *dev = &to_pci_dev(xe->drm.dev)->dev;
1474 	struct resource *res;
1475 	void *addr;
1476 	int ret;
1477 
1478 	res = devm_request_free_mem_region(dev, &iomem_resource,
1479 					   vr->usable_size);
1480 	if (IS_ERR(res)) {
1481 		ret = PTR_ERR(res);
1482 		return ret;
1483 	}
1484 
1485 	vr->pagemap.type = MEMORY_DEVICE_PRIVATE;
1486 	vr->pagemap.range.start = res->start;
1487 	vr->pagemap.range.end = res->end;
1488 	vr->pagemap.nr_range = 1;
1489 	vr->pagemap.ops = drm_pagemap_pagemap_ops_get();
1490 	vr->pagemap.owner = xe_svm_devm_owner(xe);
1491 	addr = devm_memremap_pages(dev, &vr->pagemap);
1492 
1493 	vr->dpagemap.dev = dev;
1494 	vr->dpagemap.ops = &xe_drm_pagemap_ops;
1495 
1496 	if (IS_ERR(addr)) {
1497 		devm_release_mem_region(dev, res->start, resource_size(res));
1498 		ret = PTR_ERR(addr);
1499 		drm_err(&xe->drm, "Failed to remap tile %d memory, errno %pe\n",
1500 			tile->id, ERR_PTR(ret));
1501 		return ret;
1502 	}
1503 	vr->hpa_base = res->start;
1504 
1505 	drm_dbg(&xe->drm, "Added tile %d memory [%llx-%llx] to devm, remapped to %pr\n",
1506 		tile->id, vr->io_start, vr->io_start + vr->usable_size, res);
1507 	return 0;
1508 }
1509 #else
1510 int xe_svm_alloc_vram(struct xe_tile *tile,
1511 		      struct xe_svm_range *range,
1512 		      const struct drm_gpusvm_ctx *ctx)
1513 {
1514 	return -EOPNOTSUPP;
1515 }
1516 
1517 int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
1518 {
1519 	return 0;
1520 }
1521 
1522 struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
1523 {
1524 	return NULL;
1525 }
1526 #endif
1527 
1528 /**
1529  * xe_svm_flush() - SVM flush
1530  * @vm: The VM.
1531  *
1532  * Flush all SVM actions.
1533  */
1534 void xe_svm_flush(struct xe_vm *vm)
1535 {
1536 	if (xe_vm_in_fault_mode(vm))
1537 		flush_work(&vm->svm.garbage_collector.work);
1538 }
1539