xref: /linux/drivers/gpu/drm/xe/xe_svm.c (revision e3966940559d52aa1800a008dcfeec218dd31f88)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2024 Intel Corporation
4  */
5 
6 #include <drm/drm_drv.h>
7 
8 #include "xe_bo.h"
9 #include "xe_exec_queue_types.h"
10 #include "xe_gt_stats.h"
11 #include "xe_migrate.h"
12 #include "xe_module.h"
13 #include "xe_pm.h"
14 #include "xe_pt.h"
15 #include "xe_svm.h"
16 #include "xe_tile.h"
17 #include "xe_ttm_vram_mgr.h"
18 #include "xe_vm.h"
19 #include "xe_vm_types.h"
20 #include "xe_vram_types.h"
21 
22 static bool xe_svm_range_in_vram(struct xe_svm_range *range)
23 {
24 	/*
25 	 * Advisory-only check of whether the range is currently backed by
26 	 * VRAM memory.
27 	 */
28 
29 	struct drm_gpusvm_pages_flags flags = {
30 		/* Pairs with WRITE_ONCE in drm_gpusvm.c */
31 		.__flags = READ_ONCE(range->base.pages.flags.__flags),
32 	};
33 
34 	return flags.has_devmem_pages;
35 }
36 
37 static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
38 {
39 	/* Not reliable without notifier lock */
40 	return xe_svm_range_in_vram(range) && range->tile_present;
41 }
42 
43 static struct xe_vm *gpusvm_to_vm(struct drm_gpusvm *gpusvm)
44 {
45 	return container_of(gpusvm, struct xe_vm, svm.gpusvm);
46 }
47 
48 static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
49 {
50 	return gpusvm_to_vm(r->gpusvm);
51 }
52 
53 #define range_debug(r__, operation__)					\
54 	vm_dbg(&range_to_vm(&(r__)->base)->xe->drm,			\
55 	       "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
56 	       "start=0x%014lx, end=0x%014lx, size=%lu",		\
57 	       (operation__), range_to_vm(&(r__)->base)->usm.asid,	\
58 	       (r__)->base.gpusvm,					\
59 	       xe_svm_range_in_vram((r__)) ? 1 : 0,			\
60 	       xe_svm_range_has_vram_binding((r__)) ? 1 : 0,		\
61 	       (r__)->base.pages.notifier_seq,				\
62 	       xe_svm_range_start((r__)), xe_svm_range_end((r__)),	\
63 	       xe_svm_range_size((r__)))
64 
65 void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
66 {
67 	range_debug(range, operation);
68 }
69 
70 static struct drm_gpusvm_range *
71 xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
72 {
73 	struct xe_svm_range *range;
74 
75 	range = kzalloc(sizeof(*range), GFP_KERNEL);
76 	if (!range)
77 		return NULL;
78 
79 	INIT_LIST_HEAD(&range->garbage_collector_link);
80 	xe_vm_get(gpusvm_to_vm(gpusvm));
81 
82 	return &range->base;
83 }
84 
85 static void xe_svm_range_free(struct drm_gpusvm_range *range)
86 {
87 	xe_vm_put(range_to_vm(range));
88 	kfree(range);
89 }
90 
91 static void
92 xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
93 				   const struct mmu_notifier_range *mmu_range)
94 {
95 	struct xe_device *xe = vm->xe;
96 
97 	range_debug(range, "GARBAGE COLLECTOR ADD");
98 
99 	drm_gpusvm_range_set_unmapped(&range->base, mmu_range);
100 
101 	spin_lock(&vm->svm.garbage_collector.lock);
102 	if (list_empty(&range->garbage_collector_link))
103 		list_add_tail(&range->garbage_collector_link,
104 			      &vm->svm.garbage_collector.range_list);
105 	spin_unlock(&vm->svm.garbage_collector.lock);
106 
107 	queue_work(xe_device_get_root_tile(xe)->primary_gt->usm.pf_wq,
108 		   &vm->svm.garbage_collector.work);
109 }
110 
111 static void xe_svm_tlb_inval_count_stats_incr(struct xe_gt *gt)
112 {
113 	xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_COUNT, 1);
114 }
115 
116 static u8
117 xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
118 				  const struct mmu_notifier_range *mmu_range,
119 				  u64 *adj_start, u64 *adj_end)
120 {
121 	struct xe_svm_range *range = to_xe_range(r);
122 	struct xe_device *xe = vm->xe;
123 	struct xe_tile *tile;
124 	u8 tile_mask = 0;
125 	u8 id;
126 
127 	xe_svm_assert_in_notifier(vm);
128 
129 	range_debug(range, "NOTIFIER");
130 
131 	/* Skip if already unmapped or if no bindings exist */
132 	if (range->base.pages.flags.unmapped || !range->tile_present)
133 		return 0;
134 
135 	range_debug(range, "NOTIFIER - EXECUTE");
136 
137 	/* Adjust invalidation to range boundaries */
138 	*adj_start = min(xe_svm_range_start(range), mmu_range->start);
139 	*adj_end = max(xe_svm_range_end(range), mmu_range->end);
140 
141 	/*
142 	 * XXX: Ideally would zap PTEs in one shot in xe_svm_invalidate but the
143 	 * invalidation code can't correctly cope with sparse ranges or
144 	 * invalidations spanning multiple ranges.
145 	 */
146 	for_each_tile(tile, xe, id)
147 		if (xe_pt_zap_ptes_range(tile, vm, range)) {
148 			/*
149 			 * WRITE_ONCE pairs with READ_ONCE in
150 			 * xe_vm_has_valid_gpu_mapping()
151 			 */
152 			WRITE_ONCE(range->tile_invalidated,
153 				   range->tile_invalidated | BIT(id));
154 
155 			if (!(tile_mask & BIT(id))) {
156 				xe_svm_tlb_inval_count_stats_incr(tile->primary_gt);
157 				if (tile->media_gt)
158 					xe_svm_tlb_inval_count_stats_incr(tile->media_gt);
159 				tile_mask |= BIT(id);
160 			}
161 		}
162 
163 	return tile_mask;
164 }
165 
166 static void
167 xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r,
168 				const struct mmu_notifier_range *mmu_range)
169 {
170 	struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
171 
172 	xe_svm_assert_in_notifier(vm);
173 
174 	drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx);
175 	if (!xe_vm_is_closed(vm) && mmu_range->event == MMU_NOTIFY_UNMAP)
176 		xe_svm_garbage_collector_add_range(vm, to_xe_range(r),
177 						   mmu_range);
178 }
179 
180 static s64 xe_svm_stats_ktime_us_delta(ktime_t start)
181 {
182 	return IS_ENABLED(CONFIG_DEBUG_FS) ?
183 		ktime_us_delta(ktime_get(), start) : 0;
184 }
185 
186 static void xe_svm_tlb_inval_us_stats_incr(struct xe_gt *gt, ktime_t start)
187 {
188 	s64 us_delta = xe_svm_stats_ktime_us_delta(start);
189 
190 	xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_US, us_delta);
191 }
192 
193 static ktime_t xe_svm_stats_ktime_get(void)
194 {
195 	return IS_ENABLED(CONFIG_DEBUG_FS) ? ktime_get() : 0;
196 }
197 
198 static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
199 			      struct drm_gpusvm_notifier *notifier,
200 			      const struct mmu_notifier_range *mmu_range)
201 {
202 	struct xe_vm *vm = gpusvm_to_vm(gpusvm);
203 	struct xe_device *xe = vm->xe;
204 	struct drm_gpusvm_range *r, *first;
205 	struct xe_tile *tile;
206 	ktime_t start = xe_svm_stats_ktime_get();
207 	u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
208 	u8 tile_mask = 0, id;
209 	long err;
210 
211 	xe_svm_assert_in_notifier(vm);
212 
213 	vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm,
214 	       "INVALIDATE: asid=%u, gpusvm=%p, seqno=%lu, start=0x%016lx, end=0x%016lx, event=%d",
215 	       vm->usm.asid, gpusvm, notifier->notifier.invalidate_seq,
216 	       mmu_range->start, mmu_range->end, mmu_range->event);
217 
218 	/* Adjust invalidation to notifier boundaries */
219 	adj_start = max(drm_gpusvm_notifier_start(notifier), adj_start);
220 	adj_end = min(drm_gpusvm_notifier_end(notifier), adj_end);
221 
222 	first = drm_gpusvm_range_find(notifier, adj_start, adj_end);
223 	if (!first)
224 		return;
225 
226 	/*
227 	 * PTs may be getting destroyed so it is not safe to touch them, but the
228 	 * PTs should be invalidated at this point in time. Regardless, we still
229 	 * need to ensure any DMA mappings are unmapped here.
230 	 */
231 	if (xe_vm_is_closed(vm))
232 		goto range_notifier_event_end;
233 
234 	/*
235 	 * XXX: Less than ideal to always wait on VM's resv slots if an
236 	 * invalidation is not required. Could walk the range list twice to
237 	 * figure out if an invalidation is needed, but that is also not ideal.
238 	 */
239 	err = dma_resv_wait_timeout(xe_vm_resv(vm),
240 				    DMA_RESV_USAGE_BOOKKEEP,
241 				    false, MAX_SCHEDULE_TIMEOUT);
242 	XE_WARN_ON(err <= 0);
243 
244 	r = first;
245 	drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
246 		tile_mask |= xe_svm_range_notifier_event_begin(vm, r, mmu_range,
247 							       &adj_start,
248 							       &adj_end);
249 	if (!tile_mask)
250 		goto range_notifier_event_end;
251 
252 	xe_device_wmb(xe);
253 
254 	err = xe_vm_range_tilemask_tlb_inval(vm, adj_start, adj_end, tile_mask);
255 	WARN_ON_ONCE(err);
256 
257 range_notifier_event_end:
258 	r = first;
259 	drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
260 		xe_svm_range_notifier_event_end(vm, r, mmu_range);
261 	for_each_tile(tile, xe, id) {
262 		if (tile_mask & BIT(id)) {
263 			xe_svm_tlb_inval_us_stats_incr(tile->primary_gt, start);
264 			if (tile->media_gt)
265 				xe_svm_tlb_inval_us_stats_incr(tile->media_gt, start);
266 		}
267 	}
268 }
269 
270 static int __xe_svm_garbage_collector(struct xe_vm *vm,
271 				      struct xe_svm_range *range)
272 {
273 	struct dma_fence *fence;
274 
275 	range_debug(range, "GARBAGE COLLECTOR");
276 
277 	xe_vm_lock(vm, false);
278 	fence = xe_vm_range_unbind(vm, range);
279 	xe_vm_unlock(vm);
280 	if (IS_ERR(fence))
281 		return PTR_ERR(fence);
282 	dma_fence_put(fence);
283 
284 	drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);
285 
286 	return 0;
287 }
288 
289 static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64 range_end)
290 {
291 	struct xe_vma *vma;
292 	struct xe_vma_mem_attr default_attr = {
293 		.preferred_loc = {
294 			.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
295 			.migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
296 		},
297 		.atomic_access = DRM_XE_ATOMIC_UNDEFINED,
298 	};
299 	int err = 0;
300 
301 	vma = xe_vm_find_vma_by_addr(vm, range_start);
302 	if (!vma)
303 		return -EINVAL;
304 
305 	if (xe_vma_has_default_mem_attrs(vma))
306 		return 0;
307 
308 	vm_dbg(&vm->xe->drm, "Existing VMA start=0x%016llx, vma_end=0x%016llx",
309 	       xe_vma_start(vma), xe_vma_end(vma));
310 
311 	if (xe_vma_start(vma) == range_start && xe_vma_end(vma) == range_end) {
312 		default_attr.pat_index = vma->attr.default_pat_index;
313 		default_attr.default_pat_index  = vma->attr.default_pat_index;
314 		vma->attr = default_attr;
315 	} else {
316 		vm_dbg(&vm->xe->drm, "Split VMA start=0x%016llx, vma_end=0x%016llx",
317 		       range_start, range_end);
318 		err = xe_vm_alloc_cpu_addr_mirror_vma(vm, range_start, range_end - range_start);
319 		if (err) {
320 			drm_warn(&vm->xe->drm, "VMA SPLIT failed: %pe\n", ERR_PTR(err));
321 			xe_vm_kill(vm, true);
322 			return err;
323 		}
324 	}
325 
326 	/*
327 	 * When called from xe_svm_handle_pagefault the original VMA might have
328 	 * changed; signal this so the caller looks up the VMA again.
329 	 */
330 	return -EAGAIN;
331 }
332 
333 static int xe_svm_garbage_collector(struct xe_vm *vm)
334 {
335 	struct xe_svm_range *range;
336 	u64 range_start;
337 	u64 range_end;
338 	int err, ret = 0;
339 
340 	lockdep_assert_held_write(&vm->lock);
341 
342 	if (xe_vm_is_closed_or_banned(vm))
343 		return -ENOENT;
344 
345 	for (;;) {
346 		spin_lock(&vm->svm.garbage_collector.lock);
347 		range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
348 						 typeof(*range),
349 						 garbage_collector_link);
350 		if (!range)
351 			break;
352 
353 		range_start = xe_svm_range_start(range);
354 		range_end = xe_svm_range_end(range);
355 
356 		list_del(&range->garbage_collector_link);
357 		spin_unlock(&vm->svm.garbage_collector.lock);
358 
359 		err = __xe_svm_garbage_collector(vm, range);
360 		if (err) {
361 			drm_warn(&vm->xe->drm,
362 				 "Garbage collection failed: %pe\n",
363 				 ERR_PTR(err));
364 			xe_vm_kill(vm, true);
365 			return err;
366 		}
367 
368 		err = xe_svm_range_set_default_attr(vm, range_start, range_end);
369 		if (err) {
370 			if (err == -EAGAIN)
371 				ret = -EAGAIN;
372 			else
373 				return err;
374 		}
375 	}
376 	spin_unlock(&vm->svm.garbage_collector.lock);
377 
378 	return ret;
379 }
380 
381 static void xe_svm_garbage_collector_work_func(struct work_struct *w)
382 {
383 	struct xe_vm *vm = container_of(w, struct xe_vm,
384 					svm.garbage_collector.work);
385 
386 	down_write(&vm->lock);
387 	xe_svm_garbage_collector(vm);
388 	up_write(&vm->lock);
389 }
390 
391 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
392 
393 static struct xe_vram_region *page_to_vr(struct page *page)
394 {
395 	return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
396 }
397 
398 static u64 xe_vram_region_page_to_dpa(struct xe_vram_region *vr,
399 				      struct page *page)
400 {
401 	u64 dpa;
402 	u64 pfn = page_to_pfn(page);
403 	u64 offset;
404 
405 	xe_assert(vr->xe, is_device_private_page(page));
406 	xe_assert(vr->xe, (pfn << PAGE_SHIFT) >= vr->hpa_base);
407 
408 	offset = (pfn << PAGE_SHIFT) - vr->hpa_base;
409 	dpa = vr->dpa_base + offset;
410 
411 	return dpa;
412 }
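
/*
 * Worked example of the translation above (illustrative numbers only, not
 * taken from real hardware): with vr->hpa_base = 0x400000000,
 * vr->dpa_base = 0x0 and a device-private page whose pfn << PAGE_SHIFT is
 * 0x400200000, the device physical address is
 *
 *	dpa = 0x0 + (0x400200000 - 0x400000000) = 0x200000
 */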
413 
414 enum xe_svm_copy_dir {
415 	XE_SVM_COPY_TO_VRAM,
416 	XE_SVM_COPY_TO_SRAM,
417 };
418 
419 static void xe_svm_copy_kb_stats_incr(struct xe_gt *gt,
420 				      const enum xe_svm_copy_dir dir,
421 				      int kb)
422 {
423 	if (dir == XE_SVM_COPY_TO_VRAM)
424 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_KB, kb);
425 	else
426 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_KB, kb);
427 }
428 
429 static void xe_svm_copy_us_stats_incr(struct xe_gt *gt,
430 				      const enum xe_svm_copy_dir dir,
431 				      unsigned long npages,
432 				      ktime_t start)
433 {
434 	s64 us_delta = xe_svm_stats_ktime_us_delta(start);
435 
436 	if (dir == XE_SVM_COPY_TO_VRAM) {
437 		switch (npages) {
438 		case 1:
439 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_DEVICE_COPY_US,
440 					 us_delta);
441 			break;
442 		case 16:
443 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_DEVICE_COPY_US,
444 					 us_delta);
445 			break;
446 		case 512:
447 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_DEVICE_COPY_US,
448 					 us_delta);
449 			break;
450 		}
451 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_US,
452 				 us_delta);
453 	} else {
454 		switch (npages) {
455 		case 1:
456 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_CPU_COPY_US,
457 					 us_delta);
458 			break;
459 		case 16:
460 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_CPU_COPY_US,
461 					 us_delta);
462 			break;
463 		case 512:
464 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_CPU_COPY_US,
465 					 us_delta);
466 			break;
467 		}
468 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_US,
469 				 us_delta);
470 	}
471 }
472 
473 static int xe_svm_copy(struct page **pages,
474 		       struct drm_pagemap_addr *pagemap_addr,
475 		       unsigned long npages, const enum xe_svm_copy_dir dir)
476 {
477 	struct xe_vram_region *vr = NULL;
478 	struct xe_gt *gt = NULL;
479 	struct xe_device *xe;
480 	struct dma_fence *fence = NULL;
481 	unsigned long i;
482 #define XE_VRAM_ADDR_INVALID	~0x0ull
483 	u64 vram_addr = XE_VRAM_ADDR_INVALID;
484 	int err = 0, pos = 0;
485 	bool sram = dir == XE_SVM_COPY_TO_SRAM;
486 	ktime_t start = xe_svm_stats_ktime_get();
487 
488 	/*
489 	 * This flow is complex: it locates physically contiguous device pages,
490 	 * derives the starting physical address, and performs a single GPU copy
491 	 * to for every 8M chunk in a DMA address array. Both device pages and
492 	 * for every 8M chunk in a DMA address array. Both device pages and
493 	 * triggered based on the current search state. The last GPU copy is
494 	 * waited on to ensure all copies are complete.
495 	 */
496 
497 	for (i = 0; i < npages; ++i) {
498 		struct page *spage = pages[i];
499 		struct dma_fence *__fence;
500 		u64 __vram_addr;
501 		bool match = false, chunk, last;
502 
503 #define XE_MIGRATE_CHUNK_SIZE	SZ_8M
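		/* e.g. SZ_8M / SZ_4K == 2048 pages per copy chunk with 4K pages */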
504 		chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
505 		last = (i + 1) == npages;
506 
507 		/* No CPU page and no device pages queued to copy */
508 		if (!pagemap_addr[i].addr && vram_addr == XE_VRAM_ADDR_INVALID)
509 			continue;
510 
511 		if (!vr && spage) {
512 			vr = page_to_vr(spage);
513 			gt = xe_migrate_exec_queue(vr->migrate)->gt;
514 			xe = vr->xe;
515 		}
516 		XE_WARN_ON(spage && page_to_vr(spage) != vr);
517 
518 		/*
519 		 * CPU page and device page valid, capture physical address on
520 		 * first device page, check if physical contiguous on subsequent
521 		 * device pages.
522 		 */
523 		if (pagemap_addr[i].addr && spage) {
524 			__vram_addr = xe_vram_region_page_to_dpa(vr, spage);
525 			if (vram_addr == XE_VRAM_ADDR_INVALID) {
526 				vram_addr = __vram_addr;
527 				pos = i;
528 			}
529 
530 			match = vram_addr + PAGE_SIZE * (i - pos) == __vram_addr;
531 			/* Expected with contiguous memory */
532 			xe_assert(vr->xe, match);
533 
534 			if (pagemap_addr[i].order) {
535 				i += NR_PAGES(pagemap_addr[i].order) - 1;
536 				chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
537 				last = (i + 1) == npages;
538 			}
539 		}
540 
541 		/*
542 		 * Mismatched physical address, 8M copy chunk, or last page -
543 		 * trigger a copy.
544 		 */
545 		if (!match || chunk || last) {
546 			/*
547 			 * Extra page for first copy if last page and matching
548 			 * physical address.
549 			 */
550 			int incr = (match && last) ? 1 : 0;
551 
552 			if (vram_addr != XE_VRAM_ADDR_INVALID) {
553 				xe_svm_copy_kb_stats_incr(gt, dir,
554 							  (i - pos + incr) *
555 							  (PAGE_SIZE / SZ_1K));
556 				if (sram) {
557 					vm_dbg(&xe->drm,
558 					       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
559 					       vram_addr,
560 					       (u64)pagemap_addr[pos].addr, i - pos + incr);
561 					__fence = xe_migrate_from_vram(vr->migrate,
562 								       i - pos + incr,
563 								       vram_addr,
564 								       &pagemap_addr[pos]);
565 				} else {
566 					vm_dbg(&xe->drm,
567 					       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
568 					       (u64)pagemap_addr[pos].addr, vram_addr,
569 					       i - pos + incr);
570 					__fence = xe_migrate_to_vram(vr->migrate,
571 								     i - pos + incr,
572 								     &pagemap_addr[pos],
573 								     vram_addr);
574 				}
575 				if (IS_ERR(__fence)) {
576 					err = PTR_ERR(__fence);
577 					goto err_out;
578 				}
579 
580 				dma_fence_put(fence);
581 				fence = __fence;
582 			}
583 
584 			/* Setup physical address of next device page */
585 			if (pagemap_addr[i].addr && spage) {
586 				vram_addr = __vram_addr;
587 				pos = i;
588 			} else {
589 				vram_addr = XE_VRAM_ADDR_INVALID;
590 			}
591 
592 			/* Extra mismatched device page, copy it */
593 			if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
594 				xe_svm_copy_kb_stats_incr(gt, dir,
595 							  (PAGE_SIZE / SZ_1K));
596 				if (sram) {
597 					vm_dbg(&xe->drm,
598 					       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
599 					       vram_addr, (u64)pagemap_addr[pos].addr, 1);
600 					__fence = xe_migrate_from_vram(vr->migrate, 1,
601 								       vram_addr,
602 								       &pagemap_addr[pos]);
603 				} else {
604 					vm_dbg(&xe->drm,
605 					       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
606 					       (u64)pagemap_addr[pos].addr, vram_addr, 1);
607 					__fence = xe_migrate_to_vram(vr->migrate, 1,
608 								     &pagemap_addr[pos],
609 								     vram_addr);
610 				}
611 				if (IS_ERR(__fence)) {
612 					err = PTR_ERR(__fence);
613 					goto err_out;
614 				}
615 
616 				dma_fence_put(fence);
617 				fence = __fence;
618 			}
619 		}
620 	}
621 
622 err_out:
623 	/* Wait for all copies to complete */
624 	if (fence) {
625 		dma_fence_wait(fence, false);
626 		dma_fence_put(fence);
627 	}
628 
629 	/*
630 	 * XXX: We can't derive the GT here (or anywhere in this function), but
631 	 * compute always uses the primary GT so accumulate stats on the likely
632 	 * GT of the fault.
633 	 */
634 	if (gt)
635 		xe_svm_copy_us_stats_incr(gt, dir, npages, start);
636 
637 	return err;
638 #undef XE_MIGRATE_CHUNK_SIZE
639 #undef XE_VRAM_ADDR_INVALID
640 }
641 
642 static int xe_svm_copy_to_devmem(struct page **pages,
643 				 struct drm_pagemap_addr *pagemap_addr,
644 				 unsigned long npages)
645 {
646 	return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM);
647 }
648 
649 static int xe_svm_copy_to_ram(struct page **pages,
650 			      struct drm_pagemap_addr *pagemap_addr,
651 			      unsigned long npages)
652 {
653 	return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM);
654 }
655 
656 static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
657 {
658 	return container_of(devmem_allocation, struct xe_bo, devmem_allocation);
659 }
660 
661 static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
662 {
663 	struct xe_bo *bo = to_xe_bo(devmem_allocation);
664 	struct xe_device *xe = xe_bo_device(bo);
665 
666 	xe_bo_put_async(bo);
667 	xe_pm_runtime_put(xe);
668 }
669 
670 static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
671 {
672 	return PHYS_PFN(offset + vr->hpa_base);
673 }
674 
675 static struct drm_buddy *vram_to_buddy(struct xe_vram_region *vram)
676 {
677 	return &vram->ttm.mm;
678 }
679 
680 static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
681 				      unsigned long npages, unsigned long *pfn)
682 {
683 	struct xe_bo *bo = to_xe_bo(devmem_allocation);
684 	struct ttm_resource *res = bo->ttm.resource;
685 	struct list_head *blocks = &to_xe_ttm_vram_mgr_resource(res)->blocks;
686 	struct drm_buddy_block *block;
687 	int j = 0;
688 
689 	list_for_each_entry(block, blocks, link) {
690 		struct xe_vram_region *vr = block->private;
691 		struct drm_buddy *buddy = vram_to_buddy(vr);
692 		u64 block_pfn = block_offset_to_pfn(vr, drm_buddy_block_offset(block));
693 		int i;
694 
695 		for (i = 0; i < drm_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i)
696 			pfn[j++] = block_pfn + i;
697 	}
698 
699 	return 0;
700 }
701 
702 static const struct drm_pagemap_devmem_ops dpagemap_devmem_ops = {
703 	.devmem_release = xe_svm_devmem_release,
704 	.populate_devmem_pfn = xe_svm_populate_devmem_pfn,
705 	.copy_to_devmem = xe_svm_copy_to_devmem,
706 	.copy_to_ram = xe_svm_copy_to_ram,
707 };
708 
709 #endif
710 
711 static const struct drm_gpusvm_ops gpusvm_ops = {
712 	.range_alloc = xe_svm_range_alloc,
713 	.range_free = xe_svm_range_free,
714 	.invalidate = xe_svm_invalidate,
715 };
716 
717 static const unsigned long fault_chunk_sizes[] = {
718 	SZ_2M,
719 	SZ_64K,
720 	SZ_4K,
721 };
722 
723 /**
724  * xe_svm_init() - SVM initialize
725  * @vm: The VM.
726  *
727  * Initialize SVM state which is embedded within the VM.
728  *
729  * Return: 0 on success, negative error code on error.
730  */
731 int xe_svm_init(struct xe_vm *vm)
732 {
733 	int err;
734 
735 	if (vm->flags & XE_VM_FLAG_FAULT_MODE) {
736 		spin_lock_init(&vm->svm.garbage_collector.lock);
737 		INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
738 		INIT_WORK(&vm->svm.garbage_collector.work,
739 			  xe_svm_garbage_collector_work_func);
740 
741 		err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
742 				      current->mm, 0, vm->size,
743 				      xe_modparam.svm_notifier_size * SZ_1M,
744 				      &gpusvm_ops, fault_chunk_sizes,
745 				      ARRAY_SIZE(fault_chunk_sizes));
746 		drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
747 	} else {
748 		err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)",
749 				      &vm->xe->drm, NULL, 0, 0, 0, NULL,
750 				      NULL, 0);
751 	}
752 
753 	return err;
754 }
755 
756 /**
757  * xe_svm_close() - SVM close
758  * @vm: The VM.
759  *
760  * Close SVM state (i.e., stop and flush all SVM actions).
761  */
762 void xe_svm_close(struct xe_vm *vm)
763 {
764 	xe_assert(vm->xe, xe_vm_is_closed(vm));
765 	flush_work(&vm->svm.garbage_collector.work);
766 }
767 
768 /**
769  * xe_svm_fini() - SVM finalize
770  * @vm: The VM.
771  *
772  * Finalize SVM state which is embedded within the VM.
773  */
774 void xe_svm_fini(struct xe_vm *vm)
775 {
776 	xe_assert(vm->xe, xe_vm_is_closed(vm));
777 
778 	drm_gpusvm_fini(&vm->svm.gpusvm);
779 }
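
/*
 * Lifecycle sketch (hedged; the exact call sites live in the VM code and the
 * surrounding steps are illustrative):
 *
 *	err = xe_svm_init(vm);	(at VM creation, sets up gpusvm and GC state)
 *	...
 *	xe_svm_close(vm);	(once the VM is closed, flushes GC work)
 *	xe_svm_fini(vm);	(at VM destruction, tears down gpusvm)
 */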
780 
781 static bool xe_svm_range_is_valid(struct xe_svm_range *range,
782 				  struct xe_tile *tile,
783 				  bool devmem_only)
784 {
785 	return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
786 					    range->tile_invalidated) &&
787 		(!devmem_only || xe_svm_range_in_vram(range)));
788 }
789 
790 /**
791  * xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
792  * @vm: xe_vm pointer
793  * @range: Pointer to the SVM range structure
794  *
795  * Check whether the range has pages in VRAM and, if so, migrate them to SMEM.
796  */
797 void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
798 {
799 	if (xe_svm_range_in_vram(range))
800 		drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
801 }
802 
803 /**
804  * xe_svm_range_validate() - Check if the SVM range is valid
805  * @vm: xe_vm pointer
806  * @range: Pointer to the SVM range structure
807  * @tile_mask: Mask representing the tiles to be checked
808  * @devmem_preferred: if true, the range needs to be in devmem
809  *
810  * The xe_svm_range_validate() function checks if a range is
811  * valid and located in the desired memory region.
812  *
813  * Return: true if the range is valid, false otherwise
814  */
815 bool xe_svm_range_validate(struct xe_vm *vm,
816 			   struct xe_svm_range *range,
817 			   u8 tile_mask, bool devmem_preferred)
818 {
819 	bool ret;
820 
821 	xe_svm_notifier_lock(vm);
822 
823 	ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask &&
824 	       (devmem_preferred == range->base.pages.flags.has_devmem_pages);
825 
826 	xe_svm_notifier_unlock(vm);
827 
828 	return ret;
829 }
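
/*
 * Usage sketch (hedged, caller-side pseudocode): a prefetch-style path could
 * skip further work for a range that is already valid in the desired
 * placement on a given tile:
 *
 *	if (xe_svm_range_validate(vm, range, BIT(tile->id), devmem_preferred))
 *		return 0;
 */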
830 
831 /**
832  * xe_svm_find_vma_start() - Find start of CPU VMA
833  * @vm: xe_vm pointer
834  * @start: start address
835  * @end: end address
836  * @vma: Pointer to struct xe_vma
837  * @vma: Pointer to struct xe_vma
838  *
839  * This function searches for a CPU VMA within the specified range
840  * [start, end] in the given VM. It adjusts the range based on the xe_vma
841  * start and end addresses. If no CPU VMA is found, it returns ULONG_MAX.
842  *
843  * Return: The starting address of the VMA within the range,
844  * or ULONG_MAX if no VMA is found
845  */
846 u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *vma)
847 {
848 	return drm_gpusvm_find_vma_start(&vm->svm.gpusvm,
849 					 max(start, xe_vma_start(vma)),
850 					 min(end, xe_vma_end(vma)));
851 }
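
/*
 * Usage sketch (hedged): walking a user-supplied [start, end) window and
 * clamping it to the CPU VMA backing @vma:
 *
 *	u64 addr = xe_svm_find_vma_start(vm, start, end, vma);
 *
 *	if (addr == ULONG_MAX)
 *		return 0;	(no CPU VMA inside the window)
 */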
852 
853 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
854 static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
855 				      unsigned long start, unsigned long end,
856 				      struct mm_struct *mm,
857 				      unsigned long timeslice_ms)
858 {
859 	struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap);
860 	struct xe_device *xe = vr->xe;
861 	struct device *dev = xe->drm.dev;
862 	struct drm_buddy_block *block;
863 	struct xe_validation_ctx vctx;
864 	struct list_head *blocks;
865 	struct drm_exec exec;
866 	struct xe_bo *bo;
867 	int err = 0, idx;
868 
869 	if (!drm_dev_enter(&xe->drm, &idx))
870 		return -ENODEV;
871 
872 	xe_pm_runtime_get(xe);
873 
874 	xe_validation_guard(&vctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
875 		bo = xe_bo_create_locked(xe, NULL, NULL, end - start,
876 					 ttm_bo_type_device,
877 					 (IS_DGFX(xe) ? XE_BO_FLAG_VRAM(vr) : XE_BO_FLAG_SYSTEM) |
878 					 XE_BO_FLAG_CPU_ADDR_MIRROR, &exec);
879 		drm_exec_retry_on_contention(&exec);
880 		if (IS_ERR(bo)) {
881 			err = PTR_ERR(bo);
882 			xe_validation_retry_on_oom(&vctx, &err);
883 			break;
884 		}
885 
886 		drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
887 					&dpagemap_devmem_ops, dpagemap, end - start);
888 
889 		blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
890 		list_for_each_entry(block, blocks, link)
891 			block->private = vr;
892 
893 		xe_bo_get(bo);
894 
895 		/* Ensure the device has a pm ref while there are device pages active. */
896 		xe_pm_runtime_get_noresume(xe);
897 		err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
898 						    start, end, timeslice_ms,
899 						    xe_svm_devm_owner(xe));
900 		if (err)
901 			xe_svm_devmem_release(&bo->devmem_allocation);
902 		xe_bo_unlock(bo);
903 		xe_bo_put(bo);
904 	}
905 	xe_pm_runtime_put(xe);
906 	drm_dev_exit(idx);
907 
908 	return err;
909 }
910 #endif
911 
912 static bool supports_4K_migration(struct xe_device *xe)
913 {
914 	if (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
915 		return false;
916 
917 	return true;
918 }
919 
920 /**
921  * xe_svm_range_needs_migrate_to_vram() - Check if SVM range needs migration to VRAM
922  * @range: SVM range for which migration needs to be decided
923  * @vma: vma which has range
924  * @preferred_region_is_vram: preferred region for range is vram
925  *
926  * Return: True if the range needs migration and migration is supported, else false
927  */
928 bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
929 					bool preferred_region_is_vram)
930 {
931 	struct xe_vm *vm = range_to_vm(&range->base);
932 	u64 range_size = xe_svm_range_size(range);
933 
934 	if (!range->base.pages.flags.migrate_devmem || !preferred_region_is_vram)
935 		return false;
936 
937 	xe_assert(vm->xe, IS_DGFX(vm->xe));
938 
939 	if (xe_svm_range_in_vram(range)) {
940 		drm_info(&vm->xe->drm, "Range is already in VRAM\n");
941 		return false;
942 	}
943 
944 	if (range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
945 		drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
946 		return false;
947 	}
948 
949 	return true;
950 }
951 
952 #define DECL_SVM_RANGE_COUNT_STATS(elem, stat) \
953 static void xe_svm_range_##elem##_count_stats_incr(struct xe_gt *gt, \
954 						   struct xe_svm_range *range) \
955 { \
956 	switch (xe_svm_range_size(range)) { \
957 	case SZ_4K: \
958 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_COUNT, 1); \
959 		break; \
960 	case SZ_64K: \
961 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_COUNT, 1); \
962 		break; \
963 	case SZ_2M: \
964 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_COUNT, 1); \
965 		break; \
966 	} \
967 } \
968 
969 DECL_SVM_RANGE_COUNT_STATS(fault, PAGEFAULT)
970 DECL_SVM_RANGE_COUNT_STATS(valid_fault, VALID_PAGEFAULT)
971 DECL_SVM_RANGE_COUNT_STATS(migrate, MIGRATE)
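
/*
 * For reference, DECL_SVM_RANGE_COUNT_STATS(fault, PAGEFAULT) above expands
 * to roughly the following (sketch, whitespace simplified):
 *
 *	static void xe_svm_range_fault_count_stats_incr(struct xe_gt *gt,
 *							struct xe_svm_range *range)
 *	{
 *		switch (xe_svm_range_size(range)) {
 *		case SZ_4K:
 *			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_PAGEFAULT_COUNT, 1);
 *			break;
 *		...
 *		}
 *	}
 */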
972 
973 #define DECL_SVM_RANGE_US_STATS(elem, stat) \
974 static void xe_svm_range_##elem##_us_stats_incr(struct xe_gt *gt, \
975 						struct xe_svm_range *range, \
976 						ktime_t start) \
977 { \
978 	s64 us_delta = xe_svm_stats_ktime_us_delta(start); \
979 \
980 	switch (xe_svm_range_size(range)) { \
981 	case SZ_4K: \
982 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_US, \
983 				 us_delta); \
984 		break; \
985 	case SZ_64K: \
986 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_US, \
987 				 us_delta); \
988 		break; \
989 	case SZ_2M: \
990 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_US, \
991 				 us_delta); \
992 		break; \
993 	} \
994 } \
995 
996 DECL_SVM_RANGE_US_STATS(migrate, MIGRATE)
997 DECL_SVM_RANGE_US_STATS(get_pages, GET_PAGES)
998 DECL_SVM_RANGE_US_STATS(bind, BIND)
999 DECL_SVM_RANGE_US_STATS(fault, PAGEFAULT)
1000 
1001 static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
1002 				     struct xe_gt *gt, u64 fault_addr,
1003 				     bool need_vram)
1004 {
1005 	int devmem_possible = IS_DGFX(vm->xe) &&
1006 		IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
1007 	struct drm_gpusvm_ctx ctx = {
1008 		.read_only = xe_vma_read_only(vma),
1009 		.devmem_possible = devmem_possible,
1010 		.check_pages_threshold = devmem_possible ? SZ_64K : 0,
1011 		.devmem_only = need_vram && devmem_possible,
1012 		.timeslice_ms = need_vram && devmem_possible ?
1013 			vm->xe->atomic_svm_timeslice_ms : 0,
1014 		.device_private_page_owner = xe_svm_devm_owner(vm->xe),
1015 	};
1016 	struct xe_validation_ctx vctx;
1017 	struct drm_exec exec;
1018 	struct xe_svm_range *range;
1019 	struct dma_fence *fence;
1020 	struct drm_pagemap *dpagemap;
1021 	struct xe_tile *tile = gt_to_tile(gt);
1022 	int migrate_try_count = ctx.devmem_only ? 3 : 1;
1023 	ktime_t start = xe_svm_stats_ktime_get(), bind_start, get_pages_start;
1024 	int err;
1025 
1026 	lockdep_assert_held_write(&vm->lock);
1027 	xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
1028 
1029 	xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, 1);
1030 
1031 retry:
1032 	/* Always process UNMAPs first so the view of SVM ranges is current */
1033 	err = xe_svm_garbage_collector(vm);
1034 	if (err)
1035 		return err;
1036 
1037 	dpagemap = xe_vma_resolve_pagemap(vma, tile);
1038 	if (!dpagemap && !ctx.devmem_only)
1039 		ctx.device_private_page_owner = NULL;
1040 	range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
1041 
1042 	if (IS_ERR(range))
1043 		return PTR_ERR(range);
1044 
1045 	xe_svm_range_fault_count_stats_incr(gt, range);
1046 
1047 	if (ctx.devmem_only && !range->base.pages.flags.migrate_devmem) {
1048 		err = -EACCES;
1049 		goto out;
1050 	}
1051 
1052 	if (xe_svm_range_is_valid(range, tile, ctx.devmem_only)) {
1053 		xe_svm_range_valid_fault_count_stats_incr(gt, range);
1054 		range_debug(range, "PAGE FAULT - VALID");
1055 		goto out;
1056 	}
1057 
1058 	range_debug(range, "PAGE FAULT");
1059 
1060 	if (--migrate_try_count >= 0 &&
1061 	    xe_svm_range_needs_migrate_to_vram(range, vma, !!dpagemap || ctx.devmem_only)) {
1062 		ktime_t migrate_start = xe_svm_stats_ktime_get();
1063 
1064 		/* TODO: For multi-device, dpagemap will be used to find the
1065 		 * remote tile and remote device. Will need to modify
1066 		 * xe_svm_alloc_vram to use dpagemap for future multi-device
1067 		 * support.
1068 		 */
1069 		xe_svm_range_migrate_count_stats_incr(gt, range);
1070 		err = xe_svm_alloc_vram(tile, range, &ctx);
1071 		xe_svm_range_migrate_us_stats_incr(gt, range, migrate_start);
1072 		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
1073 		if (err) {
1074 			if (migrate_try_count || !ctx.devmem_only) {
1075 				drm_dbg(&vm->xe->drm,
1076 					"VRAM allocation failed, falling back to retrying fault, asid=%u, errno=%pe\n",
1077 					vm->usm.asid, ERR_PTR(err));
1078 
1079 				/*
1080 				 * In the devmem-only case, mixed mappings may
1081 				 * be found. The get_pages function will fix
1082 				 * these up to a single location, allowing the
1083 				 * page fault handler to make forward progress.
1084 				 */
1085 				if (ctx.devmem_only)
1086 					goto get_pages;
1087 				else
1088 					goto retry;
1089 			} else {
1090 				drm_err(&vm->xe->drm,
1091 					"VRAM allocation failed, retry count exceeded, asid=%u, errno=%pe\n",
1092 					vm->usm.asid, ERR_PTR(err));
1093 				return err;
1094 			}
1095 		}
1096 	}
1097 
1098 get_pages:
1099 	get_pages_start = xe_svm_stats_ktime_get();
1100 
1101 	range_debug(range, "GET PAGES");
1102 	err = xe_svm_range_get_pages(vm, range, &ctx);
1103 	/* Corner case where CPU mappings have changed */
1104 	if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
1105 		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
1106 		if (migrate_try_count > 0 || !ctx.devmem_only) {
1107 			drm_dbg(&vm->xe->drm,
1108 				"Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
1109 				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
1110 			range_debug(range, "PAGE FAULT - RETRY PAGES");
1111 			goto retry;
1112 		} else {
1113 			drm_err(&vm->xe->drm,
1114 				"Get pages failed, retry count exceeded, asid=%u, gpusvm=%p, errno=%pe\n",
1115 				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
1116 		}
1117 	}
1118 	if (err) {
1119 		range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
1120 		goto out;
1121 	}
1122 
1123 	xe_svm_range_get_pages_us_stats_incr(gt, range, get_pages_start);
1124 	range_debug(range, "PAGE FAULT - BIND");
1125 
1126 	bind_start = xe_svm_stats_ktime_get();
1127 	xe_validation_guard(&vctx, &vm->xe->val, &exec, (struct xe_val_flags) {}, err) {
1128 		err = xe_vm_drm_exec_lock(vm, &exec);
1129 		drm_exec_retry_on_contention(&exec);
1130 
1131 		xe_vm_set_validation_exec(vm, &exec);
1132 		fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
1133 		xe_vm_set_validation_exec(vm, NULL);
1134 		if (IS_ERR(fence)) {
1135 			drm_exec_retry_on_contention(&exec);
1136 			err = PTR_ERR(fence);
1137 			xe_validation_retry_on_oom(&vctx, &err);
1138 			xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
1139 			break;
1140 		}
1141 	}
1142 	if (err)
1143 		goto err_out;
1144 
1145 	dma_fence_wait(fence, false);
1146 	dma_fence_put(fence);
1147 	xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
1148 
1149 out:
1150 	xe_svm_range_fault_us_stats_incr(gt, range, start);
1151 	return err;
1152 
1153 err_out:
1154 	if (err == -EAGAIN) {
1155 		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
1156 		range_debug(range, "PAGE FAULT - RETRY BIND");
1157 		goto retry;
1158 	}
1159 
1160 	return err;
1161 }
1162 
1163 /**
1164  * xe_svm_handle_pagefault() - SVM handle page fault
1165  * @vm: The VM.
1166  * @vma: The CPU address mirror VMA.
1167  * @gt: The gt upon which the fault occurred.
1168  * @fault_addr: The GPU fault address.
1169  * @atomic: The fault atomic access bit.
1170  *
1171  * Create GPU bindings for a SVM page fault. Optionally migrate to device
1172  * memory.
1173  *
1174  * Return: 0 on success, negative error code on error.
1175  */
1176 int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
1177 			    struct xe_gt *gt, u64 fault_addr,
1178 			    bool atomic)
1179 {
1180 	int need_vram, ret;
1181 retry:
1182 	need_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic);
1183 	if (need_vram < 0)
1184 		return need_vram;
1185 
1186 	ret =  __xe_svm_handle_pagefault(vm, vma, gt, fault_addr,
1187 					 need_vram ? true : false);
1188 	if (ret == -EAGAIN) {
1189 		/*
1190 		 * Retry once on -EAGAIN to re-lookup the VMA, as the original VMA
1191 		 * may have been split by xe_svm_range_set_default_attr.
1192 		 */
1193 		vma = xe_vm_find_vma_by_addr(vm, fault_addr);
1194 		if (!vma)
1195 			return -EINVAL;
1196 
1197 		goto retry;
1198 	}
1199 	return ret;
1200 }
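
/*
 * Caller sketch (hedged; the GT page fault handler that calls this performs
 * more work around the call, and the surrounding names are illustrative):
 *
 *	if (xe_vma_is_cpu_addr_mirror(vma))
 *		err = xe_svm_handle_pagefault(vm, vma, gt, fault_addr, atomic);
 *
 * where @atomic is the fault's atomic access bit.
 */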
1201 
1202 /**
1203  * xe_svm_has_mapping() - SVM has mappings
1204  * @vm: The VM.
1205  * @start: Start address.
1206  * @end: End address.
1207  *
1208  * Check if an address range has SVM mappings.
1209  *
1210  * Return: True if address range has a SVM mapping, False otherwise
1211  */
1212 bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
1213 {
1214 	return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
1215 }
1216 
1217 /**
1218  * xe_svm_unmap_address_range() - Unmap SVM mappings and ranges
1219  * @vm: The VM
1220  * @start: start addr
1221  * @end: end addr
1222  *
1223  * This function unmaps SVM ranges if the start or end address is inside them.
1224  */
1225 void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
1226 {
1227 	struct drm_gpusvm_notifier *notifier, *next;
1228 
1229 	lockdep_assert_held_write(&vm->lock);
1230 
1231 	drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) {
1232 		struct drm_gpusvm_range *range, *__next;
1233 
1234 		drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
1235 			if (start > drm_gpusvm_range_start(range) ||
1236 			    end < drm_gpusvm_range_end(range)) {
1237 				if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
1238 					drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
1239 				drm_gpusvm_range_get(range);
1240 				__xe_svm_garbage_collector(vm, to_xe_range(range));
1241 				if (!list_empty(&to_xe_range(range)->garbage_collector_link)) {
1242 					spin_lock(&vm->svm.garbage_collector.lock);
1243 					list_del(&to_xe_range(range)->garbage_collector_link);
1244 					spin_unlock(&vm->svm.garbage_collector.lock);
1245 				}
1246 				drm_gpusvm_range_put(range);
1247 			}
1248 		}
1249 	}
1250 }
1251 
1252 /**
1253  * xe_svm_bo_evict() - SVM evict BO to system memory
1254  * @bo: BO to evict
1255  *
1256  * SVM evict BO to system memory. GPU SVM layer ensures all device pages
1257  * are evicted before returning.
1258  *
1259  * Return: 0 on success, standard error code otherwise
1260  */
1261 int xe_svm_bo_evict(struct xe_bo *bo)
1262 {
1263 	return drm_pagemap_evict_to_ram(&bo->devmem_allocation);
1264 }
1265 
1266 /**
1267  * xe_svm_range_find_or_insert() - Find or insert GPU SVM range
1268  * @vm: xe_vm pointer
1269  * @addr: address for which range needs to be found/inserted
1270  * @vma: Pointer to struct xe_vma which mirrors the CPU VMA
1271  * @ctx: GPU SVM context
1272  *
1273  * This function finds an existing SVM range or inserts a newly allocated one
1274  * based on the address.
1275  *
1276  * Return: Pointer to the SVM range on success, ERR_PTR() on failure.
1277  */
1278 struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
1279 						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
1280 {
1281 	struct drm_gpusvm_range *r;
1282 
1283 	r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
1284 					    xe_vma_start(vma), xe_vma_end(vma), ctx);
1285 	if (IS_ERR(r))
1286 		return ERR_CAST(r);
1287 
1288 	return to_xe_range(r);
1289 }
1290 
1291 /**
1292  * xe_svm_range_get_pages() - Get pages for a SVM range
1293  * @vm: Pointer to the struct xe_vm
1294  * @range: Pointer to the xe SVM range structure
1295  * @ctx: GPU SVM context
1296  *
1297  * This function gets pages for a SVM range and ensures they are mapped for
1298  * DMA access. In case of failure with -EOPNOTSUPP, it evicts the range.
1299  *
1300  * Return: 0 on success, negative error code on failure.
1301  */
1302 int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
1303 			   struct drm_gpusvm_ctx *ctx)
1304 {
1305 	int err = 0;
1306 
1307 	err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
1308 	if (err == -EOPNOTSUPP) {
1309 		range_debug(range, "PAGE FAULT - EVICT PAGES");
1310 		drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
1311 	}
1312 
1313 	return err;
1314 }
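
/*
 * Combined usage sketch (hedged): outside the page fault handler, a
 * prefetch-style caller would typically pair this with
 * xe_svm_range_find_or_insert():
 *
 *	range = xe_svm_range_find_or_insert(vm, addr, vma, &ctx);
 *	if (IS_ERR(range))
 *		return PTR_ERR(range);
 *
 *	err = xe_svm_range_get_pages(vm, range, &ctx);
 *	if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM)
 *		goto retry;	(CPU mappings changed underneath us)
 */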
1315 
1316 /**
1317  * xe_svm_ranges_zap_ptes_in_range() - Clear PTEs of SVM ranges in input range
1318  * @vm: Pointer to the xe_vm structure
1319  * @start: Start of the input range
1320  * @end: End of the input range
1321  *
1322  * This function removes the page table entries (PTEs) associated with the
1323  * SVM ranges within the given input start and end.
1324  *
1325  * Return: tile_mask of tiles whose GTs need to be TLB invalidated.
1326  */
1327 u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
1328 {
1329 	struct drm_gpusvm_notifier *notifier;
1330 	struct xe_svm_range *range;
1331 	u64 adj_start, adj_end;
1332 	struct xe_tile *tile;
1333 	u8 tile_mask = 0;
1334 	u8 id;
1335 
1336 	lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
1337 		       lockdep_is_held_type(&vm->lock, 0));
1338 
1339 	drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) {
1340 		struct drm_gpusvm_range *r = NULL;
1341 
1342 		adj_start = max(start, drm_gpusvm_notifier_start(notifier));
1343 		adj_end = min(end, drm_gpusvm_notifier_end(notifier));
1344 		drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) {
1345 			range = to_xe_range(r);
1346 			for_each_tile(tile, vm->xe, id) {
1347 				if (xe_pt_zap_ptes_range(tile, vm, range)) {
1348 					tile_mask |= BIT(id);
1349 					/*
1350 					 * WRITE_ONCE pairs with READ_ONCE in
1351 					 * xe_vm_has_valid_gpu_mapping().
1352 					 * Must not fail after setting
1353 					 * tile_invalidated and before
1354 					 * TLB invalidation.
1355 					 */
1356 					WRITE_ONCE(range->tile_invalidated,
1357 						   range->tile_invalidated | BIT(id));
1358 				}
1359 			}
1360 		}
1361 	}
1362 
1363 	return tile_mask;
1364 }
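
/*
 * Usage sketch (hedged): the returned mask is expected to be turned into a
 * TLB invalidation by the caller, along the lines of what the notifier path
 * above does:
 *
 *	tile_mask = xe_svm_ranges_zap_ptes_in_range(vm, start, end);
 *	if (tile_mask) {
 *		xe_device_wmb(vm->xe);
 *		err = xe_vm_range_tilemask_tlb_inval(vm, start, end, tile_mask);
 *	}
 */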
1365 
1366 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
1367 
1368 static struct drm_pagemap *tile_local_pagemap(struct xe_tile *tile)
1369 {
1370 	return &tile->mem.vram->dpagemap;
1371 }
1372 
1373 /**
1374  * xe_vma_resolve_pagemap - Resolve the appropriate DRM pagemap for a VMA
1375  * @vma: Pointer to the xe_vma structure containing memory attributes
1376  * @tile: Pointer to the xe_tile structure used as fallback for VRAM mapping
1377  *
1378  * This function determines the correct DRM pagemap to use for a given VMA.
1379  * It first checks if a valid devmem_fd is provided in the VMA's preferred
1380  * location. If the devmem_fd is DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, it
1381  * returns NULL, indicating no pagemap is available and SMEM is to be used
1382  * as the preferred location. If it is DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
1383  * it returns the VRAM pagemap associated with the tile.
1384  *
1385  * Future support for multi-device configurations may use drm_pagemap_from_fd()
1386  * to resolve pagemaps from arbitrary file descriptors.
1387  *
1388  * Return: A pointer to the resolved drm_pagemap, or NULL if none is applicable.
1389  */
1390 struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
1391 {
1392 	s32 fd = (s32)vma->attr.preferred_loc.devmem_fd;
1393 
1394 	if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM)
1395 		return NULL;
1396 
1397 	if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE)
1398 		return IS_DGFX(tile_to_xe(tile)) ? tile_local_pagemap(tile) : NULL;
1399 
1400 	/* TODO: Support multi-device with drm_pagemap_from_fd(fd) */
1401 	return NULL;
1402 }
1403 
1404 /**
1405  * xe_svm_alloc_vram() - Allocate device memory pages for range,
1406  * migrating existing data.
1407  * @tile: tile to allocate vram from
1408  * @range: SVM range
1409  * @ctx: DRM GPU SVM context
1410  *
1411  * Return: 0 on success, error code on failure.
1412  */
1413 int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
1414 		      const struct drm_gpusvm_ctx *ctx)
1415 {
1416 	struct drm_pagemap *dpagemap;
1417 
1418 	xe_assert(tile_to_xe(tile), range->base.pages.flags.migrate_devmem);
1419 	range_debug(range, "ALLOCATE VRAM");
1420 
1421 	dpagemap = tile_local_pagemap(tile);
1422 	return drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
1423 				       xe_svm_range_end(range),
1424 				       range->base.gpusvm->mm,
1425 				       ctx->timeslice_ms);
1426 }
1427 
1428 static struct drm_pagemap_addr
1429 xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
1430 			  struct device *dev,
1431 			  struct page *page,
1432 			  unsigned int order,
1433 			  enum dma_data_direction dir)
1434 {
1435 	struct device *pgmap_dev = dpagemap->dev;
1436 	enum drm_interconnect_protocol prot;
1437 	dma_addr_t addr;
1438 
1439 	if (pgmap_dev == dev) {
1440 		addr = xe_vram_region_page_to_dpa(page_to_vr(page), page);
1441 		prot = XE_INTERCONNECT_VRAM;
1442 	} else {
1443 		addr = DMA_MAPPING_ERROR;
1444 		prot = 0;
1445 	}
1446 
1447 	return drm_pagemap_addr_encode(addr, prot, order, dir);
1448 }
1449 
1450 static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
1451 	.device_map = xe_drm_pagemap_device_map,
1452 	.populate_mm = xe_drm_pagemap_populate_mm,
1453 };
1454 
1455 /**
1456  * xe_devm_add() - Remap and provide memmap backing for device memory
1457  * @tile: tile that the memory region belongs to
1458  * @vr: vram memory region to remap
1459  *
1460  * This remaps device memory into the host physical address space and creates
1461  * struct pages to back device memory.
1462  *
1463  * Return: 0 on success, standard error code otherwise
1464  */
1465 int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
1466 {
1467 	struct xe_device *xe = tile_to_xe(tile);
1468 	struct device *dev = &to_pci_dev(xe->drm.dev)->dev;
1469 	struct resource *res;
1470 	void *addr;
1471 	int ret;
1472 
1473 	res = devm_request_free_mem_region(dev, &iomem_resource,
1474 					   vr->usable_size);
1475 	if (IS_ERR(res)) {
1476 		ret = PTR_ERR(res);
1477 		return ret;
1478 	}
1479 
1480 	vr->pagemap.type = MEMORY_DEVICE_PRIVATE;
1481 	vr->pagemap.range.start = res->start;
1482 	vr->pagemap.range.end = res->end;
1483 	vr->pagemap.nr_range = 1;
1484 	vr->pagemap.ops = drm_pagemap_pagemap_ops_get();
1485 	vr->pagemap.owner = xe_svm_devm_owner(xe);
1486 	addr = devm_memremap_pages(dev, &vr->pagemap);
1487 
1488 	vr->dpagemap.dev = dev;
1489 	vr->dpagemap.ops = &xe_drm_pagemap_ops;
1490 
1491 	if (IS_ERR(addr)) {
1492 		devm_release_mem_region(dev, res->start, resource_size(res));
1493 		ret = PTR_ERR(addr);
1494 		drm_err(&xe->drm, "Failed to remap tile %d memory, errno %pe\n",
1495 			tile->id, ERR_PTR(ret));
1496 		return ret;
1497 	}
1498 	vr->hpa_base = res->start;
1499 
1500 	drm_dbg(&xe->drm, "Added tile %d memory [%llx-%llx] to devm, remapped to %pr\n",
1501 		tile->id, vr->io_start, vr->io_start + vr->usable_size, res);
1502 	return 0;
1503 }
1504 #else
1505 int xe_svm_alloc_vram(struct xe_tile *tile,
1506 		      struct xe_svm_range *range,
1507 		      const struct drm_gpusvm_ctx *ctx)
1508 {
1509 	return -EOPNOTSUPP;
1510 }
1511 
1512 int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
1513 {
1514 	return 0;
1515 }
1516 
1517 struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
1518 {
1519 	return NULL;
1520 }
1521 #endif
1522 
1523 /**
1524  * xe_svm_flush() - SVM flush
1525  * @vm: The VM.
1526  *
1527  * Flush all SVM actions.
1528  */
1529 void xe_svm_flush(struct xe_vm *vm)
1530 {
1531 	if (xe_vm_in_fault_mode(vm))
1532 		flush_work(&vm->svm.garbage_collector.work);
1533 }
1534