xref: /linux/drivers/gpu/drm/xe/xe_svm.c (revision 55a42f78ffd386e01a5404419f8c5ded7db70a21)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2024 Intel Corporation
4  */
5 
6 #include <drm/drm_drv.h>
7 
8 #include "xe_bo.h"
9 #include "xe_exec_queue_types.h"
10 #include "xe_gt_stats.h"
11 #include "xe_migrate.h"
12 #include "xe_module.h"
13 #include "xe_pm.h"
14 #include "xe_pt.h"
15 #include "xe_svm.h"
16 #include "xe_tile.h"
17 #include "xe_ttm_vram_mgr.h"
18 #include "xe_vm.h"
19 #include "xe_vm_types.h"
20 #include "xe_vram_types.h"
21 
22 static bool xe_svm_range_in_vram(struct xe_svm_range *range)
23 {
24 	/*
25 	 * Advisory-only check of whether the range is currently backed by
26 	 * VRAM memory.
27 	 */
28 
29 	struct drm_gpusvm_pages_flags flags = {
30 		/* Pairs with WRITE_ONCE in drm_gpusvm.c */
31 		.__flags = READ_ONCE(range->base.pages.flags.__flags),
32 	};
33 
34 	return flags.has_devmem_pages;
35 }
36 
37 static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
38 {
39 	/* Not reliable without notifier lock */
40 	return xe_svm_range_in_vram(range) && range->tile_present;
41 }
42 
43 static struct xe_vm *gpusvm_to_vm(struct drm_gpusvm *gpusvm)
44 {
45 	return container_of(gpusvm, struct xe_vm, svm.gpusvm);
46 }
47 
48 static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
49 {
50 	return gpusvm_to_vm(r->gpusvm);
51 }
52 
53 #define range_debug(r__, operation__)					\
54 	vm_dbg(&range_to_vm(&(r__)->base)->xe->drm,			\
55 	       "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
56 	       "start=0x%014lx, end=0x%014lx, size=%lu",		\
57 	       (operation__), range_to_vm(&(r__)->base)->usm.asid,	\
58 	       (r__)->base.gpusvm,					\
59 	       xe_svm_range_in_vram((r__)) ? 1 : 0,			\
60 	       xe_svm_range_has_vram_binding((r__)) ? 1 : 0,		\
61 	       (r__)->base.pages.notifier_seq,				\
62 	       xe_svm_range_start((r__)), xe_svm_range_end((r__)),	\
63 	       xe_svm_range_size((r__)))
64 
65 void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
66 {
67 	range_debug(range, operation);
68 }
69 
70 static void *xe_svm_devm_owner(struct xe_device *xe)
71 {
72 	return xe;
73 }
74 
75 static struct drm_gpusvm_range *
76 xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
77 {
78 	struct xe_svm_range *range;
79 
80 	range = kzalloc(sizeof(*range), GFP_KERNEL);
81 	if (!range)
82 		return NULL;
83 
84 	INIT_LIST_HEAD(&range->garbage_collector_link);
85 	xe_vm_get(gpusvm_to_vm(gpusvm));
86 
87 	return &range->base;
88 }
89 
90 static void xe_svm_range_free(struct drm_gpusvm_range *range)
91 {
92 	xe_vm_put(range_to_vm(range));
93 	kfree(range);
94 }
95 
96 static void
97 xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
98 				   const struct mmu_notifier_range *mmu_range)
99 {
100 	struct xe_device *xe = vm->xe;
101 
102 	range_debug(range, "GARBAGE COLLECTOR ADD");
103 
104 	drm_gpusvm_range_set_unmapped(&range->base, mmu_range);
105 
106 	spin_lock(&vm->svm.garbage_collector.lock);
107 	if (list_empty(&range->garbage_collector_link))
108 		list_add_tail(&range->garbage_collector_link,
109 			      &vm->svm.garbage_collector.range_list);
110 	spin_unlock(&vm->svm.garbage_collector.lock);
111 
112 	queue_work(xe_device_get_root_tile(xe)->primary_gt->usm.pf_wq,
113 		   &vm->svm.garbage_collector.work);
114 }
115 
116 static void xe_svm_tlb_inval_count_stats_incr(struct xe_gt *gt)
117 {
118 	xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_COUNT, 1);
119 }
120 
121 static u8
122 xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
123 				  const struct mmu_notifier_range *mmu_range,
124 				  u64 *adj_start, u64 *adj_end)
125 {
126 	struct xe_svm_range *range = to_xe_range(r);
127 	struct xe_device *xe = vm->xe;
128 	struct xe_tile *tile;
129 	u8 tile_mask = 0;
130 	u8 id;
131 
132 	xe_svm_assert_in_notifier(vm);
133 
134 	range_debug(range, "NOTIFIER");
135 
136 	/* Skip if already unmapped or if no bindings exist */
137 	if (range->base.pages.flags.unmapped || !range->tile_present)
138 		return 0;
139 
140 	range_debug(range, "NOTIFIER - EXECUTE");
141 
142 	/* Adjust invalidation to range boundaries */
143 	*adj_start = min(xe_svm_range_start(range), mmu_range->start);
144 	*adj_end = max(xe_svm_range_end(range), mmu_range->end);
145 
146 	/*
147 	 * XXX: Ideally would zap PTEs in one shot in xe_svm_invalidate but the
148 	 * invalidation code can't correctly cope with sparse ranges or
149 	 * invalidations spanning multiple ranges.
150 	 */
151 	for_each_tile(tile, xe, id)
152 		if (xe_pt_zap_ptes_range(tile, vm, range)) {
153 			/*
154 			 * WRITE_ONCE pairs with READ_ONCE in
155 			 * xe_vm_has_valid_gpu_mapping()
156 			 */
157 			WRITE_ONCE(range->tile_invalidated,
158 				   range->tile_invalidated | BIT(id));
159 
160 			if (!(tile_mask & BIT(id))) {
161 				xe_svm_tlb_inval_count_stats_incr(tile->primary_gt);
162 				if (tile->media_gt)
163 					xe_svm_tlb_inval_count_stats_incr(tile->media_gt);
164 				tile_mask |= BIT(id);
165 			}
166 		}
167 
168 	return tile_mask;
169 }
170 
171 static void
172 xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r,
173 				const struct mmu_notifier_range *mmu_range)
174 {
175 	struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
176 
177 	xe_svm_assert_in_notifier(vm);
178 
179 	drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx);
180 	if (!xe_vm_is_closed(vm) && mmu_range->event == MMU_NOTIFY_UNMAP)
181 		xe_svm_garbage_collector_add_range(vm, to_xe_range(r),
182 						   mmu_range);
183 }
184 
185 static s64 xe_svm_stats_ktime_us_delta(ktime_t start)
186 {
187 	return IS_ENABLED(CONFIG_DEBUG_FS) ?
188 		ktime_us_delta(ktime_get(), start) : 0;
189 }
190 
191 static void xe_svm_tlb_inval_us_stats_incr(struct xe_gt *gt, ktime_t start)
192 {
193 	s64 us_delta = xe_svm_stats_ktime_us_delta(start);
194 
195 	xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_US, us_delta);
196 }
197 
198 static ktime_t xe_svm_stats_ktime_get(void)
199 {
200 	return IS_ENABLED(CONFIG_DEBUG_FS) ? ktime_get() : 0;
201 }
202 
203 static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
204 			      struct drm_gpusvm_notifier *notifier,
205 			      const struct mmu_notifier_range *mmu_range)
206 {
207 	struct xe_vm *vm = gpusvm_to_vm(gpusvm);
208 	struct xe_device *xe = vm->xe;
209 	struct drm_gpusvm_range *r, *first;
210 	struct xe_tile *tile;
211 	ktime_t start = xe_svm_stats_ktime_get();
212 	u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
213 	u8 tile_mask = 0, id;
214 	long err;
215 
216 	xe_svm_assert_in_notifier(vm);
217 
218 	vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm,
219 	       "INVALIDATE: asid=%u, gpusvm=%p, seqno=%lu, start=0x%016lx, end=0x%016lx, event=%d",
220 	       vm->usm.asid, gpusvm, notifier->notifier.invalidate_seq,
221 	       mmu_range->start, mmu_range->end, mmu_range->event);
222 
223 	/* Adjust invalidation to notifier boundaries */
224 	adj_start = max(drm_gpusvm_notifier_start(notifier), adj_start);
225 	adj_end = min(drm_gpusvm_notifier_end(notifier), adj_end);
226 
227 	first = drm_gpusvm_range_find(notifier, adj_start, adj_end);
228 	if (!first)
229 		return;
230 
231 	/*
232 	 * PTs may be getting destroyed so it is not safe to touch them, but
233 	 * the PTs should be invalidated at this point in time. Regardless, we
234 	 * still need to ensure any DMA mappings are unmapped here.
235 	 */
236 	if (xe_vm_is_closed(vm))
237 		goto range_notifier_event_end;
238 
239 	/*
240 	 * XXX: Less than ideal to always wait on VM's resv slots if an
241 	 * invalidation is not required. Could walk the range list twice to
242 	 * figure out if an invalidation is needed, but that is also not ideal.
243 	 */
244 	err = dma_resv_wait_timeout(xe_vm_resv(vm),
245 				    DMA_RESV_USAGE_BOOKKEEP,
246 				    false, MAX_SCHEDULE_TIMEOUT);
247 	XE_WARN_ON(err <= 0);
248 
249 	r = first;
250 	drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
251 		tile_mask |= xe_svm_range_notifier_event_begin(vm, r, mmu_range,
252 							       &adj_start,
253 							       &adj_end);
254 	if (!tile_mask)
255 		goto range_notifier_event_end;
256 
257 	xe_device_wmb(xe);
258 
259 	err = xe_vm_range_tilemask_tlb_inval(vm, adj_start, adj_end, tile_mask);
260 	WARN_ON_ONCE(err);
261 
262 range_notifier_event_end:
263 	r = first;
264 	drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
265 		xe_svm_range_notifier_event_end(vm, r, mmu_range);
266 	for_each_tile(tile, xe, id) {
267 		if (tile_mask & BIT(id)) {
268 			xe_svm_tlb_inval_us_stats_incr(tile->primary_gt, start);
269 			if (tile->media_gt)
270 				xe_svm_tlb_inval_us_stats_incr(tile->media_gt, start);
271 		}
272 	}
273 }
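/*
 * A minimal sketch of the ordering the invalidation path above relies on,
 * shown for a single affected range (arguments simplified for illustration;
 * the authoritative sequence is xe_svm_invalidate() itself):
 *
 *	xe_pt_zap_ptes_range(tile, vm, range);		// clear GPU PTEs
 *	WRITE_ONCE(range->tile_invalidated,
 *		   range->tile_invalidated | BIT(id));	// publish to readers
 *	xe_device_wmb(xe);				// order the PTE writes
 *	xe_vm_range_tilemask_tlb_inval(vm, adj_start, adj_end, tile_mask);
 *	drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx); // drop DMA maps
 */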
274 
275 static int __xe_svm_garbage_collector(struct xe_vm *vm,
276 				      struct xe_svm_range *range)
277 {
278 	struct dma_fence *fence;
279 
280 	range_debug(range, "GARBAGE COLLECTOR");
281 
282 	xe_vm_lock(vm, false);
283 	fence = xe_vm_range_unbind(vm, range);
284 	xe_vm_unlock(vm);
285 	if (IS_ERR(fence))
286 		return PTR_ERR(fence);
287 	dma_fence_put(fence);
288 
289 	drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);
290 
291 	return 0;
292 }
293 
294 static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64 range_end)
295 {
296 	struct xe_vma *vma;
297 	struct xe_vma_mem_attr default_attr = {
298 		.preferred_loc = {
299 			.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
300 			.migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
301 		},
302 		.atomic_access = DRM_XE_ATOMIC_UNDEFINED,
303 	};
304 	int err = 0;
305 
306 	vma = xe_vm_find_vma_by_addr(vm, range_start);
307 	if (!vma)
308 		return -EINVAL;
309 
310 	if (xe_vma_has_default_mem_attrs(vma))
311 		return 0;
312 
313 	vm_dbg(&vm->xe->drm, "Existing VMA start=0x%016llx, vma_end=0x%016llx",
314 	       xe_vma_start(vma), xe_vma_end(vma));
315 
316 	if (xe_vma_start(vma) == range_start && xe_vma_end(vma) == range_end) {
317 		default_attr.pat_index = vma->attr.default_pat_index;
318 		default_attr.default_pat_index  = vma->attr.default_pat_index;
319 		vma->attr = default_attr;
320 	} else {
321 		vm_dbg(&vm->xe->drm, "Split VMA start=0x%016llx, vma_end=0x%016llx",
322 		       range_start, range_end);
323 		err = xe_vm_alloc_cpu_addr_mirror_vma(vm, range_start, range_end - range_start);
324 		if (err) {
325 			drm_warn(&vm->xe->drm, "VMA SPLIT failed: %pe\n", ERR_PTR(err));
326 			xe_vm_kill(vm, true);
327 			return err;
328 		}
329 	}
330 
331 	/*
332 	 * When called from xe_svm_handle_pagefault() the original VMA might
333 	 * have changed; signal this so the caller looks up the VMA again.
334 	 */
335 	return -EAGAIN;
336 }
337 
338 static int xe_svm_garbage_collector(struct xe_vm *vm)
339 {
340 	struct xe_svm_range *range;
341 	u64 range_start;
342 	u64 range_end;
343 	int err, ret = 0;
344 
345 	lockdep_assert_held_write(&vm->lock);
346 
347 	if (xe_vm_is_closed_or_banned(vm))
348 		return -ENOENT;
349 
350 	for (;;) {
351 		spin_lock(&vm->svm.garbage_collector.lock);
352 		range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
353 						 typeof(*range),
354 						 garbage_collector_link);
355 		if (!range)
356 			break;
357 
358 		range_start = xe_svm_range_start(range);
359 		range_end = xe_svm_range_end(range);
360 
361 		list_del(&range->garbage_collector_link);
362 		spin_unlock(&vm->svm.garbage_collector.lock);
363 
364 		err = __xe_svm_garbage_collector(vm, range);
365 		if (err) {
366 			drm_warn(&vm->xe->drm,
367 				 "Garbage collection failed: %pe\n",
368 				 ERR_PTR(err));
369 			xe_vm_kill(vm, true);
370 			return err;
371 		}
372 
373 		err = xe_svm_range_set_default_attr(vm, range_start, range_end);
374 		if (err) {
375 			if (err == -EAGAIN)
376 				ret = -EAGAIN;
377 			else
378 				return err;
379 		}
380 	}
381 	spin_unlock(&vm->svm.garbage_collector.lock);
382 
383 	return ret;
384 }
385 
386 static void xe_svm_garbage_collector_work_func(struct work_struct *w)
387 {
388 	struct xe_vm *vm = container_of(w, struct xe_vm,
389 					svm.garbage_collector.work);
390 
391 	down_write(&vm->lock);
392 	xe_svm_garbage_collector(vm);
393 	up_write(&vm->lock);
394 }
395 
396 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
397 
398 static struct xe_vram_region *page_to_vr(struct page *page)
399 {
400 	return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
401 }
402 
403 static u64 xe_vram_region_page_to_dpa(struct xe_vram_region *vr,
404 				      struct page *page)
405 {
406 	u64 dpa;
407 	u64 pfn = page_to_pfn(page);
408 	u64 offset;
409 
410 	xe_assert(vr->xe, is_device_private_page(page));
411 	xe_assert(vr->xe, (pfn << PAGE_SHIFT) >= vr->hpa_base);
412 
413 	offset = (pfn << PAGE_SHIFT) - vr->hpa_base;
414 	dpa = vr->dpa_base + offset;
415 
416 	return dpa;
417 }
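/*
 * A worked example of the translation above, using made-up values: assume a
 * VRAM region with hpa_base = 0x400000000 and dpa_base = 0x0, and a device
 * private page with pfn = 0x400010 (host physical 0x400010000 with 4K pages):
 *
 *	offset = (pfn << PAGE_SHIFT) - vr->hpa_base = 0x10000
 *	dpa    = vr->dpa_base + offset              = 0x10000
 *
 * i.e. the device physical address is the page's offset into the remapped
 * region, rebased onto dpa_base.
 */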
418 
419 enum xe_svm_copy_dir {
420 	XE_SVM_COPY_TO_VRAM,
421 	XE_SVM_COPY_TO_SRAM,
422 };
423 
424 static void xe_svm_copy_kb_stats_incr(struct xe_gt *gt,
425 				      const enum xe_svm_copy_dir dir,
426 				      int kb)
427 {
428 	if (dir == XE_SVM_COPY_TO_VRAM)
429 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_KB, kb);
430 	else
431 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_KB, kb);
432 }
433 
434 static void xe_svm_copy_us_stats_incr(struct xe_gt *gt,
435 				      const enum xe_svm_copy_dir dir,
436 				      unsigned long npages,
437 				      ktime_t start)
438 {
439 	s64 us_delta = xe_svm_stats_ktime_us_delta(start);
440 
441 	if (dir == XE_SVM_COPY_TO_VRAM) {
442 		switch (npages) {
443 		case 1:
444 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_DEVICE_COPY_US,
445 					 us_delta);
446 			break;
447 		case 16:
448 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_DEVICE_COPY_US,
449 					 us_delta);
450 			break;
451 		case 512:
452 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_DEVICE_COPY_US,
453 					 us_delta);
454 			break;
455 		}
456 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_US,
457 				 us_delta);
458 	} else {
459 		switch (npages) {
460 		case 1:
461 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_CPU_COPY_US,
462 					 us_delta);
463 			break;
464 		case 16:
465 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_CPU_COPY_US,
466 					 us_delta);
467 			break;
468 		case 512:
469 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_CPU_COPY_US,
470 					 us_delta);
471 			break;
472 		}
473 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_US,
474 				 us_delta);
475 	}
476 }
477 
478 static int xe_svm_copy(struct page **pages,
479 		       struct drm_pagemap_addr *pagemap_addr,
480 		       unsigned long npages, const enum xe_svm_copy_dir dir)
481 {
482 	struct xe_vram_region *vr = NULL;
483 	struct xe_gt *gt = NULL;
484 	struct xe_device *xe;
485 	struct dma_fence *fence = NULL;
486 	unsigned long i;
487 #define XE_VRAM_ADDR_INVALID	~0x0ull
488 	u64 vram_addr = XE_VRAM_ADDR_INVALID;
489 	int err = 0, pos = 0;
490 	bool sram = dir == XE_SVM_COPY_TO_SRAM;
491 	ktime_t start = xe_svm_stats_ktime_get();
492 
493 	/*
494 	 * This flow is complex: it locates physically contiguous device pages,
495 	 * derives the starting physical address, and performs a single GPU copy
496 	 * for every 8M chunk in a DMA address array. Both device pages and
497 	 * DMA addresses may be sparsely populated. If either is NULL, a copy is
498 	 * triggered based on the current search state. The last GPU copy is
499 	 * waited on to ensure all copies are complete.
500 	 */
501 
502 	for (i = 0; i < npages; ++i) {
503 		struct page *spage = pages[i];
504 		struct dma_fence *__fence;
505 		u64 __vram_addr;
506 		bool match = false, chunk, last;
507 
508 #define XE_MIGRATE_CHUNK_SIZE	SZ_8M
509 		chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
510 		last = (i + 1) == npages;
511 
512 		/* No CPU page and no device pages queued to copy */
513 		if (!pagemap_addr[i].addr && vram_addr == XE_VRAM_ADDR_INVALID)
514 			continue;
515 
516 		if (!vr && spage) {
517 			vr = page_to_vr(spage);
518 			gt = xe_migrate_exec_queue(vr->migrate)->gt;
519 			xe = vr->xe;
520 		}
521 		XE_WARN_ON(spage && page_to_vr(spage) != vr);
522 
523 		/*
524 		 * CPU page and device page valid, capture physical address on
525 		 * first device page, check if physically contiguous on
526 		 * subsequent device pages.
527 		 */
528 		if (pagemap_addr[i].addr && spage) {
529 			__vram_addr = xe_vram_region_page_to_dpa(vr, spage);
530 			if (vram_addr == XE_VRAM_ADDR_INVALID) {
531 				vram_addr = __vram_addr;
532 				pos = i;
533 			}
534 
535 			match = vram_addr + PAGE_SIZE * (i - pos) == __vram_addr;
536 			/* Expected with contiguous memory */
537 			xe_assert(vr->xe, match);
538 
539 			if (pagemap_addr[i].order) {
540 				i += NR_PAGES(pagemap_addr[i].order) - 1;
541 				chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
542 				last = (i + 1) == npages;
543 			}
544 		}
545 
546 		/*
547 		 * Mismatched physical address, 8M copy chunk, or last page -
548 		 * trigger a copy.
549 		 */
550 		if (!match || chunk || last) {
551 			/*
552 			 * Extra page for first copy if last page and matching
553 			 * physical address.
554 			 */
555 			int incr = (match && last) ? 1 : 0;
556 
557 			if (vram_addr != XE_VRAM_ADDR_INVALID) {
558 				xe_svm_copy_kb_stats_incr(gt, dir,
559 							  (i - pos + incr) *
560 							  (PAGE_SIZE / SZ_1K));
561 				if (sram) {
562 					vm_dbg(&xe->drm,
563 					       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
564 					       vram_addr,
565 					       (u64)pagemap_addr[pos].addr, i - pos + incr);
566 					__fence = xe_migrate_from_vram(vr->migrate,
567 								       i - pos + incr,
568 								       vram_addr,
569 								       &pagemap_addr[pos]);
570 				} else {
571 					vm_dbg(&xe->drm,
572 					       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
573 					       (u64)pagemap_addr[pos].addr, vram_addr,
574 					       i - pos + incr);
575 					__fence = xe_migrate_to_vram(vr->migrate,
576 								     i - pos + incr,
577 								     &pagemap_addr[pos],
578 								     vram_addr);
579 				}
580 				if (IS_ERR(__fence)) {
581 					err = PTR_ERR(__fence);
582 					goto err_out;
583 				}
584 
585 				dma_fence_put(fence);
586 				fence = __fence;
587 			}
588 
589 			/* Set up physical address of next device page */
590 			if (pagemap_addr[i].addr && spage) {
591 				vram_addr = __vram_addr;
592 				pos = i;
593 			} else {
594 				vram_addr = XE_VRAM_ADDR_INVALID;
595 			}
596 
597 			/* Extra mismatched device page, copy it */
598 			if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
599 				xe_svm_copy_kb_stats_incr(gt, dir,
600 							  (PAGE_SIZE / SZ_1K));
601 				if (sram) {
602 					vm_dbg(&xe->drm,
603 					       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
604 					       vram_addr, (u64)pagemap_addr[pos].addr, 1);
605 					__fence = xe_migrate_from_vram(vr->migrate, 1,
606 								       vram_addr,
607 								       &pagemap_addr[pos]);
608 				} else {
609 					vm_dbg(&xe->drm,
610 					       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
611 					       (u64)pagemap_addr[pos].addr, vram_addr, 1);
612 					__fence = xe_migrate_to_vram(vr->migrate, 1,
613 								     &pagemap_addr[pos],
614 								     vram_addr);
615 				}
616 				if (IS_ERR(__fence)) {
617 					err = PTR_ERR(__fence);
618 					goto err_out;
619 				}
620 
621 				dma_fence_put(fence);
622 				fence = __fence;
623 			}
624 		}
625 	}
626 
627 err_out:
628 	/* Wait for all copies to complete */
629 	if (fence) {
630 		dma_fence_wait(fence, false);
631 		dma_fence_put(fence);
632 	}
633 
634 	/*
635 	 * XXX: We can't derive the GT here (or anywhere in this function), but
636 	 * compute always uses the primary GT so accumulate stats on the likely
637 	 * GT of the fault.
638 	 */
639 	if (gt)
640 		xe_svm_copy_us_stats_incr(gt, dir, npages, start);
641 
642 	return err;
643 #undef XE_MIGRATE_CHUNK_SIZE
644 #undef XE_VRAM_ADDR_INVALID
645 }
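/*
 * A small worked example of the loop above, with made-up inputs: npages = 4,
 * 4K pages, all four device pages physically contiguous and every
 * pagemap_addr[] slot populated (no holes, no compound 64K/2M entries):
 *
 *	i=0: vram_addr captured from pages[0], pos = 0, no copy yet
 *	i=1: match, keep scanning
 *	i=2: match, keep scanning
 *	i=3: match && last -> incr = 1, one copy of i - pos + incr = 4 pages
 *
 * The single 16K copy is issued via xe_migrate_to_vram() or
 * xe_migrate_from_vram() depending on direction, and only the final fence is
 * waited on. Fully contiguous runs larger than XE_MIGRATE_CHUNK_SIZE (8M) are
 * split into one copy per chunk.
 */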
646 
647 static int xe_svm_copy_to_devmem(struct page **pages,
648 				 struct drm_pagemap_addr *pagemap_addr,
649 				 unsigned long npages)
650 {
651 	return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM);
652 }
653 
654 static int xe_svm_copy_to_ram(struct page **pages,
655 			      struct drm_pagemap_addr *pagemap_addr,
656 			      unsigned long npages)
657 {
658 	return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM);
659 }
660 
661 static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
662 {
663 	return container_of(devmem_allocation, struct xe_bo, devmem_allocation);
664 }
665 
666 static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
667 {
668 	struct xe_bo *bo = to_xe_bo(devmem_allocation);
669 	struct xe_device *xe = xe_bo_device(bo);
670 
671 	xe_bo_put_async(bo);
672 	xe_pm_runtime_put(xe);
673 }
674 
675 static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
676 {
677 	return PHYS_PFN(offset + vr->hpa_base);
678 }
679 
680 static struct drm_buddy *vram_to_buddy(struct xe_vram_region *vram)
681 {
682 	return &vram->ttm.mm;
683 }
684 
685 static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
686 				      unsigned long npages, unsigned long *pfn)
687 {
688 	struct xe_bo *bo = to_xe_bo(devmem_allocation);
689 	struct ttm_resource *res = bo->ttm.resource;
690 	struct list_head *blocks = &to_xe_ttm_vram_mgr_resource(res)->blocks;
691 	struct drm_buddy_block *block;
692 	int j = 0;
693 
694 	list_for_each_entry(block, blocks, link) {
695 		struct xe_vram_region *vr = block->private;
696 		struct drm_buddy *buddy = vram_to_buddy(vr);
697 		u64 block_pfn = block_offset_to_pfn(vr, drm_buddy_block_offset(block));
698 		int i;
699 
700 		for (i = 0; i < drm_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i)
701 			pfn[j++] = block_pfn + i;
702 	}
703 
704 	return 0;
705 }
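/*
 * A brief illustration of the pfn expansion above, with made-up numbers: a
 * single 2M buddy block at drm_buddy_block_offset() == 0x300000 in a region
 * with hpa_base == 0x400000000 gives
 *
 *	block_pfn = PHYS_PFN(0x300000 + 0x400000000) = 0x400300
 *
 * and fills pfn[0..511] with 0x400300, 0x400301, ... 0x4004ff, i.e. one pfn
 * per 4K page of the block, walking the BO's blocks in list order.
 */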
706 
707 static const struct drm_pagemap_devmem_ops dpagemap_devmem_ops = {
708 	.devmem_release = xe_svm_devmem_release,
709 	.populate_devmem_pfn = xe_svm_populate_devmem_pfn,
710 	.copy_to_devmem = xe_svm_copy_to_devmem,
711 	.copy_to_ram = xe_svm_copy_to_ram,
712 };
713 
714 #endif
715 
716 static const struct drm_gpusvm_ops gpusvm_ops = {
717 	.range_alloc = xe_svm_range_alloc,
718 	.range_free = xe_svm_range_free,
719 	.invalidate = xe_svm_invalidate,
720 };
721 
722 static const unsigned long fault_chunk_sizes[] = {
723 	SZ_2M,
724 	SZ_64K,
725 	SZ_4K,
726 };
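/*
 * A sketch of how the chunk sizes above are expected to be consumed (the
 * selection itself lives in drm_gpusvm, not in this file): for a fault at
 * address A, the range inserted around A is, roughly, the largest of 2M, 64K
 * or 4K whose naturally aligned span still fits inside the CPU VMA and the
 * notifier, e.g.
 *
 *	fault at 0x7f0000201000 in a 1M VMA -> a 64K range at 0x7f0000200000
 *	fault in the middle of an aligned 2M VMA -> a single 2M range
 *
 * The addresses here are made up for illustration only.
 */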
727 
728 /**
729  * xe_svm_init() - SVM initialize
730  * @vm: The VM.
731  *
732  * Initialize SVM state which is embedded within the VM.
733  *
734  * Return: 0 on success, negative error code on error.
735  */
736 int xe_svm_init(struct xe_vm *vm)
737 {
738 	int err;
739 
740 	if (vm->flags & XE_VM_FLAG_FAULT_MODE) {
741 		spin_lock_init(&vm->svm.garbage_collector.lock);
742 		INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
743 		INIT_WORK(&vm->svm.garbage_collector.work,
744 			  xe_svm_garbage_collector_work_func);
745 
746 		err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
747 				      current->mm, xe_svm_devm_owner(vm->xe), 0,
748 				      vm->size,
749 				      xe_modparam.svm_notifier_size * SZ_1M,
750 				      &gpusvm_ops, fault_chunk_sizes,
751 				      ARRAY_SIZE(fault_chunk_sizes));
752 		drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
753 	} else {
754 		err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)",
755 				      &vm->xe->drm, NULL, NULL, 0, 0, 0, NULL,
756 				      NULL, 0);
757 	}
758 
759 	return err;
760 }
761 
762 /**
763  * xe_svm_close() - SVM close
764  * @vm: The VM.
765  *
766  * Close SVM state (i.e., stop and flush all SVM actions).
767  */
768 void xe_svm_close(struct xe_vm *vm)
769 {
770 	xe_assert(vm->xe, xe_vm_is_closed(vm));
771 	flush_work(&vm->svm.garbage_collector.work);
772 }
773 
774 /**
775  * xe_svm_fini() - SVM finalize
776  * @vm: The VM.
777  *
778  * Finalize SVM state which is embedded within the VM.
779  */
780 void xe_svm_fini(struct xe_vm *vm)
781 {
782 	xe_assert(vm->xe, xe_vm_is_closed(vm));
783 
784 	drm_gpusvm_fini(&vm->svm.gpusvm);
785 }
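/*
 * A minimal sketch of the expected lifecycle of the three helpers above, as
 * seen from a hypothetical VM creation/teardown path (the caller shown here
 * is illustrative, not code from this file):
 *
 *	err = xe_svm_init(vm);		// at VM creation
 *	if (err)
 *		goto err_no_svm;
 *	...
 *	xe_svm_close(vm);		// once the VM has been marked closed
 *	...
 *	xe_svm_fini(vm);		// at final VM destruction
 */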
786 
787 static bool xe_svm_range_is_valid(struct xe_svm_range *range,
788 				  struct xe_tile *tile,
789 				  bool devmem_only)
790 {
791 	return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
792 					    range->tile_invalidated) &&
793 		(!devmem_only || xe_svm_range_in_vram(range)));
794 }
795 
796 /**
797  * xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
798  * @vm: xe_vm pointer
799  * @range: Pointer to the SVM range structure
800  *
801  * Checks whether the range has pages in VRAM and, if so, migrates them to SMEM.
802  */
803 void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
804 {
805 	if (xe_svm_range_in_vram(range))
806 		drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
807 }
808 
809 /**
810  * xe_svm_range_validate() - Check if the SVM range is valid
811  * @vm: xe_vm pointer
812  * @range: Pointer to the SVM range structure
813  * @tile_mask: Mask representing the tiles to be checked
814  * @devmem_preferred: if true the range needs to be in devmem
815  *
816  * The xe_svm_range_validate() function checks if a range is
817  * valid and located in the desired memory region.
818  *
819  * Return: true if the range is valid, false otherwise
820  */
821 bool xe_svm_range_validate(struct xe_vm *vm,
822 			   struct xe_svm_range *range,
823 			   u8 tile_mask, bool devmem_preferred)
824 {
825 	bool ret;
826 
827 	xe_svm_notifier_lock(vm);
828 
829 	ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask &&
830 	       (devmem_preferred == range->base.pages.flags.has_devmem_pages);
831 
832 	xe_svm_notifier_unlock(vm);
833 
834 	return ret;
835 }
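/*
 * An illustrative (hypothetical) caller of xe_svm_range_validate(): a
 * prefetch-style path that skips re-binding work when the range is already
 * mapped on the required tiles and resident where it is wanted:
 *
 *	if (xe_svm_range_validate(vm, range, tile_mask, region_is_devmem))
 *		return 0;	// nothing to do, mapping already valid
 *
 *	// otherwise fall through to migrate / get pages / bind
 *
 * The variable names are illustrative; only the function signature is taken
 * from this file.
 */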
836 
837 /**
838  * xe_svm_find_vma_start() - Find start of CPU VMA
839  * @vm: xe_vm pointer
840  * @start: start address
841  * @end: end address
842  * @vma: Pointer to struct xe_vma
843  *
844  * This function searches for a CPU VMA within the specified range
845  * [start, end] in the given VM. It adjusts the range based on the
846  * xe_vma start and end addresses. If no CPU VMA is found, it returns
847  * ULONG_MAX.
848  *
849  * Return: The starting address of the VMA within the range,
850  * or ULONG_MAX if no VMA is found.
851  */
852 u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *vma)
853 {
854 	return drm_gpusvm_find_vma_start(&vm->svm.gpusvm,
855 					 max(start, xe_vma_start(vma)),
856 					 min(end, xe_vma_end(vma)));
857 }
858 
859 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
860 static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
861 				      unsigned long start, unsigned long end,
862 				      struct mm_struct *mm,
863 				      unsigned long timeslice_ms)
864 {
865 	struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap);
866 	struct xe_device *xe = vr->xe;
867 	struct device *dev = xe->drm.dev;
868 	struct drm_buddy_block *block;
869 	struct xe_validation_ctx vctx;
870 	struct list_head *blocks;
871 	struct drm_exec exec;
872 	struct xe_bo *bo;
873 	int err = 0, idx;
874 
875 	if (!drm_dev_enter(&xe->drm, &idx))
876 		return -ENODEV;
877 
878 	xe_pm_runtime_get(xe);
879 
880 	xe_validation_guard(&vctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
881 		bo = xe_bo_create_locked(xe, NULL, NULL, end - start,
882 					 ttm_bo_type_device,
883 					 (IS_DGFX(xe) ? XE_BO_FLAG_VRAM(vr) : XE_BO_FLAG_SYSTEM) |
884 					 XE_BO_FLAG_CPU_ADDR_MIRROR, &exec);
885 		drm_exec_retry_on_contention(&exec);
886 		if (IS_ERR(bo)) {
887 			err = PTR_ERR(bo);
888 			xe_validation_retry_on_oom(&vctx, &err);
889 			break;
890 		}
891 
892 		drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
893 					&dpagemap_devmem_ops, dpagemap, end - start);
894 
895 		blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
896 		list_for_each_entry(block, blocks, link)
897 			block->private = vr;
898 
899 		xe_bo_get(bo);
900 
901 		/* Ensure the device has a pm ref while there are device pages active. */
902 		xe_pm_runtime_get_noresume(xe);
903 		err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
904 						    start, end, timeslice_ms,
905 						    xe_svm_devm_owner(xe));
906 		if (err)
907 			xe_svm_devmem_release(&bo->devmem_allocation);
908 		xe_bo_unlock(bo);
909 		xe_bo_put(bo);
910 	}
911 	xe_pm_runtime_put(xe);
912 	drm_dev_exit(idx);
913 
914 	return err;
915 }
916 #endif
917 
918 static bool supports_4K_migration(struct xe_device *xe)
919 {
920 	if (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
921 		return false;
922 
923 	return true;
924 }
925 
926 /**
927  * xe_svm_range_needs_migrate_to_vram() - Check if SVM range needs migration to VRAM
928  * @range: SVM range for which migration needs to be decided
929  * @vma: vma which contains the range
930  * @preferred_region_is_vram: preferred region for the range is VRAM
931  *
932  * Return: True if the range needs migration and migration is supported, else false
933  */
934 bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
935 					bool preferred_region_is_vram)
936 {
937 	struct xe_vm *vm = range_to_vm(&range->base);
938 	u64 range_size = xe_svm_range_size(range);
939 
940 	if (!range->base.pages.flags.migrate_devmem || !preferred_region_is_vram)
941 		return false;
942 
943 	xe_assert(vm->xe, IS_DGFX(vm->xe));
944 
945 	if (xe_svm_range_in_vram(range)) {
946 		drm_info(&vm->xe->drm, "Range is already in VRAM\n");
947 		return false;
948 	}
949 
950 	if (range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
951 		drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
952 		return false;
953 	}
954 
955 	return true;
956 }
957 
958 #define DECL_SVM_RANGE_COUNT_STATS(elem, stat) \
959 static void xe_svm_range_##elem##_count_stats_incr(struct xe_gt *gt, \
960 						   struct xe_svm_range *range) \
961 { \
962 	switch (xe_svm_range_size(range)) { \
963 	case SZ_4K: \
964 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_COUNT, 1); \
965 		break; \
966 	case SZ_64K: \
967 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_COUNT, 1); \
968 		break; \
969 	case SZ_2M: \
970 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_COUNT, 1); \
971 		break; \
972 	} \
973 } \
974 
975 DECL_SVM_RANGE_COUNT_STATS(fault, PAGEFAULT)
976 DECL_SVM_RANGE_COUNT_STATS(valid_fault, VALID_PAGEFAULT)
977 DECL_SVM_RANGE_COUNT_STATS(migrate, MIGRATE)
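/*
 * For reference, DECL_SVM_RANGE_COUNT_STATS(fault, PAGEFAULT) above expands
 * to (roughly) the following helper:
 *
 *	static void xe_svm_range_fault_count_stats_incr(struct xe_gt *gt,
 *							struct xe_svm_range *range)
 *	{
 *		switch (xe_svm_range_size(range)) {
 *		case SZ_4K:
 *			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_PAGEFAULT_COUNT, 1);
 *			break;
 *		case SZ_64K:
 *			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_PAGEFAULT_COUNT, 1);
 *			break;
 *		case SZ_2M:
 *			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_PAGEFAULT_COUNT, 1);
 *			break;
 *		}
 *	}
 */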
978 
979 #define DECL_SVM_RANGE_US_STATS(elem, stat) \
980 static void xe_svm_range_##elem##_us_stats_incr(struct xe_gt *gt, \
981 						struct xe_svm_range *range, \
982 						ktime_t start) \
983 { \
984 	s64 us_delta = xe_svm_stats_ktime_us_delta(start); \
985 \
986 	switch (xe_svm_range_size(range)) { \
987 	case SZ_4K: \
988 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_US, \
989 				 us_delta); \
990 		break; \
991 	case SZ_64K: \
992 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_US, \
993 				 us_delta); \
994 		break; \
995 	case SZ_2M: \
996 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_US, \
997 				 us_delta); \
998 		break; \
999 	} \
1000 } \
1001 
1002 DECL_SVM_RANGE_US_STATS(migrate, MIGRATE)
1003 DECL_SVM_RANGE_US_STATS(get_pages, GET_PAGES)
1004 DECL_SVM_RANGE_US_STATS(bind, BIND)
1005 DECL_SVM_RANGE_US_STATS(fault, PAGEFAULT)
1006 
1007 static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
1008 				     struct xe_gt *gt, u64 fault_addr,
1009 				     bool need_vram)
1010 {
1011 	int devmem_possible = IS_DGFX(vm->xe) &&
1012 		IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
1013 	struct drm_gpusvm_ctx ctx = {
1014 		.read_only = xe_vma_read_only(vma),
1015 		.devmem_possible = devmem_possible,
1016 		.check_pages_threshold = devmem_possible ? SZ_64K : 0,
1017 		.devmem_only = need_vram && devmem_possible,
1018 		.timeslice_ms = need_vram && devmem_possible ?
1019 			vm->xe->atomic_svm_timeslice_ms : 0,
1020 	};
1021 	struct xe_validation_ctx vctx;
1022 	struct drm_exec exec;
1023 	struct xe_svm_range *range;
1024 	struct dma_fence *fence;
1025 	struct drm_pagemap *dpagemap;
1026 	struct xe_tile *tile = gt_to_tile(gt);
1027 	int migrate_try_count = ctx.devmem_only ? 3 : 1;
1028 	ktime_t start = xe_svm_stats_ktime_get(), bind_start, get_pages_start;
1029 	int err;
1030 
1031 	lockdep_assert_held_write(&vm->lock);
1032 	xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
1033 
1034 	xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, 1);
1035 
1036 retry:
1037 	/* Always process UNMAPs first so the view of SVM ranges is current */
1038 	err = xe_svm_garbage_collector(vm);
1039 	if (err)
1040 		return err;
1041 
1042 	range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
1043 
1044 	if (IS_ERR(range))
1045 		return PTR_ERR(range);
1046 
1047 	xe_svm_range_fault_count_stats_incr(gt, range);
1048 
1049 	if (ctx.devmem_only && !range->base.pages.flags.migrate_devmem) {
1050 		err = -EACCES;
1051 		goto out;
1052 	}
1053 
1054 	if (xe_svm_range_is_valid(range, tile, ctx.devmem_only)) {
1055 		xe_svm_range_valid_fault_count_stats_incr(gt, range);
1056 		range_debug(range, "PAGE FAULT - VALID");
1057 		goto out;
1058 	}
1059 
1060 	range_debug(range, "PAGE FAULT");
1061 
1062 	dpagemap = xe_vma_resolve_pagemap(vma, tile);
1063 	if (--migrate_try_count >= 0 &&
1064 	    xe_svm_range_needs_migrate_to_vram(range, vma, !!dpagemap || ctx.devmem_only)) {
1065 		ktime_t migrate_start = xe_svm_stats_ktime_get();
1066 
1067 		/* TODO: For multi-device, dpagemap will be used to find the
1068 		 * remote tile and remote device. Will need to modify
1069 		 * xe_svm_alloc_vram to use dpagemap for future multi-device
1070 		 * support.
1071 		 */
1072 		xe_svm_range_migrate_count_stats_incr(gt, range);
1073 		err = xe_svm_alloc_vram(tile, range, &ctx);
1074 		xe_svm_range_migrate_us_stats_incr(gt, range, migrate_start);
1075 		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
1076 		if (err) {
1077 			if (migrate_try_count || !ctx.devmem_only) {
1078 				drm_dbg(&vm->xe->drm,
1079 					"VRAM allocation failed, falling back to retrying fault, asid=%u, errno=%pe\n",
1080 					vm->usm.asid, ERR_PTR(err));
1081 				goto retry;
1082 			} else {
1083 				drm_err(&vm->xe->drm,
1084 					"VRAM allocation failed, retry count exceeded, asid=%u, errno=%pe\n",
1085 					vm->usm.asid, ERR_PTR(err));
1086 				return err;
1087 			}
1088 		}
1089 	}
1090 
1091 	get_pages_start = xe_svm_stats_ktime_get();
1092 
1093 	range_debug(range, "GET PAGES");
1094 	err = xe_svm_range_get_pages(vm, range, &ctx);
1095 	/* Corner case where CPU mappings have changed */
1096 	if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
1097 		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
1098 		if (migrate_try_count > 0 || !ctx.devmem_only) {
1099 			drm_dbg(&vm->xe->drm,
1100 				"Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
1101 				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
1102 			range_debug(range, "PAGE FAULT - RETRY PAGES");
1103 			goto retry;
1104 		} else {
1105 			drm_err(&vm->xe->drm,
1106 				"Get pages failed, retry count exceeded, asid=%u, gpusvm=%p, errno=%pe\n",
1107 				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
1108 		}
1109 	}
1110 	if (err) {
1111 		range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
1112 		goto out;
1113 	}
1114 
1115 	xe_svm_range_get_pages_us_stats_incr(gt, range, get_pages_start);
1116 	range_debug(range, "PAGE FAULT - BIND");
1117 
1118 	bind_start = xe_svm_stats_ktime_get();
1119 	xe_validation_guard(&vctx, &vm->xe->val, &exec, (struct xe_val_flags) {}, err) {
1120 		err = xe_vm_drm_exec_lock(vm, &exec);
1121 		drm_exec_retry_on_contention(&exec);
1122 
1123 		xe_vm_set_validation_exec(vm, &exec);
1124 		fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
1125 		xe_vm_set_validation_exec(vm, NULL);
1126 		if (IS_ERR(fence)) {
1127 			drm_exec_retry_on_contention(&exec);
1128 			err = PTR_ERR(fence);
1129 			xe_validation_retry_on_oom(&vctx, &err);
1130 			xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
1131 			break;
1132 		}
1133 	}
1134 	if (err)
1135 		goto err_out;
1136 
1137 	dma_fence_wait(fence, false);
1138 	dma_fence_put(fence);
1139 	xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
1140 
1141 out:
1142 	xe_svm_range_fault_us_stats_incr(gt, range, start);
1143 	return 0;
1144 
1145 err_out:
1146 	if (err == -EAGAIN) {
1147 		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
1148 		range_debug(range, "PAGE FAULT - RETRY BIND");
1149 		goto retry;
1150 	}
1151 
1152 	return err;
1153 }
1154 
1155 /**
1156  * xe_svm_handle_pagefault() - SVM handle page fault
1157  * @vm: The VM.
1158  * @vma: The CPU address mirror VMA.
1159  * @gt: The gt upon which the fault occurred.
1160  * @fault_addr: The GPU fault address.
1161  * @atomic: The fault atomic access bit.
1162  *
1163  * Create GPU bindings for a SVM page fault. Optionally migrate to device
1164  * memory.
1165  *
1166  * Return: 0 on success, negative error code on error.
1167  */
1168 int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
1169 			    struct xe_gt *gt, u64 fault_addr,
1170 			    bool atomic)
1171 {
1172 	int need_vram, ret;
1173 retry:
1174 	need_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic);
1175 	if (need_vram < 0)
1176 		return need_vram;
1177 
1178 	ret =  __xe_svm_handle_pagefault(vm, vma, gt, fault_addr,
1179 					 need_vram ? true : false);
1180 	if (ret == -EAGAIN) {
1181 		/*
1182 		 * Retry once on -EAGAIN to re-lookup the VMA, as the original VMA
1183 		 * may have been split by xe_svm_range_set_default_attr.
1184 		 */
1185 		vma = xe_vm_find_vma_by_addr(vm, fault_addr);
1186 		if (!vma)
1187 			return -EINVAL;
1188 
1189 		goto retry;
1190 	}
1191 	return ret;
1192 }
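/*
 * An illustrative (hypothetical) call site for xe_svm_handle_pagefault(), in
 * the spirit of a GT page-fault handler; the locking shown follows the
 * lockdep assertion in __xe_svm_handle_pagefault() (vm->lock held for write):
 *
 *	down_write(&vm->lock);
 *	vma = xe_vm_find_vma_by_addr(vm, fault_addr);
 *	if (vma && xe_vma_is_cpu_addr_mirror(vma))
 *		err = xe_svm_handle_pagefault(vm, vma, gt, fault_addr, atomic);
 *	up_write(&vm->lock);
 *
 * Only the helpers named here are taken from this file; the surrounding
 * control flow is a sketch.
 */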
1193 
1194 /**
1195  * xe_svm_has_mapping() - SVM has mappings
1196  * @vm: The VM.
1197  * @start: Start address.
1198  * @end: End address.
1199  *
1200  * Check if an address range has SVM mappings.
1201  *
1202  * Return: True if address range has a SVM mapping, False otherwise
1203  */
1204 bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
1205 {
1206 	return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
1207 }
1208 
1209 /**
1210  * xe_svm_unmap_address_range() - Unmap SVM mappings and ranges
1211  * @vm: The VM
1212  * @start: start addr
1213  * @end: end addr
1214  *
1215  * This function unmaps SVM ranges if the start or end address is inside them.
1216  */
1217 void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
1218 {
1219 	struct drm_gpusvm_notifier *notifier, *next;
1220 
1221 	lockdep_assert_held_write(&vm->lock);
1222 
1223 	drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) {
1224 		struct drm_gpusvm_range *range, *__next;
1225 
1226 		drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
1227 			if (start > drm_gpusvm_range_start(range) ||
1228 			    end < drm_gpusvm_range_end(range)) {
1229 				if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
1230 					drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
1231 				drm_gpusvm_range_get(range);
1232 				__xe_svm_garbage_collector(vm, to_xe_range(range));
1233 				if (!list_empty(&to_xe_range(range)->garbage_collector_link)) {
1234 					spin_lock(&vm->svm.garbage_collector.lock);
1235 					list_del(&to_xe_range(range)->garbage_collector_link);
1236 					spin_unlock(&vm->svm.garbage_collector.lock);
1237 				}
1238 				drm_gpusvm_range_put(range);
1239 			}
1240 		}
1241 	}
1242 }
1243 
1244 /**
1245  * xe_svm_bo_evict() - SVM evict BO to system memory
1246  * @bo: BO to evict
1247  *
1248  * SVM evict BO to system memory. GPU SVM layer ensures all device pages
1249  * are evicted before returning.
1250  *
1251  * Return: 0 on success, standard error code otherwise
1252  */
1253 int xe_svm_bo_evict(struct xe_bo *bo)
1254 {
1255 	return drm_pagemap_evict_to_ram(&bo->devmem_allocation);
1256 }
1257 
1258 /**
1259  * xe_svm_range_find_or_insert() - Find or insert GPU SVM range
1260  * @vm: xe_vm pointer
1261  * @addr: address for which range needs to be found/inserted
1262  * @vma: Pointer to struct xe_vma which mirrors the CPU address space
1263  * @ctx: GPU SVM context
1264  *
1265  * This function finds an existing SVM range or inserts a newly allocated
1266  * one, based on the address.
1267  *
1268  * Return: Pointer to the SVM range on success, ERR_PTR() on failure.
1269  */
1270 struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
1271 						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
1272 {
1273 	struct drm_gpusvm_range *r;
1274 
1275 	r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
1276 					    xe_vma_start(vma), xe_vma_end(vma), ctx);
1277 	if (IS_ERR(r))
1278 		return ERR_CAST(r);
1279 
1280 	return to_xe_range(r);
1281 }
1282 
1283 /**
1284  * xe_svm_range_get_pages() - Get pages for a SVM range
1285  * @vm: Pointer to the struct xe_vm
1286  * @range: Pointer to the xe SVM range structure
1287  * @ctx: GPU SVM context
1288  *
1289  * This function gets pages for a SVM range and ensures they are mapped for
1290  * DMA access. In case of failure with -EOPNOTSUPP, it evicts the range.
1291  *
1292  * Return: 0 on success, negative error code on failure.
1293  */
1294 int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
1295 			   struct drm_gpusvm_ctx *ctx)
1296 {
1297 	int err = 0;
1298 
1299 	err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
1300 	if (err == -EOPNOTSUPP) {
1301 		range_debug(range, "PAGE FAULT - EVICT PAGES");
1302 		drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
1303 	}
1304 
1305 	return err;
1306 }
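/*
 * A condensed sketch of how the two helpers above combine in practice,
 * mirroring __xe_svm_handle_pagefault() (error handling, retries, and
 * migration omitted for brevity):
 *
 *	range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
 *	if (IS_ERR(range))
 *		return PTR_ERR(range);
 *
 *	err = xe_svm_range_get_pages(vm, range, &ctx); // may fail if CPU
 *	if (err)				       // mappings changed
 *		return err;
 *
 *	fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
 */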
1307 
1308 /**
1309  * xe_svm_ranges_zap_ptes_in_range - clear ptes of svm ranges in input range
1310  * xe_svm_ranges_zap_ptes_in_range() - Clear PTEs of SVM ranges in input range
1311  * @start: Start of the input range
1312  * @end: End of the input range
1313  *
1314  * This function removes the page table entries (PTEs) associated
1315  * with the SVM ranges within the given input start and end.
1316  *
1317  * Return: tile_mask of tiles whose GTs need to be TLB invalidated.
1318  */
1319 u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
1320 {
1321 	struct drm_gpusvm_notifier *notifier;
1322 	struct xe_svm_range *range;
1323 	u64 adj_start, adj_end;
1324 	struct xe_tile *tile;
1325 	u8 tile_mask = 0;
1326 	u8 id;
1327 
1328 	lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
1329 		       lockdep_is_held_type(&vm->lock, 0));
1330 
1331 	drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) {
1332 		struct drm_gpusvm_range *r = NULL;
1333 
1334 		adj_start = max(start, drm_gpusvm_notifier_start(notifier));
1335 		adj_end = min(end, drm_gpusvm_notifier_end(notifier));
1336 		drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) {
1337 			range = to_xe_range(r);
1338 			for_each_tile(tile, vm->xe, id) {
1339 				if (xe_pt_zap_ptes_range(tile, vm, range)) {
1340 					tile_mask |= BIT(id);
1341 					/*
1342 					 * WRITE_ONCE pairs with READ_ONCE in
1343 					 * xe_vm_has_valid_gpu_mapping().
1344 					 * Must not fail after setting
1345 					 * tile_invalidated and before
1346 					 * TLB invalidation.
1347 					 */
1348 					WRITE_ONCE(range->tile_invalidated,
1349 						   range->tile_invalidated | BIT(id));
1350 				}
1351 			}
1352 		}
1353 	}
1354 
1355 	return tile_mask;
1356 }
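/*
 * A hedged usage sketch for the helper above (the actual caller lives outside
 * this file): the locks named in the lockdep assertion must be held, and the
 * returned mask feeds a TLB invalidation, e.g.
 *
 *	xe_svm_notifier_lock(vm);
 *	tile_mask = xe_svm_ranges_zap_ptes_in_range(vm, start, end);
 *	if (tile_mask) {
 *		xe_device_wmb(vm->xe);
 *		err = xe_vm_range_tilemask_tlb_inval(vm, start, end, tile_mask);
 *	}
 *	xe_svm_notifier_unlock(vm);
 *
 * The exact placement of the TLB invalidation relative to the notifier lock
 * is illustrative only.
 */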
1357 
1358 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
1359 
1360 static struct drm_pagemap *tile_local_pagemap(struct xe_tile *tile)
1361 {
1362 	return &tile->mem.vram->dpagemap;
1363 }
1364 
1365 /**
1366  * xe_vma_resolve_pagemap - Resolve the appropriate DRM pagemap for a VMA
1367  * @vma: Pointer to the xe_vma structure containing memory attributes
1368  * @tile: Pointer to the xe_tile structure used as fallback for VRAM mapping
1369  *
1370  * This function determines the correct DRM pagemap to use for a given VMA.
1371  * It first checks if a valid devmem_fd is provided in the VMA's preferred
1372  * location. If the devmem_fd is negative, it returns NULL, indicating no
1373  * pagemap is available and that SMEM is to be used as the preferred location.
1374  * If the devmem_fd is equal to the default faulting
1375  * GT identifier, it returns the VRAM pagemap associated with the tile.
1376  *
1377  * Future support for multi-device configurations may use drm_pagemap_from_fd()
1378  * to resolve pagemaps from arbitrary file descriptors.
1379  *
1380  * Return: A pointer to the resolved drm_pagemap, or NULL if none is applicable.
1381  */
1382 struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
1383 {
1384 	s32 fd = (s32)vma->attr.preferred_loc.devmem_fd;
1385 
1386 	if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM)
1387 		return NULL;
1388 
1389 	if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE)
1390 		return IS_DGFX(tile_to_xe(tile)) ? tile_local_pagemap(tile) : NULL;
1391 
1392 	/* TODO: Support multi-device with drm_pagemap_from_fd(fd) */
1393 	return NULL;
1394 }
1395 
1396 /**
1397  * xe_svm_alloc_vram() - Allocate device memory pages for range,
1398  * migrating existing data.
1399  * @tile: tile to allocate vram from
1400  * @range: SVM range
1401  * @ctx: DRM GPU SVM context
1402  *
1403  * Return: 0 on success, error code on failure.
1404  */
1405 int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
1406 		      const struct drm_gpusvm_ctx *ctx)
1407 {
1408 	struct drm_pagemap *dpagemap;
1409 
1410 	xe_assert(tile_to_xe(tile), range->base.pages.flags.migrate_devmem);
1411 	range_debug(range, "ALLOCATE VRAM");
1412 
1413 	dpagemap = tile_local_pagemap(tile);
1414 	return drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
1415 				       xe_svm_range_end(range),
1416 				       range->base.gpusvm->mm,
1417 				       ctx->timeslice_ms);
1418 }
1419 
1420 static struct drm_pagemap_addr
1421 xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
1422 			  struct device *dev,
1423 			  struct page *page,
1424 			  unsigned int order,
1425 			  enum dma_data_direction dir)
1426 {
1427 	struct device *pgmap_dev = dpagemap->dev;
1428 	enum drm_interconnect_protocol prot;
1429 	dma_addr_t addr;
1430 
1431 	if (pgmap_dev == dev) {
1432 		addr = xe_vram_region_page_to_dpa(page_to_vr(page), page);
1433 		prot = XE_INTERCONNECT_VRAM;
1434 	} else {
1435 		addr = DMA_MAPPING_ERROR;
1436 		prot = 0;
1437 	}
1438 
1439 	return drm_pagemap_addr_encode(addr, prot, order, dir);
1440 }
1441 
1442 static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
1443 	.device_map = xe_drm_pagemap_device_map,
1444 	.populate_mm = xe_drm_pagemap_populate_mm,
1445 };
1446 
1447 /**
1448  * xe_devm_add() - Remap and provide memmap backing for device memory
1449  * @tile: tile that the memory region belongs to
1450  * @vr: vram memory region to remap
1451  *
1452  * This remaps device memory into the host physical address space and
1453  * creates struct pages to back the device memory.
1454  *
1455  * Return: 0 on success, standard error code otherwise
1456  */
1457 int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
1458 {
1459 	struct xe_device *xe = tile_to_xe(tile);
1460 	struct device *dev = &to_pci_dev(xe->drm.dev)->dev;
1461 	struct resource *res;
1462 	void *addr;
1463 	int ret;
1464 
1465 	res = devm_request_free_mem_region(dev, &iomem_resource,
1466 					   vr->usable_size);
1467 	if (IS_ERR(res)) {
1468 		ret = PTR_ERR(res);
1469 		return ret;
1470 	}
1471 
1472 	vr->pagemap.type = MEMORY_DEVICE_PRIVATE;
1473 	vr->pagemap.range.start = res->start;
1474 	vr->pagemap.range.end = res->end;
1475 	vr->pagemap.nr_range = 1;
1476 	vr->pagemap.ops = drm_pagemap_pagemap_ops_get();
1477 	vr->pagemap.owner = xe_svm_devm_owner(xe);
1478 	addr = devm_memremap_pages(dev, &vr->pagemap);
1479 
1480 	vr->dpagemap.dev = dev;
1481 	vr->dpagemap.ops = &xe_drm_pagemap_ops;
1482 
1483 	if (IS_ERR(addr)) {
1484 		devm_release_mem_region(dev, res->start, resource_size(res));
1485 		ret = PTR_ERR(addr);
1486 		drm_err(&xe->drm, "Failed to remap tile %d memory, errno %pe\n",
1487 			tile->id, ERR_PTR(ret));
1488 		return ret;
1489 	}
1490 	vr->hpa_base = res->start;
1491 
1492 	drm_dbg(&xe->drm, "Added tile %d memory [%llx-%llx] to devm, remapped to %pr\n",
1493 		tile->id, vr->io_start, vr->io_start + vr->usable_size, res);
1494 	return 0;
1495 }
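/*
 * A minimal usage sketch for xe_devm_add(), assuming a probe-time loop over
 * tiles on a discrete device (the loop itself is illustrative, not code from
 * this file):
 *
 *	for_each_tile(tile, xe, id) {
 *		err = xe_devm_add(tile, tile->mem.vram);
 *		if (err)
 *			return err;
 *	}
 *
 * After this, the tile's VRAM is backed by MEMORY_DEVICE_PRIVATE struct pages
 * and can be handed out by xe_drm_pagemap_populate_mm() above.
 */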
1496 #else
1497 int xe_svm_alloc_vram(struct xe_tile *tile,
1498 		      struct xe_svm_range *range,
1499 		      const struct drm_gpusvm_ctx *ctx)
1500 {
1501 	return -EOPNOTSUPP;
1502 }
1503 
1504 int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
1505 {
1506 	return 0;
1507 }
1508 
1509 struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
1510 {
1511 	return NULL;
1512 }
1513 #endif
1514 
1515 /**
1516  * xe_svm_flush() - SVM flush
1517  * @vm: The VM.
1518  *
1519  * Flush all SVM actions.
1520  */
1521 void xe_svm_flush(struct xe_vm *vm)
1522 {
1523 	if (xe_vm_in_fault_mode(vm))
1524 		flush_work(&vm->svm.garbage_collector.work);
1525 }
1526