xref: /linux/drivers/gpu/drm/xe/xe_svm.c (revision 7c0c19c076ffe84b8bcd5f927eb47452837f2c99)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2024 Intel Corporation
4  */
5 
6 #include <linux/pci-p2pdma.h>
7 
8 #include <drm/drm_drv.h>
9 #include <drm/drm_managed.h>
10 #include <drm/drm_pagemap.h>
11 #include <drm/drm_pagemap_util.h>
12 
13 #include "xe_bo.h"
14 #include "xe_exec_queue_types.h"
15 #include "xe_gt_stats.h"
16 #include "xe_migrate.h"
17 #include "xe_module.h"
18 #include "xe_pm.h"
19 #include "xe_pt.h"
20 #include "xe_svm.h"
21 #include "xe_tile.h"
22 #include "xe_ttm_vram_mgr.h"
23 #include "xe_vm.h"
24 #include "xe_vm_types.h"
25 #include "xe_vram_types.h"
26 
27 /* Identifies subclasses of struct drm_pagemap_peer */
28 #define XE_PEER_PAGEMAP ((void *)0ul)
29 #define XE_PEER_VM ((void *)1ul)
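/*
 * The value is stored in drm_pagemap_peer::private (see xe_svm_init() and
 * xe_pagemap_create()) and read back by xe_peer_to_dev() to locate the
 * struct device used for the interconnect check.
 */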
30 
31 /**
32  * DOC: drm_pagemap reference-counting in xe:
33  *
34  * In addition to the drm_pagemap internal reference counting by its zone
35  * device data, the xe driver holds the following long-time references:
36  *
37  * - struct xe_pagemap:
38  *	The xe_pagemap struct derives from struct drm_pagemap and uses its
39  *	reference count.
40  * - SVM-enabled VMs:
41  *	SVM-enabled VMs look up and keep a reference to all xe_pagemaps on
42  *	the same device.
43  * - VMAs:
44  *	VMAs keep a reference on the drm_pagemap indicated by a gpu_madvise()
45  *	call.
46  *
47  * In addition, all drm_pagemap or xe_pagemap pointers whose lifetime cannot
48  * be guaranteed by a VMA reference under the vm lock should hold a reference.
49  * That includes the range->pages.dpagemap pointer.
50  */
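
/*
 * Illustrative sketch (not part of the driver flow): code that needs a
 * drm_pagemap pointer to stay valid past the point where the VMA reference
 * under the vm lock covers it would take its own reference, assuming
 * drm_pagemap_get() is the acquire helper matching drm_pagemap_put():
 *
 *	struct drm_pagemap *dpagemap = vma->attr.preferred_loc.dpagemap;
 *
 *	if (dpagemap) {
 *		drm_pagemap_get(dpagemap);
 *		... drop the vm lock and use dpagemap ...
 *		drm_pagemap_put(dpagemap);
 *	}
 */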
51 
52 static int xe_svm_get_pagemaps(struct xe_vm *vm);
53 
54 void *xe_svm_private_page_owner(struct xe_vm *vm, bool force_smem)
55 {
56 	return force_smem ? NULL : vm->svm.peer.owner;
57 }
58 
59 static bool xe_svm_range_in_vram(struct xe_svm_range *range)
60 {
61 	/*
62 	 * Advisory-only check of whether the range is currently backed by
63 	 * VRAM memory.
64 	 */
65 
66 	struct drm_gpusvm_pages_flags flags = {
67 		/* Pairs with WRITE_ONCE in drm_gpusvm.c */
68 		.__flags = READ_ONCE(range->base.pages.flags.__flags),
69 	};
70 
71 	return flags.has_devmem_pages;
72 }
73 
74 static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
75 {
76 	/* Not reliable without notifier lock */
77 	return xe_svm_range_in_vram(range) && range->tile_present;
78 }
79 
80 static struct xe_vm *gpusvm_to_vm(struct drm_gpusvm *gpusvm)
81 {
82 	return container_of(gpusvm, struct xe_vm, svm.gpusvm);
83 }
84 
85 static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
86 {
87 	return gpusvm_to_vm(r->gpusvm);
88 }
89 
90 #define range_debug(r__, operation__)					\
91 	vm_dbg(&range_to_vm(&(r__)->base)->xe->drm,			\
92 	       "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
93 	       "start=0x%014lx, end=0x%014lx, size=%lu",		\
94 	       (operation__), range_to_vm(&(r__)->base)->usm.asid,	\
95 	       (r__)->base.gpusvm,					\
96 	       xe_svm_range_in_vram((r__)) ? 1 : 0,			\
97 	       xe_svm_range_has_vram_binding((r__)) ? 1 : 0,		\
98 	       (r__)->base.pages.notifier_seq,				\
99 	       xe_svm_range_start((r__)), xe_svm_range_end((r__)),	\
100 	       xe_svm_range_size((r__)))
101 
102 void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
103 {
104 	range_debug(range, operation);
105 }
106 
107 static struct drm_gpusvm_range *
108 xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
109 {
110 	struct xe_svm_range *range;
111 
112 	range = kzalloc(sizeof(*range), GFP_KERNEL);
113 	if (!range)
114 		return NULL;
115 
116 	INIT_LIST_HEAD(&range->garbage_collector_link);
117 	xe_vm_get(gpusvm_to_vm(gpusvm));
118 
119 	return &range->base;
120 }
121 
122 static void xe_svm_range_free(struct drm_gpusvm_range *range)
123 {
124 	xe_vm_put(range_to_vm(range));
125 	kfree(range);
126 }
127 
128 static void
129 xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
130 				   const struct mmu_notifier_range *mmu_range)
131 {
132 	struct xe_device *xe = vm->xe;
133 
134 	range_debug(range, "GARBAGE COLLECTOR ADD");
135 
136 	drm_gpusvm_range_set_unmapped(&range->base, mmu_range);
137 
138 	spin_lock(&vm->svm.garbage_collector.lock);
139 	if (list_empty(&range->garbage_collector_link))
140 		list_add_tail(&range->garbage_collector_link,
141 			      &vm->svm.garbage_collector.range_list);
142 	spin_unlock(&vm->svm.garbage_collector.lock);
143 
144 	queue_work(xe->usm.pf_wq, &vm->svm.garbage_collector.work);
145 }
146 
147 static void xe_svm_tlb_inval_count_stats_incr(struct xe_gt *gt)
148 {
149 	xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_COUNT, 1);
150 }
151 
152 static u8
153 xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
154 				  const struct mmu_notifier_range *mmu_range,
155 				  u64 *adj_start, u64 *adj_end)
156 {
157 	struct xe_svm_range *range = to_xe_range(r);
158 	struct xe_device *xe = vm->xe;
159 	struct xe_tile *tile;
160 	u8 tile_mask = 0;
161 	u8 id;
162 
163 	xe_svm_assert_in_notifier(vm);
164 
165 	range_debug(range, "NOTIFIER");
166 
167 	/* Skip if already unmapped or if no binding exists */
168 	if (range->base.pages.flags.unmapped || !range->tile_present)
169 		return 0;
170 
171 	range_debug(range, "NOTIFIER - EXECUTE");
172 
173 	/* Adjust invalidation to range boundaries */
174 	*adj_start = min(xe_svm_range_start(range), mmu_range->start);
175 	*adj_end = max(xe_svm_range_end(range), mmu_range->end);
176 
177 	/*
178 	 * XXX: Ideally would zap PTEs in one shot in xe_svm_invalidate but the
179 	 * invalidation code can't correctly cope with sparse ranges or
180 	 * invalidations spanning multiple ranges.
181 	 */
182 	for_each_tile(tile, xe, id)
183 		if (xe_pt_zap_ptes_range(tile, vm, range)) {
184 			/*
185 			 * WRITE_ONCE pairs with READ_ONCE in
186 			 * xe_vm_has_valid_gpu_mapping()
187 			 */
188 			WRITE_ONCE(range->tile_invalidated,
189 				   range->tile_invalidated | BIT(id));
190 
191 			if (!(tile_mask & BIT(id))) {
192 				xe_svm_tlb_inval_count_stats_incr(tile->primary_gt);
193 				if (tile->media_gt)
194 					xe_svm_tlb_inval_count_stats_incr(tile->media_gt);
195 				tile_mask |= BIT(id);
196 			}
197 		}
198 
199 	return tile_mask;
200 }
201 
202 static void
203 xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r,
204 				const struct mmu_notifier_range *mmu_range)
205 {
206 	struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
207 
208 	xe_svm_assert_in_notifier(vm);
209 
210 	drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx);
211 	if (!xe_vm_is_closed(vm) && mmu_range->event == MMU_NOTIFY_UNMAP)
212 		xe_svm_garbage_collector_add_range(vm, to_xe_range(r),
213 						   mmu_range);
214 }
215 
216 static void xe_svm_tlb_inval_us_stats_incr(struct xe_gt *gt, ktime_t start)
217 {
218 	s64 us_delta = xe_gt_stats_ktime_us_delta(start);
219 
220 	xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_US, us_delta);
221 }
222 
223 static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
224 			      struct drm_gpusvm_notifier *notifier,
225 			      const struct mmu_notifier_range *mmu_range)
226 {
227 	struct xe_vm *vm = gpusvm_to_vm(gpusvm);
228 	struct xe_device *xe = vm->xe;
229 	struct drm_gpusvm_range *r, *first;
230 	struct xe_tile *tile;
231 	ktime_t start = xe_gt_stats_ktime_get();
232 	u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
233 	u8 tile_mask = 0, id;
234 	long err;
235 
236 	xe_svm_assert_in_notifier(vm);
237 
238 	vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm,
239 	       "INVALIDATE: asid=%u, gpusvm=%p, seqno=%lu, start=0x%016lx, end=0x%016lx, event=%d",
240 	       vm->usm.asid, gpusvm, notifier->notifier.invalidate_seq,
241 	       mmu_range->start, mmu_range->end, mmu_range->event);
242 
243 	/* Adjust invalidation to notifier boundaries */
244 	adj_start = max(drm_gpusvm_notifier_start(notifier), adj_start);
245 	adj_end = min(drm_gpusvm_notifier_end(notifier), adj_end);
246 
247 	first = drm_gpusvm_range_find(notifier, adj_start, adj_end);
248 	if (!first)
249 		return;
250 
251 	/*
252 	 * PTs may be getting destroyed, so it is not safe to touch them, but the
253 	 * PTs should be invalidated at this point in time. Regardless, we still
254 	 * need to ensure any DMA mappings are unmapped here.
255 	 */
256 	if (xe_vm_is_closed(vm))
257 		goto range_notifier_event_end;
258 
259 	/*
260 	 * XXX: Less than ideal to always wait on VM's resv slots if an
261 	 * invalidation is not required. Could walk the range list twice to figure
262 	 * out if an invalidation is needed, but that is also not ideal.
263 	 */
264 	err = dma_resv_wait_timeout(xe_vm_resv(vm),
265 				    DMA_RESV_USAGE_BOOKKEEP,
266 				    false, MAX_SCHEDULE_TIMEOUT);
267 	XE_WARN_ON(err <= 0);
268 
269 	r = first;
270 	drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
271 		tile_mask |= xe_svm_range_notifier_event_begin(vm, r, mmu_range,
272 							       &adj_start,
273 							       &adj_end);
274 	if (!tile_mask)
275 		goto range_notifier_event_end;
276 
277 	xe_device_wmb(xe);
278 
279 	err = xe_vm_range_tilemask_tlb_inval(vm, adj_start, adj_end, tile_mask);
280 	WARN_ON_ONCE(err);
281 
282 range_notifier_event_end:
283 	r = first;
284 	drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
285 		xe_svm_range_notifier_event_end(vm, r, mmu_range);
286 	for_each_tile(tile, xe, id) {
287 		if (tile_mask & BIT(id)) {
288 			xe_svm_tlb_inval_us_stats_incr(tile->primary_gt, start);
289 			if (tile->media_gt)
290 				xe_svm_tlb_inval_us_stats_incr(tile->media_gt, start);
291 		}
292 	}
293 }
294 
295 static int __xe_svm_garbage_collector(struct xe_vm *vm,
296 				      struct xe_svm_range *range)
297 {
298 	struct dma_fence *fence;
299 
300 	range_debug(range, "GARBAGE COLLECTOR");
301 
302 	xe_vm_lock(vm, false);
303 	fence = xe_vm_range_unbind(vm, range);
304 	xe_vm_unlock(vm);
305 	if (IS_ERR(fence))
306 		return PTR_ERR(fence);
307 	dma_fence_put(fence);
308 
309 	drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);
310 
311 	return 0;
312 }
313 
314 static void xe_vma_set_default_attributes(struct xe_vma *vma)
315 {
316 	struct xe_vma_mem_attr default_attr = {
317 		.preferred_loc.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
318 		.preferred_loc.migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
319 		.pat_index = vma->attr.default_pat_index,
320 		.atomic_access = DRM_XE_ATOMIC_UNDEFINED,
321 	};
322 
323 	xe_vma_mem_attr_copy(&vma->attr, &default_attr);
324 }
325 
326 static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 start, u64 end)
327 {
328 	struct xe_vma *vma;
329 	bool has_default_attr;
330 	int err;
331 
332 	vma = xe_vm_find_vma_by_addr(vm, start);
333 	if (!vma)
334 		return -EINVAL;
335 
336 	if (!(vma->gpuva.flags & XE_VMA_MADV_AUTORESET)) {
337 		drm_dbg(&vm->xe->drm, "Skipping madvise reset for vma.\n");
338 		return 0;
339 	}
340 
341 	vm_dbg(&vm->xe->drm, "Existing VMA start=0x%016llx, vma_end=0x%016llx",
342 	       xe_vma_start(vma), xe_vma_end(vma));
343 
344 	has_default_attr = xe_vma_has_default_mem_attrs(vma);
345 
346 	if (has_default_attr) {
347 		start = xe_vma_start(vma);
348 		end = xe_vma_end(vma);
349 	} else if (xe_vma_start(vma) == start && xe_vma_end(vma) == end) {
350 		xe_vma_set_default_attributes(vma);
351 	}
352 
353 	xe_vm_find_cpu_addr_mirror_vma_range(vm, &start, &end);
354 
355 	if (xe_vma_start(vma) == start && xe_vma_end(vma) == end && has_default_attr)
356 		return 0;
357 
358 	vm_dbg(&vm->xe->drm, "New VMA start=0x%016llx, vma_end=0x%016llx", start, end);
359 
360 	err = xe_vm_alloc_cpu_addr_mirror_vma(vm, start, end - start);
361 	if (err) {
362 		drm_warn(&vm->xe->drm, "New VMA MAP failed: %pe\n", ERR_PTR(err));
363 		xe_vm_kill(vm, true);
364 		return err;
365 	}
366 
367 	/*
368 	 * When called from xe_svm_handle_pagefault() the original VMA might have
369 	 * changed; signal this so the caller looks up the VMA again.
370 	 */
371 	return -EAGAIN;
372 }
373 
374 static int xe_svm_garbage_collector(struct xe_vm *vm)
375 {
376 	struct xe_svm_range *range;
377 	u64 range_start;
378 	u64 range_end;
379 	int err, ret = 0;
380 
381 	lockdep_assert_held_write(&vm->lock);
382 
383 	if (xe_vm_is_closed_or_banned(vm))
384 		return -ENOENT;
385 
386 	for (;;) {
387 		spin_lock(&vm->svm.garbage_collector.lock);
388 		range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
389 						 typeof(*range),
390 						 garbage_collector_link);
391 		if (!range)
392 			break;
393 
394 		range_start = xe_svm_range_start(range);
395 		range_end = xe_svm_range_end(range);
396 
397 		list_del(&range->garbage_collector_link);
398 		spin_unlock(&vm->svm.garbage_collector.lock);
399 
400 		err = __xe_svm_garbage_collector(vm, range);
401 		if (err) {
402 			drm_warn(&vm->xe->drm,
403 				 "Garbage collection failed: %pe\n",
404 				 ERR_PTR(err));
405 			xe_vm_kill(vm, true);
406 			return err;
407 		}
408 
409 		err = xe_svm_range_set_default_attr(vm, range_start, range_end);
410 		if (err) {
411 			if (err == -EAGAIN)
412 				ret = -EAGAIN;
413 			else
414 				return err;
415 		}
416 	}
417 	spin_unlock(&vm->svm.garbage_collector.lock);
418 
419 	return ret;
420 }
421 
422 static void xe_svm_garbage_collector_work_func(struct work_struct *w)
423 {
424 	struct xe_vm *vm = container_of(w, struct xe_vm,
425 					svm.garbage_collector.work);
426 
427 	down_write(&vm->lock);
428 	xe_svm_garbage_collector(vm);
429 	up_write(&vm->lock);
430 }
431 
432 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
433 
434 static struct xe_vram_region *xe_pagemap_to_vr(struct xe_pagemap *xpagemap)
435 {
436 	return xpagemap->vr;
437 }
438 
439 static struct xe_pagemap *xe_page_to_pagemap(struct page *page)
440 {
441 	return container_of(page_pgmap(page), struct xe_pagemap, pagemap);
442 }
443 
444 static struct xe_vram_region *xe_page_to_vr(struct page *page)
445 {
446 	return xe_pagemap_to_vr(xe_page_to_pagemap(page));
447 }
448 
449 static u64 xe_page_to_dpa(struct page *page)
450 {
451 	struct xe_pagemap *xpagemap = xe_page_to_pagemap(page);
452 	struct xe_vram_region *vr = xe_pagemap_to_vr(xpagemap);
453 	u64 hpa_base = xpagemap->hpa_base;
454 	u64 pfn = page_to_pfn(page);
455 	u64 offset;
456 	u64 dpa;
457 
458 	xe_assert(vr->xe, is_device_private_page(page));
459 	xe_assert(vr->xe, (pfn << PAGE_SHIFT) >= hpa_base);
460 
461 	offset = (pfn << PAGE_SHIFT) - hpa_base;
462 	dpa = vr->dpa_base + offset;
463 
464 	return dpa;
465 }
466 
467 static u64 xe_page_to_pcie(struct page *page)
468 {
469 	struct xe_pagemap *xpagemap = xe_page_to_pagemap(page);
470 	struct xe_vram_region *vr = xe_pagemap_to_vr(xpagemap);
471 
472 	return xe_page_to_dpa(page) - vr->dpa_base + vr->io_start;
473 }
474 
475 enum xe_svm_copy_dir {
476 	XE_SVM_COPY_TO_VRAM,
477 	XE_SVM_COPY_TO_SRAM,
478 };
479 
480 static void xe_svm_copy_kb_stats_incr(struct xe_gt *gt,
481 				      const enum xe_svm_copy_dir dir,
482 				      int kb)
483 {
484 	if (dir == XE_SVM_COPY_TO_VRAM)
485 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_KB, kb);
486 	else
487 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_KB, kb);
488 }
489 
490 static void xe_svm_copy_us_stats_incr(struct xe_gt *gt,
491 				      const enum xe_svm_copy_dir dir,
492 				      unsigned long npages,
493 				      ktime_t start)
494 {
495 	s64 us_delta = xe_gt_stats_ktime_us_delta(start);
496 
497 	if (dir == XE_SVM_COPY_TO_VRAM) {
498 		switch (npages) {
499 		case 1:
500 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_DEVICE_COPY_US,
501 					 us_delta);
502 			break;
503 		case 16:
504 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_DEVICE_COPY_US,
505 					 us_delta);
506 			break;
507 		case 512:
508 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_DEVICE_COPY_US,
509 					 us_delta);
510 			break;
511 		}
512 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_US,
513 				 us_delta);
514 	} else {
515 		switch (npages) {
516 		case 1:
517 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_CPU_COPY_US,
518 					 us_delta);
519 			break;
520 		case 16:
521 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_CPU_COPY_US,
522 					 us_delta);
523 			break;
524 		case 512:
525 			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_CPU_COPY_US,
526 					 us_delta);
527 			break;
528 		}
529 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_US,
530 				 us_delta);
531 	}
532 }
533 
534 static int xe_svm_copy(struct page **pages,
535 		       struct drm_pagemap_addr *pagemap_addr,
536 		       unsigned long npages, const enum xe_svm_copy_dir dir,
537 		       struct dma_fence *pre_migrate_fence)
538 {
539 	struct xe_vram_region *vr = NULL;
540 	struct xe_gt *gt = NULL;
541 	struct xe_device *xe;
542 	struct dma_fence *fence = NULL;
543 	unsigned long i;
544 #define XE_VRAM_ADDR_INVALID	~0x0ull
545 	u64 vram_addr = XE_VRAM_ADDR_INVALID;
546 	int err = 0, pos = 0;
547 	bool sram = dir == XE_SVM_COPY_TO_SRAM;
548 	ktime_t start = xe_gt_stats_ktime_get();
549 
550 	/*
551 	 * This flow is complex: it locates physically contiguous device pages,
552 	 * derives the starting physical address, and performs a single GPU copy
553 	 * for every 8M chunk in a DMA address array. Both device pages and
554 	 * DMA addresses may be sparsely populated. If either is NULL, a copy is
555 	 * triggered based on the current search state. The last GPU copy is
556 	 * waited on to ensure all copies are complete.
557 	 */
558 
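	/*
	 * Worked example (illustrative, assuming 4K CPU pages): with
	 * XE_MIGRATE_CHUNK_SIZE == SZ_8M, a copy is kicked off once
	 * (i - pos) reaches SZ_8M / SZ_4K == 2048 gathered pages, so a fully
	 * contiguous 16M region results in two 8M GPU copies, while any
	 * discontiguity or NULL entry flushes whatever has been gathered so far.
	 */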
559 	for (i = 0; i < npages; ++i) {
560 		struct page *spage = pages[i];
561 		struct dma_fence *__fence;
562 		u64 __vram_addr;
563 		bool match = false, chunk, last;
564 
565 #define XE_MIGRATE_CHUNK_SIZE	SZ_8M
566 		chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
567 		last = (i + 1) == npages;
568 
569 		/* No CPU page and no device pages queued to copy */
570 		if (!pagemap_addr[i].addr && vram_addr == XE_VRAM_ADDR_INVALID)
571 			continue;
572 
573 		if (!vr && spage) {
574 			vr = xe_page_to_vr(spage);
575 			gt = xe_migrate_exec_queue(vr->migrate)->gt;
576 			xe = vr->xe;
577 		}
578 		XE_WARN_ON(spage && xe_page_to_vr(spage) != vr);
579 
580 		/*
581 		 * CPU page and device page valid, capture physical address on
582 		 * first device page, check if physical contiguous on subsequent
583 		 * device pages.
584 		 */
585 		if (pagemap_addr[i].addr && spage) {
586 			__vram_addr = xe_page_to_dpa(spage);
587 			if (vram_addr == XE_VRAM_ADDR_INVALID) {
588 				vram_addr = __vram_addr;
589 				pos = i;
590 			}
591 
592 			match = vram_addr + PAGE_SIZE * (i - pos) == __vram_addr;
593 			/* Expected with contiguous memory */
594 			xe_assert(vr->xe, match);
595 
596 			if (pagemap_addr[i].order) {
597 				i += NR_PAGES(pagemap_addr[i].order) - 1;
598 				chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
599 				last = (i + 1) == npages;
600 			}
601 		}
602 
603 		/*
604 		 * Mismatched physical address, 8M copy chunk, or last page -
605 		 * trigger a copy.
606 		 */
607 		if (!match || chunk || last) {
608 			/*
609 			 * Extra page for first copy if last page and matching
610 			 * physical address.
611 			 */
612 			int incr = (match && last) ? 1 : 0;
613 
614 			if (vram_addr != XE_VRAM_ADDR_INVALID) {
615 				xe_svm_copy_kb_stats_incr(gt, dir,
616 							  (i - pos + incr) *
617 							  (PAGE_SIZE / SZ_1K));
618 				if (sram) {
619 					vm_dbg(&xe->drm,
620 					       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
621 					       vram_addr,
622 					       (u64)pagemap_addr[pos].addr, i - pos + incr);
623 					__fence = xe_migrate_from_vram(vr->migrate,
624 								       i - pos + incr,
625 								       vram_addr,
626 								       &pagemap_addr[pos],
627 								       pre_migrate_fence);
628 				} else {
629 					vm_dbg(&xe->drm,
630 					       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
631 					       (u64)pagemap_addr[pos].addr, vram_addr,
632 					       i - pos + incr);
633 					__fence = xe_migrate_to_vram(vr->migrate,
634 								     i - pos + incr,
635 								     &pagemap_addr[pos],
636 								     vram_addr,
637 								     pre_migrate_fence);
638 				}
639 				if (IS_ERR(__fence)) {
640 					err = PTR_ERR(__fence);
641 					goto err_out;
642 				}
643 				pre_migrate_fence = NULL;
644 				dma_fence_put(fence);
645 				fence = __fence;
646 			}
647 
648 			/* Set up physical address of next device page */
649 			if (pagemap_addr[i].addr && spage) {
650 				vram_addr = __vram_addr;
651 				pos = i;
652 			} else {
653 				vram_addr = XE_VRAM_ADDR_INVALID;
654 			}
655 
656 			/* Extra mismatched device page, copy it */
657 			if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
658 				xe_svm_copy_kb_stats_incr(gt, dir,
659 							  (PAGE_SIZE / SZ_1K));
660 				if (sram) {
661 					vm_dbg(&xe->drm,
662 					       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
663 					       vram_addr, (u64)pagemap_addr[pos].addr, 1);
664 					__fence = xe_migrate_from_vram(vr->migrate, 1,
665 								       vram_addr,
666 								       &pagemap_addr[pos],
667 								       pre_migrate_fence);
668 				} else {
669 					vm_dbg(&xe->drm,
670 					       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
671 					       (u64)pagemap_addr[pos].addr, vram_addr, 1);
672 					__fence = xe_migrate_to_vram(vr->migrate, 1,
673 								     &pagemap_addr[pos],
674 								     vram_addr,
675 								     pre_migrate_fence);
676 				}
677 				if (IS_ERR(__fence)) {
678 					err = PTR_ERR(__fence);
679 					goto err_out;
680 				}
681 				pre_migrate_fence = NULL;
682 				dma_fence_put(fence);
683 				fence = __fence;
684 			}
685 		}
686 	}
687 
688 err_out:
689 	/* Wait for all copies to complete */
690 	if (fence) {
691 		dma_fence_wait(fence, false);
692 		dma_fence_put(fence);
693 	}
694 	if (pre_migrate_fence)
695 		dma_fence_wait(pre_migrate_fence, false);
696 
697 	/*
698 	 * XXX: We can't derive the GT here (or anywhere in this function), but
699 	 * compute always uses the primary GT, so accumulate stats on the likely
700 	 * GT of the fault.
701 	 */
702 	if (gt)
703 		xe_svm_copy_us_stats_incr(gt, dir, npages, start);
704 
705 	return err;
706 #undef XE_MIGRATE_CHUNK_SIZE
707 #undef XE_VRAM_ADDR_INVALID
708 }
709 
710 static int xe_svm_copy_to_devmem(struct page **pages,
711 				 struct drm_pagemap_addr *pagemap_addr,
712 				 unsigned long npages,
713 				 struct dma_fence *pre_migrate_fence)
714 {
715 	return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM,
716 			   pre_migrate_fence);
717 }
718 
719 static int xe_svm_copy_to_ram(struct page **pages,
720 			      struct drm_pagemap_addr *pagemap_addr,
721 			      unsigned long npages,
722 			      struct dma_fence *pre_migrate_fence)
723 {
724 	return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM,
725 			   pre_migrate_fence);
726 }
727 
728 static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
729 {
730 	return container_of(devmem_allocation, struct xe_bo, devmem_allocation);
731 }
732 
733 static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
734 {
735 	struct xe_bo *bo = to_xe_bo(devmem_allocation);
736 	struct xe_device *xe = xe_bo_device(bo);
737 
738 	dma_fence_put(devmem_allocation->pre_migrate_fence);
739 	xe_bo_put_async(bo);
740 	xe_pm_runtime_put(xe);
741 }
742 
743 static u64 block_offset_to_pfn(struct drm_pagemap *dpagemap, u64 offset)
744 {
745 	struct xe_pagemap *xpagemap = container_of(dpagemap, typeof(*xpagemap), dpagemap);
746 
747 	return PHYS_PFN(offset + xpagemap->hpa_base);
748 }
749 
750 static struct drm_buddy *vram_to_buddy(struct xe_vram_region *vram)
751 {
752 	return &vram->ttm.mm;
753 }
754 
755 static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
756 				      unsigned long npages, unsigned long *pfn)
757 {
758 	struct xe_bo *bo = to_xe_bo(devmem_allocation);
759 	struct ttm_resource *res = bo->ttm.resource;
760 	struct list_head *blocks = &to_xe_ttm_vram_mgr_resource(res)->blocks;
761 	struct drm_buddy_block *block;
762 	int j = 0;
763 
764 	list_for_each_entry(block, blocks, link) {
765 		struct xe_vram_region *vr = block->private;
766 		struct drm_buddy *buddy = vram_to_buddy(vr);
767 		u64 block_pfn = block_offset_to_pfn(devmem_allocation->dpagemap,
768 						    drm_buddy_block_offset(block));
769 		int i;
770 
771 		for (i = 0; i < drm_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i)
772 			pfn[j++] = block_pfn + i;
773 	}
774 
775 	return 0;
776 }
777 
778 static const struct drm_pagemap_devmem_ops dpagemap_devmem_ops = {
779 	.devmem_release = xe_svm_devmem_release,
780 	.populate_devmem_pfn = xe_svm_populate_devmem_pfn,
781 	.copy_to_devmem = xe_svm_copy_to_devmem,
782 	.copy_to_ram = xe_svm_copy_to_ram,
783 };
784 
785 #else
786 static int xe_svm_get_pagemaps(struct xe_vm *vm)
787 {
788 	return 0;
789 }
790 #endif
791 
792 static const struct drm_gpusvm_ops gpusvm_ops = {
793 	.range_alloc = xe_svm_range_alloc,
794 	.range_free = xe_svm_range_free,
795 	.invalidate = xe_svm_invalidate,
796 };
797 
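/*
 * Candidate range sizes handed to drm_gpusvm_init(); the core is expected to
 * try them in the order given (largest first) when sizing a range around a
 * fault address, which is also why the stats code below buckets ranges into
 * 2M/64K/4K.
 */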
798 static const unsigned long fault_chunk_sizes[] = {
799 	SZ_2M,
800 	SZ_64K,
801 	SZ_4K,
802 };
803 
804 static void xe_pagemap_put(struct xe_pagemap *xpagemap)
805 {
806 	drm_pagemap_put(&xpagemap->dpagemap);
807 }
808 
809 static void xe_svm_put_pagemaps(struct xe_vm *vm)
810 {
811 	struct xe_device *xe = vm->xe;
812 	struct xe_tile *tile;
813 	int id;
814 
815 	for_each_tile(tile, xe, id) {
816 		struct xe_pagemap *xpagemap = vm->svm.pagemaps[id];
817 
818 		if (xpagemap)
819 			xe_pagemap_put(xpagemap);
820 		vm->svm.pagemaps[id] = NULL;
821 	}
822 }
823 
824 static struct device *xe_peer_to_dev(struct drm_pagemap_peer *peer)
825 {
826 	if (peer->private == XE_PEER_PAGEMAP)
827 		return container_of(peer, struct xe_pagemap, peer)->dpagemap.drm->dev;
828 
829 	return container_of(peer, struct xe_vm, svm.peer)->xe->drm.dev;
830 }
831 
832 static bool xe_has_interconnect(struct drm_pagemap_peer *peer1,
833 				struct drm_pagemap_peer *peer2)
834 {
835 	struct device *dev1 = xe_peer_to_dev(peer1);
836 	struct device *dev2 = xe_peer_to_dev(peer2);
837 
838 	if (dev1 == dev2)
839 		return true;
840 
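	/*
	 * pci_p2pdma_distance() returns a non-negative distance when P2P DMA
	 * between the two devices is usable and a negative value otherwise,
	 * so >= 0 here means an interconnect exists.
	 */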
841 	return pci_p2pdma_distance(to_pci_dev(dev1), dev2, true) >= 0;
842 }
843 
844 static DRM_PAGEMAP_OWNER_LIST_DEFINE(xe_owner_list);
845 
846 /**
847  * xe_svm_init() - SVM initialize
848  * @vm: The VM.
849  *
850  * Initialize SVM state which is embedded within the VM.
851  *
852  * Return: 0 on success, negative error code on error.
853  */
854 int xe_svm_init(struct xe_vm *vm)
855 {
856 	int err;
857 
858 	if (vm->flags & XE_VM_FLAG_FAULT_MODE) {
859 		spin_lock_init(&vm->svm.garbage_collector.lock);
860 		INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
861 		INIT_WORK(&vm->svm.garbage_collector.work,
862 			  xe_svm_garbage_collector_work_func);
863 
864 		vm->svm.peer.private = XE_PEER_VM;
865 		err = drm_pagemap_acquire_owner(&vm->svm.peer, &xe_owner_list,
866 						xe_has_interconnect);
867 		if (err)
868 			return err;
869 
870 		err = xe_svm_get_pagemaps(vm);
871 		if (err) {
872 			drm_pagemap_release_owner(&vm->svm.peer);
873 			return err;
874 		}
875 
876 		err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
877 				      current->mm, 0, vm->size,
878 				      xe_modparam.svm_notifier_size * SZ_1M,
879 				      &gpusvm_ops, fault_chunk_sizes,
880 				      ARRAY_SIZE(fault_chunk_sizes));
881 		drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
882 
883 		if (err) {
884 			xe_svm_put_pagemaps(vm);
885 			drm_pagemap_release_owner(&vm->svm.peer);
886 			return err;
887 		}
888 	} else {
889 		err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)",
890 				      &vm->xe->drm, NULL, 0, 0, 0, NULL,
891 				      NULL, 0);
892 	}
893 
894 	return err;
895 }
896 
897 /**
898  * xe_svm_close() - SVM close
899  * @vm: The VM.
900  *
901  * Close SVM state (i.e., stop and flush all SVM actions).
902  */
903 void xe_svm_close(struct xe_vm *vm)
904 {
905 	xe_assert(vm->xe, xe_vm_is_closed(vm));
906 	flush_work(&vm->svm.garbage_collector.work);
907 	xe_svm_put_pagemaps(vm);
908 	drm_pagemap_release_owner(&vm->svm.peer);
909 }
910 
911 /**
912  * xe_svm_fini() - SVM finalize
913  * @vm: The VM.
914  *
915  * Finalize SVM state which is embedded within the VM.
916  */
917 void xe_svm_fini(struct xe_vm *vm)
918 {
919 	xe_assert(vm->xe, xe_vm_is_closed(vm));
920 
921 	drm_gpusvm_fini(&vm->svm.gpusvm);
922 }
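
/*
 * Illustrative lifecycle sketch (not taken from a specific caller): a
 * fault-mode VM is expected to use the three entry points above roughly as
 *
 *	err = xe_svm_init(vm);	(at VM creation)
 *	...
 *	xe_svm_close(vm);	(once the VM has been closed)
 *	xe_svm_fini(vm);	(at final VM destruction)
 *
 * with xe_svm_close() running before xe_svm_fini() so the garbage collector
 * is flushed and the pagemaps are released while the gpusvm core is still
 * initialized.
 */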
923 
924 static bool xe_svm_range_has_pagemap_locked(const struct xe_svm_range *range,
925 					    const struct drm_pagemap *dpagemap)
926 {
927 	return range->base.pages.dpagemap == dpagemap;
928 }
929 
930 static bool xe_svm_range_has_pagemap(struct xe_svm_range *range,
931 				     const struct drm_pagemap *dpagemap)
932 {
933 	struct xe_vm *vm = range_to_vm(&range->base);
934 	bool ret;
935 
936 	xe_svm_notifier_lock(vm);
937 	ret = xe_svm_range_has_pagemap_locked(range, dpagemap);
938 	xe_svm_notifier_unlock(vm);
939 
940 	return ret;
941 }
942 
943 static bool xe_svm_range_is_valid(struct xe_svm_range *range,
944 				  struct xe_tile *tile,
945 				  bool devmem_only,
946 				  const struct drm_pagemap *dpagemap)
948 {
949 	return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
950 					    range->tile_invalidated) &&
951 		(!devmem_only || xe_svm_range_has_pagemap(range, dpagemap)));
952 }
953 
954 /**
 * xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
955  * @vm: xe_vm pointer
956  * @range: Pointer to the SVM range structure
957  *
958  * xe_svm_range_migrate_to_smem() checks whether the range has pages in VRAM
959  * and migrates them to SMEM.
960  */
961 void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
962 {
963 	if (xe_svm_range_in_vram(range))
964 		drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
965 }
966 
967 /**
968  * xe_svm_range_validate() - Check if the SVM range is valid
969  * @vm: xe_vm pointer
970  * @range: Pointer to the SVM range structure
971  * @tile_mask: Mask representing the tiles to be checked
972  * @dpagemap: if !%NULL, the range is expected to be present
973  * in device memory identified by this parameter.
974  *
975  * The xe_svm_range_validate() function checks if a range is
976  * valid and located in the desired memory region.
977  *
978  * Return: true if the range is valid, false otherwise
979  */
980 bool xe_svm_range_validate(struct xe_vm *vm,
981 			   struct xe_svm_range *range,
982 			   u8 tile_mask, const struct drm_pagemap *dpagemap)
983 {
984 	bool ret;
985 
986 	xe_svm_notifier_lock(vm);
987 
988 	ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask;
989 	if (dpagemap)
990 		ret = ret && xe_svm_range_has_pagemap_locked(range, dpagemap);
991 	else
992 		ret = ret && !range->base.pages.dpagemap;
993 
994 	xe_svm_notifier_unlock(vm);
995 
996 	return ret;
997 }
998 
999 /**
1000  * xe_svm_find_vma_start - Find start of CPU VMA
1001  * @vm: xe_vm pointer
1002  * @start: start address
1003  * @end: end address
1004  * @vma: Pointer to struct xe_vma
1005  *
1007  * This function searches for a CPU VMA within the specified
1008  * range [start, end] in the given VM. It adjusts the range based on the
1009  * xe_vma start and end addresses. If no CPU VMA is found, it returns ULONG_MAX.
1010  *
1011  * Return: The starting address of the VMA within the range,
1012  * or ULONG_MAX if no VMA is found
1013  */
1014 u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *vma)
1015 {
1016 	return drm_gpusvm_find_vma_start(&vm->svm.gpusvm,
1017 					 max(start, xe_vma_start(vma)),
1018 					 min(end, xe_vma_end(vma)));
1019 }
1020 
1021 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
1022 static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
1023 				      unsigned long start, unsigned long end,
1024 				      struct mm_struct *mm,
1025 				      unsigned long timeslice_ms)
1026 {
1027 	struct xe_pagemap *xpagemap = container_of(dpagemap, typeof(*xpagemap), dpagemap);
1028 	struct drm_pagemap_migrate_details mdetails = {
1029 		.timeslice_ms = timeslice_ms,
1030 		.source_peer_migrates = 1,
1031 	};
1032 	struct xe_vram_region *vr = xe_pagemap_to_vr(xpagemap);
1033 	struct dma_fence *pre_migrate_fence = NULL;
1034 	struct xe_device *xe = vr->xe;
1035 	struct device *dev = xe->drm.dev;
1036 	struct drm_buddy_block *block;
1037 	struct xe_validation_ctx vctx;
1038 	struct list_head *blocks;
1039 	struct drm_exec exec;
1040 	struct xe_bo *bo;
1041 	int err = 0, idx;
1042 
1043 	if (!drm_dev_enter(&xe->drm, &idx))
1044 		return -ENODEV;
1045 
1046 	xe_pm_runtime_get(xe);
1047 
1048 	xe_validation_guard(&vctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
1049 		bo = xe_bo_create_locked(xe, NULL, NULL, end - start,
1050 					 ttm_bo_type_device,
1051 					 (IS_DGFX(xe) ? XE_BO_FLAG_VRAM(vr) : XE_BO_FLAG_SYSTEM) |
1052 					 XE_BO_FLAG_CPU_ADDR_MIRROR, &exec);
1053 		drm_exec_retry_on_contention(&exec);
1054 		if (IS_ERR(bo)) {
1055 			err = PTR_ERR(bo);
1056 			xe_validation_retry_on_oom(&vctx, &err);
1057 			break;
1058 		}
1059 
1060 		/* Ensure that any clearing or async eviction will complete before migration. */
1061 		if (!dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL)) {
1062 			err = dma_resv_get_singleton(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
1063 						     &pre_migrate_fence);
1064 			if (err)
1065 				dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
1066 						      false, MAX_SCHEDULE_TIMEOUT);
1067 			else if (pre_migrate_fence)
1068 				dma_fence_enable_sw_signaling(pre_migrate_fence);
1069 		}
1070 
1071 		drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
1072 					&dpagemap_devmem_ops, dpagemap, end - start,
1073 					pre_migrate_fence);
1074 
1075 		blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
1076 		list_for_each_entry(block, blocks, link)
1077 			block->private = vr;
1078 
1079 		xe_bo_get(bo);
1080 
1081 		/* Ensure the device has a pm ref while there are device pages active. */
1082 		xe_pm_runtime_get_noresume(xe);
1083 		/* Consumes the devmem allocation ref. */
1084 		err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
1085 						    start, end, &mdetails);
1086 		xe_bo_unlock(bo);
1087 		xe_bo_put(bo);
1088 	}
1089 	xe_pm_runtime_put(xe);
1090 	drm_dev_exit(idx);
1091 
1092 	return err;
1093 }
1094 #endif
1095 
1096 static bool supports_4K_migration(struct xe_device *xe)
1097 {
1098 	if (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1099 		return false;
1100 
1101 	return true;
1102 }
1103 
1104 /**
1105  * xe_svm_range_needs_migrate_to_vram() - Decide if an SVM range needs migration to VRAM
1106  * @range: SVM range for which migration needs to be decided
1107  * @vma: The VMA covering the range
1108  * @dpagemap: The preferred struct drm_pagemap to migrate to.
1109  *
1110  * Return: True if the range needs migration and migration is supported, false otherwise
1111  */
1112 bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
1113 					const struct drm_pagemap *dpagemap)
1114 {
1115 	struct xe_vm *vm = range_to_vm(&range->base);
1116 	u64 range_size = xe_svm_range_size(range);
1117 
1118 	if (!range->base.pages.flags.migrate_devmem || !dpagemap)
1119 		return false;
1120 
1121 	xe_assert(vm->xe, IS_DGFX(vm->xe));
1122 
1123 	if (xe_svm_range_has_pagemap(range, dpagemap)) {
1124 		drm_dbg(&vm->xe->drm, "Range is already in VRAM\n");
1125 		return false;
1126 	}
1127 
1128 	if (range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
1129 		drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
1130 		return false;
1131 	}
1132 
1133 	return true;
1134 }
1135 
1136 #define DECL_SVM_RANGE_COUNT_STATS(elem, stat) \
1137 static void xe_svm_range_##elem##_count_stats_incr(struct xe_gt *gt, \
1138 						   struct xe_svm_range *range) \
1139 { \
1140 	switch (xe_svm_range_size(range)) { \
1141 	case SZ_4K: \
1142 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_COUNT, 1); \
1143 		break; \
1144 	case SZ_64K: \
1145 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_COUNT, 1); \
1146 		break; \
1147 	case SZ_2M: \
1148 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_COUNT, 1); \
1149 		break; \
1150 	} \
1151 } \
1152 
1153 DECL_SVM_RANGE_COUNT_STATS(fault, PAGEFAULT)
1154 DECL_SVM_RANGE_COUNT_STATS(valid_fault, VALID_PAGEFAULT)
1155 DECL_SVM_RANGE_COUNT_STATS(migrate, MIGRATE)
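
/*
 * For reference, DECL_SVM_RANGE_COUNT_STATS(fault, PAGEFAULT) expands to
 * xe_svm_range_fault_count_stats_incr(), which bumps
 * XE_GT_STATS_ID_SVM_{4K,64K,2M}_PAGEFAULT_COUNT depending on the range
 * size; other sizes fall through without touching a size bucket. The
 * DECL_SVM_RANGE_US_STATS() users below follow the same pattern for
 * elapsed-time counters.
 */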
1156 
1157 #define DECL_SVM_RANGE_US_STATS(elem, stat) \
1158 static void xe_svm_range_##elem##_us_stats_incr(struct xe_gt *gt, \
1159 						struct xe_svm_range *range, \
1160 						ktime_t start) \
1161 { \
1162 	s64 us_delta = xe_gt_stats_ktime_us_delta(start); \
1163 \
1164 	switch (xe_svm_range_size(range)) { \
1165 	case SZ_4K: \
1166 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_US, \
1167 				 us_delta); \
1168 		break; \
1169 	case SZ_64K: \
1170 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_US, \
1171 				 us_delta); \
1172 		break; \
1173 	case SZ_2M: \
1174 		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_US, \
1175 				 us_delta); \
1176 		break; \
1177 	} \
1178 } \
1179 
1180 DECL_SVM_RANGE_US_STATS(migrate, MIGRATE)
1181 DECL_SVM_RANGE_US_STATS(get_pages, GET_PAGES)
1182 DECL_SVM_RANGE_US_STATS(bind, BIND)
1183 DECL_SVM_RANGE_US_STATS(fault, PAGEFAULT)
1184 
1185 static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
1186 				     struct xe_gt *gt, u64 fault_addr,
1187 				     bool need_vram)
1188 {
1189 	int devmem_possible = IS_DGFX(vm->xe) &&
1190 		IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
1191 	struct drm_gpusvm_ctx ctx = {
1192 		.read_only = xe_vma_read_only(vma),
1193 		.devmem_possible = devmem_possible,
1194 		.check_pages_threshold = devmem_possible ? SZ_64K : 0,
1195 		.devmem_only = need_vram && devmem_possible,
1196 		.timeslice_ms = need_vram && devmem_possible ?
1197 			vm->xe->atomic_svm_timeslice_ms : 0,
1198 	};
1199 	struct xe_validation_ctx vctx;
1200 	struct drm_exec exec;
1201 	struct xe_svm_range *range;
1202 	struct dma_fence *fence;
1203 	struct drm_pagemap *dpagemap;
1204 	struct xe_tile *tile = gt_to_tile(gt);
1205 	int migrate_try_count = ctx.devmem_only ? 3 : 1;
1206 	ktime_t start = xe_gt_stats_ktime_get(), bind_start, get_pages_start;
1207 	int err;
1208 
1209 	lockdep_assert_held_write(&vm->lock);
1210 	xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
1211 
1212 	xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, 1);
1213 
1214 retry:
1215 	/* Always process UNMAPs first so the view of SVM ranges is current */
1216 	err = xe_svm_garbage_collector(vm);
1217 	if (err)
1218 		return err;
1219 
1220 	dpagemap = ctx.devmem_only ? xe_tile_local_pagemap(tile) :
1221 		xe_vma_resolve_pagemap(vma, tile);
1222 	ctx.device_private_page_owner = xe_svm_private_page_owner(vm, !dpagemap);
1223 	range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
1224 
1225 	if (IS_ERR(range))
1226 		return PTR_ERR(range);
1227 
1228 	xe_svm_range_fault_count_stats_incr(gt, range);
1229 
1230 	if (ctx.devmem_only && !range->base.pages.flags.migrate_devmem) {
1231 		err = -EACCES;
1232 		goto out;
1233 	}
1234 
1235 	if (xe_svm_range_is_valid(range, tile, ctx.devmem_only, dpagemap)) {
1236 		xe_svm_range_valid_fault_count_stats_incr(gt, range);
1237 		range_debug(range, "PAGE FAULT - VALID");
1238 		goto out;
1239 	}
1240 
1241 	range_debug(range, "PAGE FAULT");
1242 
1243 	if (--migrate_try_count >= 0 &&
1244 	    xe_svm_range_needs_migrate_to_vram(range, vma, dpagemap)) {
1245 		ktime_t migrate_start = xe_gt_stats_ktime_get();
1246 
1247 		xe_svm_range_migrate_count_stats_incr(gt, range);
1248 		err = xe_svm_alloc_vram(range, &ctx, dpagemap);
1249 		xe_svm_range_migrate_us_stats_incr(gt, range, migrate_start);
1250 		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
1251 		if (err) {
1252 			if (migrate_try_count || !ctx.devmem_only) {
1253 				drm_dbg(&vm->xe->drm,
1254 					"VRAM allocation failed, falling back to retrying fault, asid=%u, errno=%pe\n",
1255 					vm->usm.asid, ERR_PTR(err));
1256 
1257 				/*
1258 				 * In the devmem-only case, mixed mappings may
1259 				 * be found. The get_pages function will fix
1260 				 * these up to a single location, allowing the
1261 				 * page fault handler to make forward progress.
1262 				 */
1263 				if (ctx.devmem_only)
1264 					goto get_pages;
1265 				else
1266 					goto retry;
1267 			} else {
1268 				drm_err(&vm->xe->drm,
1269 					"VRAM allocation failed, retry count exceeded, asid=%u, errno=%pe\n",
1270 					vm->usm.asid, ERR_PTR(err));
1271 				return err;
1272 			}
1273 		}
1274 	}
1275 
1276 get_pages:
1277 	get_pages_start = xe_gt_stats_ktime_get();
1278 
1279 	range_debug(range, "GET PAGES");
1280 	err = xe_svm_range_get_pages(vm, range, &ctx);
1281 	/* Corner case where CPU mappings have changed */
1282 	if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
1283 		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
1284 		if (migrate_try_count > 0 || !ctx.devmem_only) {
1285 			drm_dbg(&vm->xe->drm,
1286 				"Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
1287 				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
1288 			range_debug(range, "PAGE FAULT - RETRY PAGES");
1289 			goto retry;
1290 		} else {
1291 			drm_err(&vm->xe->drm,
1292 				"Get pages failed, retry count exceeded, asid=%u, gpusvm=%p, errno=%pe\n",
1293 				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
1294 		}
1295 	}
1296 	if (err) {
1297 		range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
1298 		goto out;
1299 	} else if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)) {
1300 		drm_dbg(&vm->xe->drm, "After page collect data location is %sin \"%s\".\n",
1301 			xe_svm_range_has_pagemap(range, dpagemap) ? "" : "NOT ",
1302 			dpagemap ? dpagemap->drm->unique : "System.");
1303 	}
1304 
1305 	xe_svm_range_get_pages_us_stats_incr(gt, range, get_pages_start);
1306 	range_debug(range, "PAGE FAULT - BIND");
1307 
1308 	bind_start = xe_gt_stats_ktime_get();
1309 	xe_validation_guard(&vctx, &vm->xe->val, &exec, (struct xe_val_flags) {}, err) {
1310 		err = xe_vm_drm_exec_lock(vm, &exec);
1311 		drm_exec_retry_on_contention(&exec);
1312 
1313 		xe_vm_set_validation_exec(vm, &exec);
1314 		fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
1315 		xe_vm_set_validation_exec(vm, NULL);
1316 		if (IS_ERR(fence)) {
1317 			drm_exec_retry_on_contention(&exec);
1318 			err = PTR_ERR(fence);
1319 			xe_validation_retry_on_oom(&vctx, &err);
1320 			xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
1321 			break;
1322 		}
1323 	}
1324 	if (err)
1325 		goto err_out;
1326 
1327 	dma_fence_wait(fence, false);
1328 	dma_fence_put(fence);
1329 	xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
1330 
1331 out:
1332 	xe_svm_range_fault_us_stats_incr(gt, range, start);
1333 	return 0;
1334 
1335 err_out:
1336 	if (err == -EAGAIN) {
1337 		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
1338 		range_debug(range, "PAGE FAULT - RETRY BIND");
1339 		goto retry;
1340 	}
1341 
1342 	return err;
1343 }
1344 
1345 /**
1346  * xe_svm_handle_pagefault() - SVM handle page fault
1347  * @vm: The VM.
1348  * @vma: The CPU address mirror VMA.
1349  * @gt: The gt upon which the fault occurred.
1350  * @fault_addr: The GPU fault address.
1351  * @atomic: The fault atomic access bit.
1352  *
1353  * Create GPU bindings for a SVM page fault. Optionally migrate to device
1354  * memory.
1355  *
1356  * Return: 0 on success, negative error code on error.
1357  */
1358 int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
1359 			    struct xe_gt *gt, u64 fault_addr,
1360 			    bool atomic)
1361 {
1362 	int need_vram, ret;
1363 retry:
1364 	need_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic);
1365 	if (need_vram < 0)
1366 		return need_vram;
1367 
1368 	ret =  __xe_svm_handle_pagefault(vm, vma, gt, fault_addr,
1369 					 need_vram ? true : false);
1370 	if (ret == -EAGAIN) {
1371 		/*
1372 		 * Retry once on -EAGAIN to re-lookup the VMA, as the original VMA
1373 		 * may have been split by xe_svm_range_set_default_attr.
1374 		 */
1375 		vma = xe_vm_find_vma_by_addr(vm, fault_addr);
1376 		if (!vma)
1377 			return -EINVAL;
1378 
1379 		goto retry;
1380 	}
1381 	return ret;
1382 }
1383 
1384 /**
1385  * xe_svm_has_mapping() - SVM has mappings
1386  * @vm: The VM.
1387  * @start: Start address.
1388  * @end: End address.
1389  *
1390  * Check if an address range has SVM mappings.
1391  *
1392  * Return: True if address range has a SVM mapping, False otherwise
1393  */
1394 bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
1395 {
1396 	return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
1397 }
1398 
1399 /**
1400  * xe_svm_unmap_address_range - UNMAP SVM mappings and ranges
1401  * @vm: The VM
1402  * @start: start addr
1403  * @end: end addr
1404  *
1405  * This function UNMAPS svm ranges if start or end address are inside them.
1406  */
1407 void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
1408 {
1409 	struct drm_gpusvm_notifier *notifier, *next;
1410 
1411 	lockdep_assert_held_write(&vm->lock);
1412 
1413 	drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) {
1414 		struct drm_gpusvm_range *range, *__next;
1415 
1416 		drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
1417 			if (start > drm_gpusvm_range_start(range) ||
1418 			    end < drm_gpusvm_range_end(range)) {
1419 				if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
1420 					drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
1421 				drm_gpusvm_range_get(range);
1422 				__xe_svm_garbage_collector(vm, to_xe_range(range));
1423 				if (!list_empty(&to_xe_range(range)->garbage_collector_link)) {
1424 					spin_lock(&vm->svm.garbage_collector.lock);
1425 					list_del(&to_xe_range(range)->garbage_collector_link);
1426 					spin_unlock(&vm->svm.garbage_collector.lock);
1427 				}
1428 				drm_gpusvm_range_put(range);
1429 			}
1430 		}
1431 	}
1432 }
1433 
1434 /**
1435  * xe_svm_bo_evict() - SVM evict BO to system memory
1436  * @bo: BO to evict
1437  *
1438  * SVM evict BO to system memory. GPU SVM layer ensures all device pages
1439  * are evicted before returning.
1440  *
1441  * Return: 0 on success, standard error code otherwise
1442  */
1443 int xe_svm_bo_evict(struct xe_bo *bo)
1444 {
1445 	return drm_pagemap_evict_to_ram(&bo->devmem_allocation);
1446 }
1447 
1448 /**
1449  * xe_svm_range_find_or_insert() - Find or insert GPU SVM range
1450  * @vm: xe_vm pointer
1451  * @addr: address for which range needs to be found/inserted
1452  * @vma: Pointer to struct xe_vma which mirrors the CPU
1453  * @ctx: GPU SVM context
1454  *
1455  * This function finds or inserts a newly allocated SVM range based on the
1456  * address.
1457  *
1458  * Return: Pointer to the SVM range on success, ERR_PTR() on failure.
1459  */
1460 struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
1461 						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
1462 {
1463 	struct drm_gpusvm_range *r;
1464 
1465 	r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
1466 					    xe_vma_start(vma), xe_vma_end(vma), ctx);
1467 	if (IS_ERR(r))
1468 		return ERR_CAST(r);
1469 
1470 	return to_xe_range(r);
1471 }
1472 
1473 /**
1474  * xe_svm_range_get_pages() - Get pages for a SVM range
1475  * @vm: Pointer to the struct xe_vm
1476  * @range: Pointer to the xe SVM range structure
1477  * @ctx: GPU SVM context
1478  *
1479  * This function gets pages for a SVM range and ensures they are mapped for
1480  * DMA access. In case of failure with -EOPNOTSUPP, it evicts the range.
1481  *
1482  * Return: 0 on success, negative error code on failure.
1483  */
1484 int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
1485 			   struct drm_gpusvm_ctx *ctx)
1486 {
1487 	int err = 0;
1488 
1489 	err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
1490 	if (err == -EOPNOTSUPP) {
1491 		range_debug(range, "PAGE FAULT - EVICT PAGES");
1492 		drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
1493 	}
1494 
1495 	return err;
1496 }
1497 
1498 /**
1499  * xe_svm_ranges_zap_ptes_in_range - Clear PTEs of SVM ranges in the input range
1500  * @vm: Pointer to the xe_vm structure
1501  * @start: Start of the input range
1502  * @end: End of the input range
1503  *
1504  * This function removes the page table entries (PTEs) associated
1505  * with the SVM ranges within the given input start and end.
1506  *
1507  * Return: tile_mask of tiles whose GTs need TLB invalidation.
1508  */
1509 u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
1510 {
1511 	struct drm_gpusvm_notifier *notifier;
1512 	struct xe_svm_range *range;
1513 	u64 adj_start, adj_end;
1514 	struct xe_tile *tile;
1515 	u8 tile_mask = 0;
1516 	u8 id;
1517 
1518 	lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
1519 		       lockdep_is_held_type(&vm->lock, 0));
1520 
1521 	drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) {
1522 		struct drm_gpusvm_range *r = NULL;
1523 
1524 		adj_start = max(start, drm_gpusvm_notifier_start(notifier));
1525 		adj_end = min(end, drm_gpusvm_notifier_end(notifier));
1526 		drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) {
1527 			range = to_xe_range(r);
1528 			for_each_tile(tile, vm->xe, id) {
1529 				if (xe_pt_zap_ptes_range(tile, vm, range)) {
1530 					tile_mask |= BIT(id);
1531 					/*
1532 					 * WRITE_ONCE pairs with READ_ONCE in
1533 					 * xe_vm_has_valid_gpu_mapping().
1534 					 * Must not fail after setting
1535 					 * tile_invalidated and before
1536 					 * TLB invalidation.
1537 					 */
1538 					WRITE_ONCE(range->tile_invalidated,
1539 						   range->tile_invalidated | BIT(id));
1540 				}
1541 			}
1542 		}
1543 	}
1544 
1545 	return tile_mask;
1546 }
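
/*
 * Illustrative usage sketch (the exact caller is assumed, e.g. an
 * attribute-change path): with vm->lock held for writing and the gpusvm
 * notifier lock held, the returned mask is fed into a TLB invalidation
 * such as xe_vm_range_tilemask_tlb_inval():
 *
 *	tile_mask = xe_svm_ranges_zap_ptes_in_range(vm, start, end);
 *	if (tile_mask)
 *		err = xe_vm_range_tilemask_tlb_inval(vm, start, end, tile_mask);
 */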
1547 
1548 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
1549 
1550 /**
1551  * xe_vma_resolve_pagemap - Resolve the appropriate DRM pagemap for a VMA
1552  * @vma: Pointer to the xe_vma structure containing memory attributes
1553  * @tile: Pointer to the xe_tile structure used as fallback for VRAM mapping
1554  *
1555  * This function determines the correct DRM pagemap to use for a given VMA.
1556  * It first checks if a valid devmem_fd is provided in the VMA's preferred
1557  * location. If the devmem_fd is negative, it returns NULL, indicating no
1558  * pagemap is available and that SMEM is to be used as the preferred location.
1559  * If the devmem_fd is equal to the default faulting
1560  * GT identifier, it returns the VRAM pagemap associated with the tile.
1561  *
1562  * Future support for multi-device configurations may use drm_pagemap_from_fd()
1563  * to resolve pagemaps from arbitrary file descriptors.
1564  *
1565  * Return: A pointer to the resolved drm_pagemap, or NULL if none is applicable.
1566  */
1567 struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
1568 {
1569 	struct drm_pagemap *dpagemap = vma->attr.preferred_loc.dpagemap;
1570 	s32 fd;
1571 
1572 	if (dpagemap)
1573 		return dpagemap;
1574 
1575 	fd = (s32)vma->attr.preferred_loc.devmem_fd;
1576 
1577 	if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM)
1578 		return NULL;
1579 
1580 	if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE)
1581 		return IS_DGFX(tile_to_xe(tile)) ? xe_tile_local_pagemap(tile) : NULL;
1582 
1583 	return NULL;
1584 }
1585 
1586 /**
1587  * xe_svm_alloc_vram() - Allocate device memory pages for range,
1588  * migrating existing data.
1589  * @range: SVM range
1590  * @ctx: DRM GPU SVM context
1591  * @dpagemap: The struct drm_pagemap representing the memory to allocate.
1592  *
1593  * Return: 0 on success, error code on failure.
1594  */
1595 int xe_svm_alloc_vram(struct xe_svm_range *range, const struct drm_gpusvm_ctx *ctx,
1596 		      struct drm_pagemap *dpagemap)
1597 {
1598 	static DECLARE_RWSEM(driver_migrate_lock);
1599 	struct xe_vm *vm = range_to_vm(&range->base);
1600 	enum drm_gpusvm_scan_result migration_state;
1601 	struct xe_device *xe = vm->xe;
1602 	int err, retries = 1;
1603 	bool write_locked = false;
1604 
1605 	xe_assert(range_to_vm(&range->base)->xe, range->base.pages.flags.migrate_devmem);
1606 	range_debug(range, "ALLOCATE VRAM");
1607 
1608 	migration_state = drm_gpusvm_scan_mm(&range->base,
1609 					     xe_svm_private_page_owner(vm, false),
1610 					     dpagemap->pagemap);
1611 
1612 	if (migration_state == DRM_GPUSVM_SCAN_EQUAL) {
1613 		if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM))
1614 			drm_dbg(dpagemap->drm, "Already migrated!\n");
1615 		return 0;
1616 	}
1617 
1618 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM))
1619 		drm_dbg(&xe->drm, "Request migration to device memory on \"%s\".\n",
1620 			dpagemap->drm->unique);
1621 
1622 	err = down_read_interruptible(&driver_migrate_lock);
1623 	if (err)
1624 		return err;
1625 	do {
1626 		err = drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
1627 					      xe_svm_range_end(range),
1628 					      range->base.gpusvm->mm,
1629 					      ctx->timeslice_ms);
1630 
1631 		if (err == -EBUSY && retries) {
1632 			if (!write_locked) {
1633 				int lock_err;
1634 
1635 				up_read(&driver_migrate_lock);
1636 				lock_err = down_write_killable(&driver_migrate_lock);
1637 				if (lock_err)
1638 					return lock_err;
1639 				write_locked = true;
1640 			}
1641 			drm_gpusvm_range_evict(range->base.gpusvm, &range->base);
1642 		}
1643 	} while (err == -EBUSY && retries--);
1644 	if (write_locked)
1645 		up_write(&driver_migrate_lock);
1646 	else
1647 		up_read(&driver_migrate_lock);
1648 
1649 	return err;
1650 }
1651 
1652 static struct drm_pagemap_addr
1653 xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
1654 			  struct device *dev,
1655 			  struct page *page,
1656 			  unsigned int order,
1657 			  enum dma_data_direction dir)
1658 {
1659 	struct device *pgmap_dev = dpagemap->drm->dev;
1660 	enum drm_interconnect_protocol prot;
1661 	dma_addr_t addr;
1662 
1663 	if (pgmap_dev == dev) {
1664 		addr = xe_page_to_dpa(page);
1665 		prot = XE_INTERCONNECT_VRAM;
1666 	} else {
1667 		addr = dma_map_resource(dev,
1668 					xe_page_to_pcie(page),
1669 					PAGE_SIZE << order, dir,
1670 					DMA_ATTR_SKIP_CPU_SYNC);
1671 		prot = XE_INTERCONNECT_P2P;
1672 	}
1673 
1674 	return drm_pagemap_addr_encode(addr, prot, order, dir);
1675 }
1676 
1677 static void xe_drm_pagemap_device_unmap(struct drm_pagemap *dpagemap,
1678 					struct device *dev,
1679 					struct drm_pagemap_addr addr)
1680 {
1681 	if (addr.proto != XE_INTERCONNECT_P2P)
1682 		return;
1683 
1684 	dma_unmap_resource(dev, addr.addr, PAGE_SIZE << addr.order,
1685 			   addr.dir, DMA_ATTR_SKIP_CPU_SYNC);
1686 }
1687 
1688 static void xe_pagemap_destroy_work(struct work_struct *work)
1689 {
1690 	struct xe_pagemap *xpagemap = container_of(work, typeof(*xpagemap), destroy_work);
1691 	struct dev_pagemap *pagemap = &xpagemap->pagemap;
1692 	struct drm_device *drm = xpagemap->dpagemap.drm;
1693 	int idx;
1694 
1695 	/*
1696 	 * Only unmap / release if the devm_ release actions haven't run yet.
1697 	 * Otherwise the devm_ callbacks have already released the resources,
1698 	 * or will do so shortly.
1699 	 */
1700 	if (drm_dev_enter(drm, &idx)) {
1701 		devm_memunmap_pages(drm->dev, pagemap);
1702 		devm_release_mem_region(drm->dev, pagemap->range.start,
1703 					pagemap->range.end - pagemap->range.start + 1);
1704 		drm_dev_exit(idx);
1705 	}
1706 
1707 	drm_pagemap_release_owner(&xpagemap->peer);
1708 	kfree(xpagemap);
1709 }
1710 
1711 static void xe_pagemap_destroy(struct drm_pagemap *dpagemap, bool from_atomic_or_reclaim)
1712 {
1713 	struct xe_pagemap *xpagemap = container_of(dpagemap, typeof(*xpagemap), dpagemap);
1714 	struct xe_device *xe = to_xe_device(dpagemap->drm);
1715 
1716 	if (from_atomic_or_reclaim)
1717 		queue_work(xe->destroy_wq, &xpagemap->destroy_work);
1718 	else
1719 		xe_pagemap_destroy_work(&xpagemap->destroy_work);
1720 }
1721 
1722 static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
1723 	.device_map = xe_drm_pagemap_device_map,
1724 	.device_unmap = xe_drm_pagemap_device_unmap,
1725 	.populate_mm = xe_drm_pagemap_populate_mm,
1726 	.destroy = xe_pagemap_destroy,
1727 };
1728 
1729 /**
1730  * xe_pagemap_create() - Create a struct xe_pagemap object
1731  * @xe: The xe device.
1732  * @vr: Back-pointer to the struct xe_vram_region.
1733  *
1734  * Allocate and initialize a struct xe_pagemap. On successful
1735  * return, drm_pagemap_put() on the embedded struct drm_pagemap
1736  * should be used to unreference.
1737  *
1738  * Return: Pointer to a struct xe_pagemap if successful. Error pointer
1739  * on failure.
1740  */
1741 static struct xe_pagemap *xe_pagemap_create(struct xe_device *xe, struct xe_vram_region *vr)
1742 {
1743 	struct device *dev = xe->drm.dev;
1744 	struct xe_pagemap *xpagemap;
1745 	struct dev_pagemap *pagemap;
1746 	struct drm_pagemap *dpagemap;
1747 	struct resource *res;
1748 	void *addr;
1749 	int err;
1750 
1751 	xpagemap = kzalloc(sizeof(*xpagemap), GFP_KERNEL);
1752 	if (!xpagemap)
1753 		return ERR_PTR(-ENOMEM);
1754 
1755 	pagemap = &xpagemap->pagemap;
1756 	dpagemap = &xpagemap->dpagemap;
1757 	INIT_WORK(&xpagemap->destroy_work, xe_pagemap_destroy_work);
1758 	xpagemap->vr = vr;
1759 	xpagemap->peer.private = XE_PEER_PAGEMAP;
1760 
1761 	err = drm_pagemap_init(dpagemap, pagemap, &xe->drm, &xe_drm_pagemap_ops);
1762 	if (err)
1763 		goto out_no_dpagemap;
1764 
1765 	res = devm_request_free_mem_region(dev, &iomem_resource,
1766 					   vr->usable_size);
1767 	if (IS_ERR(res)) {
1768 		err = PTR_ERR(res);
1769 		goto out_err;
1770 	}
1771 
1772 	err = drm_pagemap_acquire_owner(&xpagemap->peer, &xe_owner_list,
1773 					xe_has_interconnect);
1774 	if (err)
1775 		goto out_no_owner;
1776 
1777 	pagemap->type = MEMORY_DEVICE_PRIVATE;
1778 	pagemap->range.start = res->start;
1779 	pagemap->range.end = res->end;
1780 	pagemap->nr_range = 1;
1781 	pagemap->owner = xpagemap->peer.owner;
1782 	pagemap->ops = drm_pagemap_pagemap_ops_get();
1783 	addr = devm_memremap_pages(dev, pagemap);
1784 	if (IS_ERR(addr)) {
1785 		err = PTR_ERR(addr);
1786 		goto out_no_pages;
1787 	}
1788 	xpagemap->hpa_base = res->start;
1789 	return xpagemap;
1790 
1791 out_no_pages:
1792 	drm_pagemap_release_owner(&xpagemap->peer);
1793 out_no_owner:
1794 	devm_release_mem_region(dev, res->start, res->end - res->start + 1);
1795 out_err:
1796 	drm_pagemap_put(dpagemap);
1797 	return ERR_PTR(err);
1798 
1799 out_no_dpagemap:
1800 	kfree(xpagemap);
1801 	return ERR_PTR(err);
1802 }
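
/*
 * Illustrative sketch (not part of the driver) of the lifetime contract
 * documented above: a successful xe_pagemap_create() is paired with a
 * drm_pagemap_put() on the embedded drm_pagemap once the caller is done
 * with it:
 *
 *	struct xe_pagemap *xpagemap = xe_pagemap_create(xe, vr);
 *
 *	if (IS_ERR(xpagemap))
 *		return PTR_ERR(xpagemap);
 *	...
 *	drm_pagemap_put(&xpagemap->dpagemap);
 */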
1803 
1804 /**
1805  * xe_pagemap_find_or_create() - Find or create a struct xe_pagemap
1806  * @xe: The xe device.
1807  * @cache: The struct drm_pagemap_cache.
1808  * @vr: The VRAM region.
1809  *
1810  * Check if there is an xe_pagemap already in use for this tile and, in that
1811  * case, return it.
1812  * If not, check if there is a cached xe_pagemap for this tile and, in that
1813  * case, cancel its pending destruction, re-initialize it and return it.
1814  * Finally, if there is no cached or in-use pagemap, create one and
1815  * register it in the tile's pagemap cache.
1816  *
1817  * Note that this function is typically called from within an IOCTL, and waits
1818  * are therefore carried out interruptibly if possible.
1819  *
1820  * Return: A pointer to a struct xe_pagemap if successful, error pointer on failure.
1821  */
1822 static struct xe_pagemap *
1823 xe_pagemap_find_or_create(struct xe_device *xe, struct drm_pagemap_cache *cache,
1824 			  struct xe_vram_region *vr)
1825 {
1826 	struct drm_pagemap *dpagemap;
1827 	struct xe_pagemap *xpagemap;
1828 	int err;
1829 
1830 	err = drm_pagemap_cache_lock_lookup(cache);
1831 	if (err)
1832 		return ERR_PTR(err);
1833 
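	/*
	 * drm_pagemap_get_from_cache() returns NULL if nothing is cached,
	 * an error pointer on failure, or a referenced drm_pagemap whose
	 * pending destruction, if any, has been cancelled.
	 */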
1834 	dpagemap = drm_pagemap_get_from_cache(cache);
1835 	if (IS_ERR(dpagemap)) {
1836 		xpagemap = ERR_CAST(dpagemap);
1837 	} else if (!dpagemap) {
1838 		xpagemap = xe_pagemap_create(xe, vr);
1839 		if (IS_ERR(xpagemap))
1840 			goto out_unlock;
1841 		drm_pagemap_cache_set_pagemap(cache, &xpagemap->dpagemap);
1842 	} else {
1843 		xpagemap = container_of(dpagemap, typeof(*xpagemap), dpagemap);
1844 	}
1845 
1846 out_unlock:
1847 	drm_pagemap_cache_unlock_lookup(cache);
1848 	return xpagemap;
1849 }
1850 
1851 static int xe_svm_get_pagemaps(struct xe_vm *vm)
1852 {
1853 	struct xe_device *xe = vm->xe;
1854 	struct xe_pagemap *xpagemap = NULL;
1855 	struct xe_tile *tile;
1856 	int id;
1857 
1858 	for_each_tile(tile, xe, id) {
1859 		struct xe_vram_region *vr;
1860 
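		/* Skip tiles whose VRAM region bit is not set in mem_region_mask. */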
1861 		if (!((BIT(id) << 1) & xe->info.mem_region_mask))
1862 			continue;
1863 
1864 		vr = xe_tile_to_vr(tile);
1865 		xpagemap = xe_pagemap_find_or_create(xe, vr->dpagemap_cache, vr);
1866 		if (IS_ERR(xpagemap))
1867 			break;
1868 		vm->svm.pagemaps[id] = xpagemap;
1869 	}
1870 
1871 	if (IS_ERR(xpagemap)) {
1872 		xe_svm_put_pagemaps(vm);
1873 		return PTR_ERR(xpagemap);
1874 	}
1875 
1876 	return 0;
1877 }
1878 
1879 /**
1880  * xe_pagemap_shrinker_create() - Create a drm_pagemap shrinker
1881  * @xe: The xe device
1882  *
1883  * Create a drm_pagemap shrinker and register it with the xe device.
1884  *
1885  * Return: %0 on success, negative error code on failure.
1886  */
1887 int xe_pagemap_shrinker_create(struct xe_device *xe)
1888 {
1889 	xe->usm.dpagemap_shrinker = drm_pagemap_shrinker_create_devm(&xe->drm);
1890 	return PTR_ERR_OR_ZERO(xe->usm.dpagemap_shrinker);
1891 }
1892 
1893 /**
1894  * xe_pagemap_cache_create() - Create a drm_pagemap cache
1895  * @tile: The tile to register the cache with
1896  *
1897  * Create a drm_pagemap cache and register it with the tile.
1898  *
1899  * Return: %0 on success, negative error code on failure.
1900  */
1901 int xe_pagemap_cache_create(struct xe_tile *tile)
1902 {
1903 	struct xe_device *xe = tile_to_xe(tile);
1904 
1905 	if (IS_DGFX(xe)) {
1906 		struct drm_pagemap_cache *cache =
1907 			drm_pagemap_cache_create_devm(xe->usm.dpagemap_shrinker);
1908 
1909 		if (IS_ERR(cache))
1910 			return PTR_ERR(cache);
1911 
1912 		tile->mem.vram->dpagemap_cache = cache;
1913 	}
1914 
1915 	return 0;
1916 }
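
/*
 * Illustrative init ordering (a sketch; the actual init code lives
 * elsewhere): since xe_pagemap_cache_create() attaches the per-tile cache
 * to the device-wide shrinker, the shrinker is created first:
 *
 *	err = xe_pagemap_shrinker_create(xe);
 *	if (err)
 *		return err;
 *
 *	for_each_tile(tile, xe, id) {
 *		err = xe_pagemap_cache_create(tile);
 *		if (err)
 *			return err;
 *	}
 */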
1917 
1918 static struct drm_pagemap *xe_devmem_open(struct xe_device *xe, u32 region_instance)
1919 {
1920 	u32 tile_id = region_instance - 1;
1921 	struct xe_pagemap *xpagemap;
1922 	struct xe_vram_region *vr;
1923 
1924 	if (tile_id >= xe->info.tile_count)
1925 		return ERR_PTR(-ENOENT);
1926 
1927 	if (!((BIT(tile_id) << 1) & xe->info.mem_region_mask))
1928 		return ERR_PTR(-ENOENT);
1929 
1930 	vr = xe_tile_to_vr(&xe->tiles[tile_id]);
1931 
1932 	/* Returns a reference-counted embedded struct drm_pagemap */
1933 	xpagemap = xe_pagemap_find_or_create(xe, vr->dpagemap_cache, vr);
1934 	if (IS_ERR(xpagemap))
1935 		return ERR_CAST(xpagemap);
1936 
1937 	return &xpagemap->dpagemap;
1938 }
1939 
1940 /**
1941  * xe_drm_pagemap_from_fd() - Return a drm_pagemap pointer from a
1942  * (file_descriptor, region_instance) pair.
1943  * @fd: An fd opened against an xe device.
1944  * @region_instance: The region instance representing the device memory
1945  * on the opened xe device.
1946  *
1947  * Looks up the struct drm_pagemap corresponding to the indicated
1948  * device and region_instance and returns a reference to it.
1949  *
1950  * Return: A reference-counted struct drm_pagemap pointer on success,
1951  * negative error pointer on failure.
1952  */
1953 struct drm_pagemap *xe_drm_pagemap_from_fd(int fd, u32 region_instance)
1954 {
1955 	struct drm_pagemap *dpagemap;
1956 	struct file *file;
1957 	struct drm_file *fpriv;
1958 	struct drm_device *drm;
1959 	int idx;
1960 
1961 	if (fd <= 0)
1962 		return ERR_PTR(-EINVAL);
1963 
1964 	file = fget(fd);
1965 	if (!file)
1966 		return ERR_PTR(-ENOENT);
1967 
1968 	if (!xe_is_xe_file(file)) {
1969 		dpagemap = ERR_PTR(-ENOENT);
1970 		goto out;
1971 	}
1972 
1973 	fpriv = file->private_data;
1974 	drm = fpriv->minor->dev;
1975 	if (!drm_dev_enter(drm, &idx)) {
1976 		dpagemap = ERR_PTR(-ENODEV);
1977 		goto out;
1978 	}
1979 
1980 	dpagemap = xe_devmem_open(to_xe_device(drm), region_instance);
1981 	drm_dev_exit(idx);
1982 out:
1983 	fput(file);
1984 	return dpagemap;
1985 }
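
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * returned reference is dropped with drm_pagemap_put() when no longer
 * needed:
 *
 *	struct drm_pagemap *dpagemap;
 *
 *	dpagemap = xe_drm_pagemap_from_fd(fd, region_instance);
 *	if (IS_ERR(dpagemap))
 *		return PTR_ERR(dpagemap);
 *	...
 *	drm_pagemap_put(dpagemap);
 */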
1986 
1987 #else
1988 
1989 int xe_pagemap_shrinker_create(struct xe_device *xe)
1990 {
1991 	return 0;
1992 }
1993 
1994 int xe_pagemap_cache_create(struct xe_tile *tile)
1995 {
1996 	return 0;
1997 }
1998 
1999 int xe_svm_alloc_vram(struct xe_svm_range *range,
2000 		      const struct drm_gpusvm_ctx *ctx,
2001 		      struct drm_pagemap *dpagemap)
2002 {
2003 	return -EOPNOTSUPP;
2004 }
2005 
2006 struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
2007 {
2008 	return NULL;
2009 }
2010 
2011 struct drm_pagemap *xe_drm_pagemap_from_fd(int fd, u32 region_instance)
2012 {
2013 	return ERR_PTR(-ENOENT);
2014 }
2015 
2016 #endif
2017 
2018 /**
2019  * xe_svm_flush() - SVM flush
2020  * @vm: The VM.
2021  *
2022  * Flush all SVM actions.
2023  */
2024 void xe_svm_flush(struct xe_vm *vm)
2025 {
2026 	if (xe_vm_in_fault_mode(vm))
2027 		flush_work(&vm->svm.garbage_collector.work);
2028 }
2029