xref: /linux/drivers/gpu/drm/xe/xe_svm.c (revision 38fc73b8c7d692a099ddda37b700eeb330a03ff1)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2024 Intel Corporation
4  */
5 
6 #include <drm/drm_drv.h>
7 
8 #include "xe_bo.h"
9 #include "xe_gt_stats.h"
10 #include "xe_gt_tlb_invalidation.h"
11 #include "xe_migrate.h"
12 #include "xe_module.h"
13 #include "xe_pm.h"
14 #include "xe_pt.h"
15 #include "xe_svm.h"
16 #include "xe_tile.h"
17 #include "xe_ttm_vram_mgr.h"
18 #include "xe_vm.h"
19 #include "xe_vm_types.h"
20 #include "xe_vram_types.h"
21 
22 static bool xe_svm_range_in_vram(struct xe_svm_range *range)
23 {
24 	/*
25 	 * Advisory-only check of whether the range is currently backed by
26 	 * VRAM memory.
27 	 */
28 
29 	struct drm_gpusvm_range_flags flags = {
30 		/* Pairs with WRITE_ONCE in drm_gpusvm.c */
31 		.__flags = READ_ONCE(range->base.flags.__flags),
32 	};
33 
34 	return flags.has_devmem_pages;
35 }
36 
37 static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
38 {
39 	/* Not reliable without notifier lock */
40 	return xe_svm_range_in_vram(range) && range->tile_present;
41 }
42 
43 static struct xe_vm *gpusvm_to_vm(struct drm_gpusvm *gpusvm)
44 {
45 	return container_of(gpusvm, struct xe_vm, svm.gpusvm);
46 }
47 
48 static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
49 {
50 	return gpusvm_to_vm(r->gpusvm);
51 }
52 
53 #define range_debug(r__, operation__)					\
54 	vm_dbg(&range_to_vm(&(r__)->base)->xe->drm,			\
55 	       "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
56 	       "start=0x%014lx, end=0x%014lx, size=%lu",		\
57 	       (operation__), range_to_vm(&(r__)->base)->usm.asid,	\
58 	       (r__)->base.gpusvm,					\
59 	       xe_svm_range_in_vram((r__)) ? 1 : 0,			\
60 	       xe_svm_range_has_vram_binding((r__)) ? 1 : 0,		\
61 	       (r__)->base.notifier_seq,				\
62 	       xe_svm_range_start((r__)), xe_svm_range_end((r__)),	\
63 	       xe_svm_range_size((r__)))
64 
65 void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
66 {
67 	range_debug(range, operation);
68 }
69 
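/*
 * Device-private page owner cookie: set as the pagemap owner in xe_devm_add()
 * and passed to drm_gpusvm_init() and drm_pagemap_migrate_to_devmem(), so
 * that only pages belonging to this xe_device are recognized as driver-owned
 * device memory.
 */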
70 static void *xe_svm_devm_owner(struct xe_device *xe)
71 {
72 	return xe;
73 }
74 
75 static struct drm_gpusvm_range *
76 xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
77 {
78 	struct xe_svm_range *range;
79 
80 	range = kzalloc(sizeof(*range), GFP_KERNEL);
81 	if (!range)
82 		return NULL;
83 
84 	INIT_LIST_HEAD(&range->garbage_collector_link);
85 	xe_vm_get(gpusvm_to_vm(gpusvm));
86 
87 	return &range->base;
88 }
89 
90 static void xe_svm_range_free(struct drm_gpusvm_range *range)
91 {
92 	xe_vm_put(range_to_vm(range));
93 	kfree(range);
94 }
95 
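/*
 * Ranges invalidated by an UNMAP event are queued here; the actual unbind and
 * range removal happen later, from process context, in
 * xe_svm_garbage_collector(), which runs with vm->lock held in write mode.
 */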
96 static void
97 xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
98 				   const struct mmu_notifier_range *mmu_range)
99 {
100 	struct xe_device *xe = vm->xe;
101 
102 	range_debug(range, "GARBAGE COLLECTOR ADD");
103 
104 	drm_gpusvm_range_set_unmapped(&range->base, mmu_range);
105 
106 	spin_lock(&vm->svm.garbage_collector.lock);
107 	if (list_empty(&range->garbage_collector_link))
108 		list_add_tail(&range->garbage_collector_link,
109 			      &vm->svm.garbage_collector.range_list);
110 	spin_unlock(&vm->svm.garbage_collector.lock);
111 
112 	queue_work(xe_device_get_root_tile(xe)->primary_gt->usm.pf_wq,
113 		   &vm->svm.garbage_collector.work);
114 }
115 
116 static u8
117 xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
118 				  const struct mmu_notifier_range *mmu_range,
119 				  u64 *adj_start, u64 *adj_end)
120 {
121 	struct xe_svm_range *range = to_xe_range(r);
122 	struct xe_device *xe = vm->xe;
123 	struct xe_tile *tile;
124 	u8 tile_mask = 0;
125 	u8 id;
126 
127 	xe_svm_assert_in_notifier(vm);
128 
129 	range_debug(range, "NOTIFIER");
130 
131 	/* Skip if already unmapped or if no bindings exist */
132 	if (range->base.flags.unmapped || !range->tile_present)
133 		return 0;
134 
135 	range_debug(range, "NOTIFIER - EXECUTE");
136 
137 	/* Adjust invalidation to range boundaries */
138 	*adj_start = min(xe_svm_range_start(range), mmu_range->start);
139 	*adj_end = max(xe_svm_range_end(range), mmu_range->end);
140 
141 	/*
142 	 * XXX: Ideally would zap PTEs in one shot in xe_svm_invalidate but the
143 	 * invalidation code can't correctly cope with sparse ranges or
144 	 * invalidations spanning multiple ranges.
145 	 */
146 	for_each_tile(tile, xe, id)
147 		if (xe_pt_zap_ptes_range(tile, vm, range)) {
148 			tile_mask |= BIT(id);
149 			/*
150 			 * WRITE_ONCE pairs with READ_ONCE in
151 			 * xe_vm_has_valid_gpu_mapping()
152 			 */
153 			WRITE_ONCE(range->tile_invalidated,
154 				   range->tile_invalidated | BIT(id));
155 		}
156 
157 	return tile_mask;
158 }
159 
160 static void
161 xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r,
162 				const struct mmu_notifier_range *mmu_range)
163 {
164 	struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
165 
166 	xe_svm_assert_in_notifier(vm);
167 
168 	drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx);
169 	if (!xe_vm_is_closed(vm) && mmu_range->event == MMU_NOTIFY_UNMAP)
170 		xe_svm_garbage_collector_add_range(vm, to_xe_range(r),
171 						   mmu_range);
172 }
173 
174 static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
175 			      struct drm_gpusvm_notifier *notifier,
176 			      const struct mmu_notifier_range *mmu_range)
177 {
178 	struct xe_vm *vm = gpusvm_to_vm(gpusvm);
179 	struct xe_device *xe = vm->xe;
180 	struct drm_gpusvm_range *r, *first;
181 	u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
182 	u8 tile_mask = 0;
183 	long err;
184 
185 	xe_svm_assert_in_notifier(vm);
186 
187 	vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm,
188 	       "INVALIDATE: asid=%u, gpusvm=%p, seqno=%lu, start=0x%016lx, end=0x%016lx, event=%d",
189 	       vm->usm.asid, gpusvm, notifier->notifier.invalidate_seq,
190 	       mmu_range->start, mmu_range->end, mmu_range->event);
191 
192 	/* Adjust invalidation to notifier boundaries */
193 	adj_start = max(drm_gpusvm_notifier_start(notifier), adj_start);
194 	adj_end = min(drm_gpusvm_notifier_end(notifier), adj_end);
195 
196 	first = drm_gpusvm_range_find(notifier, adj_start, adj_end);
197 	if (!first)
198 		return;
199 
200 	/*
201 	 * PTs may be getting destroyed, so it is not safe to touch them, but the
202 	 * PTs should already be invalidated at this point. Regardless, we still
203 	 * need to ensure any DMA mappings are unmapped here.
204 	 */
205 	if (xe_vm_is_closed(vm))
206 		goto range_notifier_event_end;
207 
208 	/*
209 	 * XXX: Less than ideal to always wait on VM's resv slots if an
210 	 * invalidation is not required. Could walk range list twice to figure
211 	 * out if an invalidation is needed, but that is also not ideal.
212 	 */
213 	err = dma_resv_wait_timeout(xe_vm_resv(vm),
214 				    DMA_RESV_USAGE_BOOKKEEP,
215 				    false, MAX_SCHEDULE_TIMEOUT);
216 	XE_WARN_ON(err <= 0);
217 
218 	r = first;
219 	drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
220 		tile_mask |= xe_svm_range_notifier_event_begin(vm, r, mmu_range,
221 							       &adj_start,
222 							       &adj_end);
223 	if (!tile_mask)
224 		goto range_notifier_event_end;
225 
226 	xe_device_wmb(xe);
227 
228 	err = xe_vm_range_tilemask_tlb_invalidation(vm, adj_start, adj_end, tile_mask);
229 	WARN_ON_ONCE(err);
230 
231 range_notifier_event_end:
232 	r = first;
233 	drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
234 		xe_svm_range_notifier_event_end(vm, r, mmu_range);
235 }
236 
237 static int __xe_svm_garbage_collector(struct xe_vm *vm,
238 				      struct xe_svm_range *range)
239 {
240 	struct dma_fence *fence;
241 
242 	range_debug(range, "GARBAGE COLLECTOR");
243 
244 	xe_vm_lock(vm, false);
245 	fence = xe_vm_range_unbind(vm, range);
246 	xe_vm_unlock(vm);
247 	if (IS_ERR(fence))
248 		return PTR_ERR(fence);
249 	dma_fence_put(fence);
250 
251 	drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);
252 
253 	return 0;
254 }
255 
256 static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64 range_end)
257 {
258 	struct xe_vma *vma;
259 	struct xe_vma_mem_attr default_attr = {
260 		.preferred_loc = {
261 			.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
262 			.migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
263 		},
264 		.atomic_access = DRM_XE_ATOMIC_UNDEFINED,
265 	};
266 	int err = 0;
267 
268 	vma = xe_vm_find_vma_by_addr(vm, range_start);
269 	if (!vma)
270 		return -EINVAL;
271 
272 	if (xe_vma_has_default_mem_attrs(vma))
273 		return 0;
274 
275 	vm_dbg(&vm->xe->drm, "Existing VMA start=0x%016llx, vma_end=0x%016llx",
276 	       xe_vma_start(vma), xe_vma_end(vma));
277 
278 	if (xe_vma_start(vma) == range_start && xe_vma_end(vma) == range_end) {
279 		default_attr.pat_index = vma->attr.default_pat_index;
280 		default_attr.default_pat_index = vma->attr.default_pat_index;
281 		vma->attr = default_attr;
282 	} else {
283 		vm_dbg(&vm->xe->drm, "Split VMA start=0x%016llx, vma_end=0x%016llx",
284 		       range_start, range_end);
285 		err = xe_vm_alloc_cpu_addr_mirror_vma(vm, range_start, range_end - range_start);
286 		if (err) {
287 			drm_warn(&vm->xe->drm, "VMA SPLIT failed: %pe\n", ERR_PTR(err));
288 			xe_vm_kill(vm, true);
289 			return err;
290 		}
291 	}
292 
293 	/*
294 	 * When called from xe_svm_handle_pagefault() the original VMA might have
295 	 * changed; signal this so the caller looks up the VMA again.
296 	 */
297 	return -EAGAIN;
298 }
299 
300 static int xe_svm_garbage_collector(struct xe_vm *vm)
301 {
302 	struct xe_svm_range *range;
303 	u64 range_start;
304 	u64 range_end;
305 	int err, ret = 0;
306 
307 	lockdep_assert_held_write(&vm->lock);
308 
309 	if (xe_vm_is_closed_or_banned(vm))
310 		return -ENOENT;
311 
312 	spin_lock(&vm->svm.garbage_collector.lock);
313 	for (;;) {
314 		range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
315 						 typeof(*range),
316 						 garbage_collector_link);
317 		if (!range)
318 			break;
319 
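		/*
		 * Snapshot the boundaries now: the range may be freed once
		 * __xe_svm_garbage_collector() removes it from GPU SVM below.
		 */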
320 		range_start = xe_svm_range_start(range);
321 		range_end = xe_svm_range_end(range);
322 
323 		list_del(&range->garbage_collector_link);
324 		spin_unlock(&vm->svm.garbage_collector.lock);
325 
326 		err = __xe_svm_garbage_collector(vm, range);
327 		if (err) {
328 			drm_warn(&vm->xe->drm,
329 				 "Garbage collection failed: %pe\n",
330 				 ERR_PTR(err));
331 			xe_vm_kill(vm, true);
332 			return err;
333 		}
334 
335 		err = xe_svm_range_set_default_attr(vm, range_start, range_end);
336 		if (err) {
337 			if (err == -EAGAIN)
338 				ret = -EAGAIN;
339 			else
340 				return err;
341 		}
342 
343 		spin_lock(&vm->svm.garbage_collector.lock);
344 	}
345 	spin_unlock(&vm->svm.garbage_collector.lock);
346 
347 	return ret;
348 }
349 
350 static void xe_svm_garbage_collector_work_func(struct work_struct *w)
351 {
352 	struct xe_vm *vm = container_of(w, struct xe_vm,
353 					svm.garbage_collector.work);
354 
355 	down_write(&vm->lock);
356 	xe_svm_garbage_collector(vm);
357 	up_write(&vm->lock);
358 }
359 
360 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
361 
362 static struct xe_vram_region *page_to_vr(struct page *page)
363 {
364 	return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
365 }
366 
367 static u64 xe_vram_region_page_to_dpa(struct xe_vram_region *vr,
368 				      struct page *page)
369 {
370 	u64 dpa;
371 	u64 pfn = page_to_pfn(page);
372 	u64 offset;
373 
374 	xe_assert(vr->xe, is_device_private_page(page));
375 	xe_assert(vr->xe, (pfn << PAGE_SHIFT) >= vr->hpa_base);
376 
377 	offset = (pfn << PAGE_SHIFT) - vr->hpa_base;
378 	dpa = vr->dpa_base + offset;
379 
380 	return dpa;
381 }
382 
383 enum xe_svm_copy_dir {
384 	XE_SVM_COPY_TO_VRAM,
385 	XE_SVM_COPY_TO_SRAM,
386 };
387 
388 static int xe_svm_copy(struct page **pages,
389 		       struct drm_pagemap_addr *pagemap_addr,
390 		       unsigned long npages, const enum xe_svm_copy_dir dir)
391 {
392 	struct xe_vram_region *vr = NULL;
393 	struct xe_device *xe;
394 	struct dma_fence *fence = NULL;
395 	unsigned long i;
396 #define XE_VRAM_ADDR_INVALID	~0x0ull
397 	u64 vram_addr = XE_VRAM_ADDR_INVALID;
398 	int err = 0, pos = 0;
399 	bool sram = dir == XE_SVM_COPY_TO_SRAM;
400 
401 	/*
402 	 * This flow is complex: it locates physically contiguous device pages,
403 	 * derives the starting physical address, and performs a single GPU copy
404 	 * for every 8M chunk in a DMA address array. Both device pages and
405 	 * DMA addresses may be sparsely populated. If either is NULL, a copy is
406 	 * triggered based on the current search state. The last GPU copy is
407 	 * waited on to ensure all copies are complete.
408 	 */
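	/*
	 * For example, with 4 KiB pages a full chunk is SZ_8M / SZ_4K = 2048
	 * pages, so a physically contiguous 16M region is handed to the copy
	 * engine as two 2048-page copies.
	 */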
409 
410 	for (i = 0; i < npages; ++i) {
411 		struct page *spage = pages[i];
412 		struct dma_fence *__fence;
413 		u64 __vram_addr;
414 		bool match = false, chunk, last;
415 
416 #define XE_MIGRATE_CHUNK_SIZE	SZ_8M
417 		chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
418 		last = (i + 1) == npages;
419 
420 		/* No CPU page and no device pages queued to copy */
421 		if (!pagemap_addr[i].addr && vram_addr == XE_VRAM_ADDR_INVALID)
422 			continue;
423 
424 		if (!vr && spage) {
425 			vr = page_to_vr(spage);
426 			xe = vr->xe;
427 		}
428 		XE_WARN_ON(spage && page_to_vr(spage) != vr);
429 
430 		/*
431 		 * CPU page and device page valid, capture physical address on
432 		 * first device page, check if physical contiguous on subsequent
433 		 * device pages.
434 		 */
435 		if (pagemap_addr[i].addr && spage) {
436 			__vram_addr = xe_vram_region_page_to_dpa(vr, spage);
437 			if (vram_addr == XE_VRAM_ADDR_INVALID) {
438 				vram_addr = __vram_addr;
439 				pos = i;
440 			}
441 
442 			match = vram_addr + PAGE_SIZE * (i - pos) == __vram_addr;
443 			/* Expected with contiguous memory */
444 			xe_assert(vr->xe, match);
445 
446 			if (pagemap_addr[i].order) {
447 				i += NR_PAGES(pagemap_addr[i].order) - 1;
448 				chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
449 				last = (i + 1) == npages;
450 			}
451 		}
452 
453 		/*
454 		 * Mismatched physical address, 8M copy chunk, or last page -
455 		 * trigger a copy.
456 		 */
457 		if (!match || chunk || last) {
458 			/*
459 			 * Extra page for first copy if last page and matching
460 			 * physical address.
461 			 */
462 			int incr = (match && last) ? 1 : 0;
463 
464 			if (vram_addr != XE_VRAM_ADDR_INVALID) {
465 				if (sram) {
466 					vm_dbg(&xe->drm,
467 					       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
468 					       vram_addr,
469 					       (u64)pagemap_addr[pos].addr, i - pos + incr);
470 					__fence = xe_migrate_from_vram(vr->migrate,
471 								       i - pos + incr,
472 								       vram_addr,
473 								       &pagemap_addr[pos]);
474 				} else {
475 					vm_dbg(&xe->drm,
476 					       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
477 					       (u64)pagemap_addr[pos].addr, vram_addr,
478 					       i - pos + incr);
479 					__fence = xe_migrate_to_vram(vr->migrate,
480 								     i - pos + incr,
481 								     &pagemap_addr[pos],
482 								     vram_addr);
483 				}
484 				if (IS_ERR(__fence)) {
485 					err = PTR_ERR(__fence);
486 					goto err_out;
487 				}
488 
489 				dma_fence_put(fence);
490 				fence = __fence;
491 			}
492 
493 			/* Set up the physical address of the next device page */
494 			if (pagemap_addr[i].addr && spage) {
495 				vram_addr = __vram_addr;
496 				pos = i;
497 			} else {
498 				vram_addr = XE_VRAM_ADDR_INVALID;
499 			}
500 
501 			/* Extra mismatched device page, copy it */
502 			if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
503 				if (sram) {
504 					vm_dbg(&xe->drm,
505 					       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
506 					       vram_addr, (u64)pagemap_addr[pos].addr, 1);
507 					__fence = xe_migrate_from_vram(vr->migrate, 1,
508 								       vram_addr,
509 								       &pagemap_addr[pos]);
510 				} else {
511 					vm_dbg(&xe->drm,
512 					       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
513 					       (u64)pagemap_addr[pos].addr, vram_addr, 1);
514 					__fence = xe_migrate_to_vram(vr->migrate, 1,
515 								     &pagemap_addr[pos],
516 								     vram_addr);
517 				}
518 				if (IS_ERR(__fence)) {
519 					err = PTR_ERR(__fence);
520 					goto err_out;
521 				}
522 
523 				dma_fence_put(fence);
524 				fence = __fence;
525 			}
526 		}
527 	}
528 
529 err_out:
530 	/* Wait for all copies to complete */
531 	if (fence) {
532 		dma_fence_wait(fence, false);
533 		dma_fence_put(fence);
534 	}
535 
536 	return err;
537 #undef XE_MIGRATE_CHUNK_SIZE
538 #undef XE_VRAM_ADDR_INVALID
539 }
540 
541 static int xe_svm_copy_to_devmem(struct page **pages,
542 				 struct drm_pagemap_addr *pagemap_addr,
543 				 unsigned long npages)
544 {
545 	return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM);
546 }
547 
548 static int xe_svm_copy_to_ram(struct page **pages,
549 			      struct drm_pagemap_addr *pagemap_addr,
550 			      unsigned long npages)
551 {
552 	return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM);
553 }
554 
555 static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
556 {
557 	return container_of(devmem_allocation, struct xe_bo, devmem_allocation);
558 }
559 
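/*
 * drm_pagemap callback invoked when the devmem allocation is released (also
 * called directly on a failed migration): drops the extra BO reference and
 * the runtime-pm reference taken in xe_drm_pagemap_populate_mm().
 */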
560 static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
561 {
562 	struct xe_bo *bo = to_xe_bo(devmem_allocation);
563 	struct xe_device *xe = xe_bo_device(bo);
564 
565 	xe_bo_put_async(bo);
566 	xe_pm_runtime_put(xe);
567 }
568 
569 static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
570 {
571 	return PHYS_PFN(offset + vr->hpa_base);
572 }
573 
574 static struct drm_buddy *vram_to_buddy(struct xe_vram_region *vram)
575 {
576 	return &vram->ttm.mm;
577 }
578 
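/*
 * Expand the buddy blocks backing the BO into a flat, per-page array of PFNs
 * for the migration layer to use as destination device pages.
 */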
579 static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
580 				      unsigned long npages, unsigned long *pfn)
581 {
582 	struct xe_bo *bo = to_xe_bo(devmem_allocation);
583 	struct ttm_resource *res = bo->ttm.resource;
584 	struct list_head *blocks = &to_xe_ttm_vram_mgr_resource(res)->blocks;
585 	struct drm_buddy_block *block;
586 	int j = 0;
587 
588 	list_for_each_entry(block, blocks, link) {
589 		struct xe_vram_region *vr = block->private;
590 		struct drm_buddy *buddy = vram_to_buddy(vr);
591 		u64 block_pfn = block_offset_to_pfn(vr, drm_buddy_block_offset(block));
592 		int i;
593 
594 		for (i = 0; i < drm_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i)
595 			pfn[j++] = block_pfn + i;
596 	}
597 
598 	return 0;
599 }
600 
601 static const struct drm_pagemap_devmem_ops dpagemap_devmem_ops = {
602 	.devmem_release = xe_svm_devmem_release,
603 	.populate_devmem_pfn = xe_svm_populate_devmem_pfn,
604 	.copy_to_devmem = xe_svm_copy_to_devmem,
605 	.copy_to_ram = xe_svm_copy_to_ram,
606 };
607 
608 #endif
609 
610 static const struct drm_gpusvm_ops gpusvm_ops = {
611 	.range_alloc = xe_svm_range_alloc,
612 	.range_free = xe_svm_range_free,
613 	.invalidate = xe_svm_invalidate,
614 };
615 
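/*
 * Candidate SVM range (chunk) sizes, ordered largest first; they correspond
 * to the GPU page sizes the driver can map (2M, 64K and 4K).
 */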
616 static const unsigned long fault_chunk_sizes[] = {
617 	SZ_2M,
618 	SZ_64K,
619 	SZ_4K,
620 };
621 
622 /**
623  * xe_svm_init() - SVM initialize
624  * @vm: The VM.
625  *
626  * Initialize SVM state which is embedded within the VM.
627  *
628  * Return: 0 on success, negative error code on error.
629  */
630 int xe_svm_init(struct xe_vm *vm)
631 {
632 	int err;
633 
634 	spin_lock_init(&vm->svm.garbage_collector.lock);
635 	INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
636 	INIT_WORK(&vm->svm.garbage_collector.work,
637 		  xe_svm_garbage_collector_work_func);
638 
639 	err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
640 			      current->mm, xe_svm_devm_owner(vm->xe), 0,
641 			      vm->size, xe_modparam.svm_notifier_size * SZ_1M,
642 			      &gpusvm_ops, fault_chunk_sizes,
643 			      ARRAY_SIZE(fault_chunk_sizes));
644 	if (err)
645 		return err;
646 
647 	drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
648 
649 	return 0;
650 }
651 
652 /**
653  * xe_svm_close() - SVM close
654  * @vm: The VM.
655  *
656  * Close SVM state (i.e., stop and flush all SVM actions).
657  */
658 void xe_svm_close(struct xe_vm *vm)
659 {
660 	xe_assert(vm->xe, xe_vm_is_closed(vm));
661 	flush_work(&vm->svm.garbage_collector.work);
662 }
663 
664 /**
665  * xe_svm_fini() - SVM finalize
666  * @vm: The VM.
667  *
668  * Finalize SVM state which is embedded within the VM.
669  */
670 void xe_svm_fini(struct xe_vm *vm)
671 {
672 	xe_assert(vm->xe, xe_vm_is_closed(vm));
673 
674 	drm_gpusvm_fini(&vm->svm.gpusvm);
675 }
676 
677 static bool xe_svm_range_is_valid(struct xe_svm_range *range,
678 				  struct xe_tile *tile,
679 				  bool devmem_only)
680 {
681 	return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
682 					    range->tile_invalidated) &&
683 		(!devmem_only || xe_svm_range_in_vram(range)));
684 }
685 
686 /**
 * xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
687  * @vm: xe_vm pointer
688  * @range: Pointer to the SVM range structure
689  *
690  * xe_svm_range_migrate_to_smem() checks whether the range has pages in
691  * VRAM and, if so, migrates them to SMEM.
692  */
693 void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
694 {
695 	if (xe_svm_range_in_vram(range))
696 		drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
697 }
698 
699 /**
700  * xe_svm_range_validate() - Check if the SVM range is valid
701  * @vm: xe_vm pointer
702  * @range: Pointer to the SVM range structure
703  * @tile_mask: Mask representing the tiles to be checked
704  * @devmem_preferred: if true, the range needs to be in devmem
705  *
706  * The xe_svm_range_validate() function checks if a range is
707  * valid and located in the desired memory region.
708  *
709  * Return: true if the range is valid, false otherwise
710  */
711 bool xe_svm_range_validate(struct xe_vm *vm,
712 			   struct xe_svm_range *range,
713 			   u8 tile_mask, bool devmem_preferred)
714 {
715 	bool ret;
716 
717 	xe_svm_notifier_lock(vm);
718 
719 	ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask &&
720 	       (devmem_preferred == range->base.flags.has_devmem_pages);
721 
722 	xe_svm_notifier_unlock(vm);
723 
724 	return ret;
725 }
726 
727 /**
728  * xe_svm_find_vma_start - Find start of CPU VMA
729  * @vm: xe_vm pointer
730  * @start: start address
731  * @end: end address
732  * @vma: Pointer to struct xe_vma
733  *
734  * This function searches for a CPU VMA within the specified range
735  * [start, end] in the given VM, clamping the range to the xe_vma start and
736  * end addresses. If no CPU VMA is found, it returns ULONG_MAX.
738  *
739  * Return: The starting address of the VMA within the range,
740  * or ULONG_MAX if no VMA is found
741  */
742 u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *vma)
743 {
744 	return drm_gpusvm_find_vma_start(&vm->svm.gpusvm,
745 					 max(start, xe_vma_start(vma)),
746 					 min(end, xe_vma_end(vma)));
747 }
748 
749 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
750 static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
751 				      unsigned long start, unsigned long end,
752 				      struct mm_struct *mm,
753 				      unsigned long timeslice_ms)
754 {
755 	struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap);
756 	struct xe_device *xe = vr->xe;
757 	struct device *dev = xe->drm.dev;
758 	struct drm_buddy_block *block;
759 	struct list_head *blocks;
760 	struct xe_bo *bo;
761 	ktime_t time_end = 0;
762 	int err, idx;
763 
764 	if (!drm_dev_enter(&xe->drm, &idx))
765 		return -ENODEV;
766 
767 	xe_pm_runtime_get(xe);
768 
769  retry:
770 	bo = xe_bo_create_locked(vr->xe, NULL, NULL, end - start,
771 				 ttm_bo_type_device,
772 				 (IS_DGFX(xe) ? XE_BO_FLAG_VRAM(vr) : XE_BO_FLAG_SYSTEM) |
773 				 XE_BO_FLAG_CPU_ADDR_MIRROR);
774 	if (IS_ERR(bo)) {
775 		err = PTR_ERR(bo);
776 		if (xe_vm_validate_should_retry(NULL, err, &time_end))
777 			goto retry;
778 		goto out_pm_put;
779 	}
780 
781 	drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
782 				&dpagemap_devmem_ops, dpagemap, end - start);
783 
784 	blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
785 	list_for_each_entry(block, blocks, link)
786 		block->private = vr;
787 
788 	xe_bo_get(bo);
789 
790 	/* Ensure the device has a pm ref while there are device pages active. */
791 	xe_pm_runtime_get_noresume(xe);
792 	err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
793 					    start, end, timeslice_ms,
794 					    xe_svm_devm_owner(xe));
795 	if (err)
796 		xe_svm_devmem_release(&bo->devmem_allocation);
797 
798 	xe_bo_unlock(bo);
799 	xe_bo_put(bo);
800 
801 out_pm_put:
802 	xe_pm_runtime_put(xe);
803 	drm_dev_exit(idx);
804 
805 	return err;
806 }
807 #endif
808 
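/* Platforms whose VRAM requires 64K pages cannot service 4K-granular migrations. */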
809 static bool supports_4K_migration(struct xe_device *xe)
810 {
811 	if (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
812 		return false;
813 
814 	return true;
815 }
816 
817 /**
818  * xe_svm_range_needs_migrate_to_vram() - SVM range needs migrate to VRAM or not
819  * @range: SVM range for which migration needs to be decided
820  * @vma: vma which contains the range
821  * @preferred_region_is_vram: preferred region for range is vram
822  *
823  * Return: True for range needing migration and migration is supported else false
824  */
825 bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
826 					bool preferred_region_is_vram)
827 {
828 	struct xe_vm *vm = range_to_vm(&range->base);
829 	u64 range_size = xe_svm_range_size(range);
830 
831 	if (!range->base.flags.migrate_devmem || !preferred_region_is_vram)
832 		return false;
833 
834 	xe_assert(vm->xe, IS_DGFX(vm->xe));
835 
836 	if (preferred_region_is_vram && xe_svm_range_in_vram(range)) {
837 		drm_info(&vm->xe->drm, "Range is already in VRAM\n");
838 		return false;
839 	}
840 
841 	if (preferred_region_is_vram && range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
842 		drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
843 		return false;
844 	}
845 
846 	return true;
847 }
848 
849 static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
850 				     struct xe_gt *gt, u64 fault_addr,
851 				     bool need_vram)
852 {
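	/*
	 * Device memory is only possible on DGFX with CONFIG_DRM_XE_PAGEMAP;
	 * devmem_only forces VRAM placement when the fault requires it (e.g.
	 * certain atomic accesses), and timeslice_ms is intended to give a
	 * freshly migrated range some time on the GPU before it can be
	 * migrated back (it is doubled on each retry below).
	 */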
853 	struct drm_gpusvm_ctx ctx = {
854 		.read_only = xe_vma_read_only(vma),
855 		.devmem_possible = IS_DGFX(vm->xe) &&
856 			IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
857 		.check_pages_threshold = IS_DGFX(vm->xe) &&
858 			IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ? SZ_64K : 0,
859 		.devmem_only = need_vram && IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
860 		.timeslice_ms = need_vram && IS_DGFX(vm->xe) &&
861 			IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ?
862 			vm->xe->atomic_svm_timeslice_ms : 0,
863 	};
864 	struct xe_svm_range *range;
865 	struct dma_fence *fence;
866 	struct drm_pagemap *dpagemap;
867 	struct xe_tile *tile = gt_to_tile(gt);
868 	int migrate_try_count = ctx.devmem_only ? 3 : 1;
869 	ktime_t end = 0;
870 	int err;
871 
872 	lockdep_assert_held_write(&vm->lock);
873 	xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
874 
875 	xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, 1);
876 
877 retry:
878 	/* Always process UNMAPs first so the view of SVM ranges is current */
879 	err = xe_svm_garbage_collector(vm);
880 	if (err)
881 		return err;
882 
883 	range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
884 
885 	if (IS_ERR(range))
886 		return PTR_ERR(range);
887 
888 	if (ctx.devmem_only && !range->base.flags.migrate_devmem)
889 		return -EACCES;
890 
891 	if (xe_svm_range_is_valid(range, tile, ctx.devmem_only))
892 		return 0;
893 
894 	range_debug(range, "PAGE FAULT");
895 
896 	dpagemap = xe_vma_resolve_pagemap(vma, tile);
897 	if (--migrate_try_count >= 0 &&
898 	    xe_svm_range_needs_migrate_to_vram(range, vma, !!dpagemap || ctx.devmem_only)) {
899 		/* TODO: For multi-device, dpagemap will be used to find the
900 		 * remote tile and remote device. Will need to modify
901 		 * xe_svm_alloc_vram to use dpagemap for future multi-device
902 		 * support.
903 		 */
904 		err = xe_svm_alloc_vram(tile, range, &ctx);
905 		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
906 		if (err) {
907 			if (migrate_try_count || !ctx.devmem_only) {
908 				drm_dbg(&vm->xe->drm,
909 					"VRAM allocation failed, falling back to retrying fault, asid=%u, errno=%pe\n",
910 					vm->usm.asid, ERR_PTR(err));
911 				goto retry;
912 			} else {
913 				drm_err(&vm->xe->drm,
914 					"VRAM allocation failed, retry count exceeded, asid=%u, errno=%pe\n",
915 					vm->usm.asid, ERR_PTR(err));
916 				return err;
917 			}
918 		}
919 	}
920 
921 	range_debug(range, "GET PAGES");
922 	err = xe_svm_range_get_pages(vm, range, &ctx);
923 	/* Corner case where CPU mappings have changed */
924 	if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
925 		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
926 		if (migrate_try_count > 0 || !ctx.devmem_only) {
927 			drm_dbg(&vm->xe->drm,
928 				"Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
929 				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
930 			range_debug(range, "PAGE FAULT - RETRY PAGES");
931 			goto retry;
932 		} else {
933 			drm_err(&vm->xe->drm,
934 				"Get pages failed, retry count exceeded, asid=%u, gpusvm=%p, errno=%pe\n",
935 				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
936 		}
937 	}
938 	if (err) {
939 		range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
940 		goto err_out;
941 	}
942 
943 	range_debug(range, "PAGE FAULT - BIND");
944 
945 retry_bind:
946 	xe_vm_lock(vm, false);
947 	fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
948 	if (IS_ERR(fence)) {
949 		xe_vm_unlock(vm);
950 		err = PTR_ERR(fence);
951 		if (err == -EAGAIN) {
952 			ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
953 			range_debug(range, "PAGE FAULT - RETRY BIND");
954 			goto retry;
955 		}
956 		if (xe_vm_validate_should_retry(NULL, err, &end))
957 			goto retry_bind;
958 		goto err_out;
959 	}
960 	xe_vm_unlock(vm);
961 
962 	dma_fence_wait(fence, false);
963 	dma_fence_put(fence);
964 
965 err_out:
966 
967 	return err;
968 }
969 
970 /**
971  * xe_svm_handle_pagefault() - SVM handle page fault
972  * @vm: The VM.
973  * @vma: The CPU address mirror VMA.
974  * @gt: The gt upon which the fault occurred.
975  * @fault_addr: The GPU fault address.
976  * @atomic: The fault atomic access bit.
977  *
978  * Create GPU bindings for a SVM page fault. Optionally migrate to device
979  * memory.
980  *
981  * Return: 0 on success, negative error code on error.
982  */
983 int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
984 			    struct xe_gt *gt, u64 fault_addr,
985 			    bool atomic)
986 {
987 	int need_vram, ret;
988 retry:
989 	need_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic);
990 	if (need_vram < 0)
991 		return need_vram;
992 
993 	ret =  __xe_svm_handle_pagefault(vm, vma, gt, fault_addr,
994 					 need_vram ? true : false);
995 	if (ret == -EAGAIN) {
996 		/*
997 		 * Retry once on -EAGAIN to re-lookup the VMA, as the original VMA
998 		 * may have been split by xe_svm_range_set_default_attr.
999 		 */
1000 		vma = xe_vm_find_vma_by_addr(vm, fault_addr);
1001 		if (!vma)
1002 			return -EINVAL;
1003 
1004 		goto retry;
1005 	}
1006 	return ret;
1007 }
1008 
1009 /**
1010  * xe_svm_has_mapping() - SVM has mappings
1011  * @vm: The VM.
1012  * @start: Start address.
1013  * @end: End address.
1014  *
1015  * Check if an address range has SVM mappings.
1016  *
1017  * Return: True if address range has a SVM mapping, False otherwise
1018  */
1019 bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
1020 {
1021 	return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
1022 }
1023 
1024 /**
1025  * xe_svm_unmap_address_range() - Unmap SVM mappings and ranges
1026  * @vm: The VM
1027  * @start: start addr
1028  * @end: end addr
1029  *
1030  * This function unmaps SVM ranges if the start or end address is inside them.
1031  */
1032 void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
1033 {
1034 	struct drm_gpusvm_notifier *notifier, *next;
1035 
1036 	lockdep_assert_held_write(&vm->lock);
1037 
1038 	drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) {
1039 		struct drm_gpusvm_range *range, *__next;
1040 
1041 		drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
1042 			if (start > drm_gpusvm_range_start(range) ||
1043 			    end < drm_gpusvm_range_end(range)) {
1044 				if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
1045 					drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
1046 				drm_gpusvm_range_get(range);
1047 				__xe_svm_garbage_collector(vm, to_xe_range(range));
1048 				if (!list_empty(&to_xe_range(range)->garbage_collector_link)) {
1049 					spin_lock(&vm->svm.garbage_collector.lock);
1050 					list_del(&to_xe_range(range)->garbage_collector_link);
1051 					spin_unlock(&vm->svm.garbage_collector.lock);
1052 				}
1053 				drm_gpusvm_range_put(range);
1054 			}
1055 		}
1056 	}
1057 }
1058 
1059 /**
1060  * xe_svm_bo_evict() - SVM evict BO to system memory
1061  * @bo: BO to evict
1062  *
1063  * SVM evict BO to system memory. GPU SVM layer ensures all device pages
1064  * are evicted before returning.
1065  *
1066  * Return: 0 on success, standard error code otherwise
1067  */
1068 int xe_svm_bo_evict(struct xe_bo *bo)
1069 {
1070 	return drm_pagemap_evict_to_ram(&bo->devmem_allocation);
1071 }
1072 
1073 /**
1074  * xe_svm_range_find_or_insert() - Find or insert GPU SVM range
1075  * @vm: xe_vm pointer
1076  * @addr: address for which range needs to be found/inserted
1077  * @vma: Pointer to the CPU address mirror struct xe_vma
1078  * @ctx: GPU SVM context
1079  *
1080  * This function finds an existing SVM range or inserts a newly allocated
1081  * one, based on the given address.
1082  *
1083  * Return: Pointer to the SVM range on success, ERR_PTR() on failure.
1084  */
1085 struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
1086 						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
1087 {
1088 	struct drm_gpusvm_range *r;
1089 
1090 	r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
1091 					    xe_vma_start(vma), xe_vma_end(vma), ctx);
1092 	if (IS_ERR(r))
1093 		return ERR_CAST(r);
1094 
1095 	return to_xe_range(r);
1096 }
1097 
1098 /**
1099  * xe_svm_range_get_pages() - Get pages for a SVM range
1100  * @vm: Pointer to the struct xe_vm
1101  * @range: Pointer to the xe SVM range structure
1102  * @ctx: GPU SVM context
1103  *
1104  * This function gets pages for a SVM range and ensures they are mapped for
1105  * DMA access. In case of failure with -EOPNOTSUPP, it evicts the range.
1106  *
1107  * Return: 0 on success, negative error code on failure.
1108  */
1109 int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
1110 			   struct drm_gpusvm_ctx *ctx)
1111 {
1112 	int err = 0;
1113 
1114 	err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
1115 	if (err == -EOPNOTSUPP) {
1116 		range_debug(range, "PAGE FAULT - EVICT PAGES");
1117 		drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
1118 	}
1119 
1120 	return err;
1121 }
1122 
1123 /**
1124  * xe_svm_ranges_zap_ptes_in_range() - Clear PTEs of SVM ranges in input range
1125  * @vm: Pointer to the xe_vm structure
1126  * @start: Start of the input range
1127  * @end: End of the input range
1128  *
1129  * This function removes the page table entries (PTEs) associated
1130  * with the SVM ranges within the given input start and end.
1131  *
1132  * Return: tile_mask indicating which tiles need TLB invalidation.
1133  */
1134 u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
1135 {
1136 	struct drm_gpusvm_notifier *notifier;
1137 	struct xe_svm_range *range;
1138 	u64 adj_start, adj_end;
1139 	struct xe_tile *tile;
1140 	u8 tile_mask = 0;
1141 	u8 id;
1142 
1143 	lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
1144 		       lockdep_is_held_type(&vm->lock, 0));
1145 
1146 	drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) {
1147 		struct drm_gpusvm_range *r = NULL;
1148 
1149 		adj_start = max(start, drm_gpusvm_notifier_start(notifier));
1150 		adj_end = min(end, drm_gpusvm_notifier_end(notifier));
1151 		drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) {
1152 			range = to_xe_range(r);
1153 			for_each_tile(tile, vm->xe, id) {
1154 				if (xe_pt_zap_ptes_range(tile, vm, range)) {
1155 					tile_mask |= BIT(id);
1156 					/*
1157 					 * WRITE_ONCE pairs with READ_ONCE in
1158 					 * xe_vm_has_valid_gpu_mapping().
1159 					 * Must not fail after setting
1160 					 * tile_invalidated and before
1161 					 * TLB invalidation.
1162 					 */
1163 					WRITE_ONCE(range->tile_invalidated,
1164 						   range->tile_invalidated | BIT(id));
1165 				}
1166 			}
1167 		}
1168 	}
1169 
1170 	return tile_mask;
1171 }
1172 
1173 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
1174 
1175 static struct drm_pagemap *tile_local_pagemap(struct xe_tile *tile)
1176 {
1177 	return &tile->mem.vram->dpagemap;
1178 }
1179 
1180 /**
1181  * xe_vma_resolve_pagemap - Resolve the appropriate DRM pagemap for a VMA
1182  * @vma: Pointer to the xe_vma structure containing memory attributes
1183  * @tile: Pointer to the xe_tile structure used as fallback for VRAM mapping
1184  *
1185  * This function determines the correct DRM pagemap to use for a given VMA.
1186  * It first checks if a valid devmem_fd is provided in the VMA's preferred
1187  * location. If the devmem_fd is negative, it returns NULL, indicating no
1188  * pagemap is available and smem is to be used as the preferred location.
1189  * If the devmem_fd is equal to the default faulting
1190  * GT identifier, it returns the VRAM pagemap associated with the tile.
1191  *
1192  * Future support for multi-device configurations may use drm_pagemap_from_fd()
1193  * to resolve pagemaps from arbitrary file descriptors.
1194  *
1195  * Return: A pointer to the resolved drm_pagemap, or NULL if none is applicable.
1196  */
1197 struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
1198 {
1199 	s32 fd = (s32)vma->attr.preferred_loc.devmem_fd;
1200 
1201 	if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM)
1202 		return NULL;
1203 
1204 	if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE)
1205 		return IS_DGFX(tile_to_xe(tile)) ? tile_local_pagemap(tile) : NULL;
1206 
1207 	/* TODO: Support multi-device with drm_pagemap_from_fd(fd) */
1208 	return NULL;
1209 }
1210 
1211 /**
1212  * xe_svm_alloc_vram() - Allocate device memory pages for range,
1213  * migrating existing data.
1214  * @tile: tile to allocate vram from
1215  * @range: SVM range
1216  * @ctx: DRM GPU SVM context
1217  *
1218  * Return: 0 on success, error code on failure.
1219  */
1220 int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
1221 		      const struct drm_gpusvm_ctx *ctx)
1222 {
1223 	struct drm_pagemap *dpagemap;
1224 
1225 	xe_assert(tile_to_xe(tile), range->base.flags.migrate_devmem);
1226 	range_debug(range, "ALLOCATE VRAM");
1227 
1228 	dpagemap = tile_local_pagemap(tile);
1229 	return drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
1230 				       xe_svm_range_end(range),
1231 				       range->base.gpusvm->mm,
1232 				       ctx->timeslice_ms);
1233 }
1234 
1235 static struct drm_pagemap_addr
1236 xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
1237 			  struct device *dev,
1238 			  struct page *page,
1239 			  unsigned int order,
1240 			  enum dma_data_direction dir)
1241 {
1242 	struct device *pgmap_dev = dpagemap->dev;
1243 	enum drm_interconnect_protocol prot;
1244 	dma_addr_t addr;
1245 
1246 	if (pgmap_dev == dev) {
1247 		addr = xe_vram_region_page_to_dpa(page_to_vr(page), page);
1248 		prot = XE_INTERCONNECT_VRAM;
1249 	} else {
1250 		addr = DMA_MAPPING_ERROR;
1251 		prot = 0;
1252 	}
1253 
1254 	return drm_pagemap_addr_encode(addr, prot, order, dir);
1255 }
1256 
1257 static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
1258 	.device_map = xe_drm_pagemap_device_map,
1259 	.populate_mm = xe_drm_pagemap_populate_mm,
1260 };
1261 
1262 /**
1263  * xe_devm_add() - Remap and provide memmap backing for device memory
1264  * @tile: tile that the memory region belongs to
1265  * @vr: vram memory region to remap
1266  *
1267  * This remaps device memory into the host physical address space and
1268  * creates struct pages to back the device memory.
1269  *
1270  * Return: 0 on success, standard error code otherwise
1271  */
1272 int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
1273 {
1274 	struct xe_device *xe = tile_to_xe(tile);
1275 	struct device *dev = &to_pci_dev(xe->drm.dev)->dev;
1276 	struct resource *res;
1277 	void *addr;
1278 	int ret;
1279 
1280 	res = devm_request_free_mem_region(dev, &iomem_resource,
1281 					   vr->usable_size);
1282 	if (IS_ERR(res)) {
1283 		ret = PTR_ERR(res);
1284 		return ret;
1285 	}
1286 
1287 	vr->pagemap.type = MEMORY_DEVICE_PRIVATE;
1288 	vr->pagemap.range.start = res->start;
1289 	vr->pagemap.range.end = res->end;
1290 	vr->pagemap.nr_range = 1;
1291 	vr->pagemap.ops = drm_pagemap_pagemap_ops_get();
1292 	vr->pagemap.owner = xe_svm_devm_owner(xe);
1293 	addr = devm_memremap_pages(dev, &vr->pagemap);
1294 
1295 	vr->dpagemap.dev = dev;
1296 	vr->dpagemap.ops = &xe_drm_pagemap_ops;
1297 
1298 	if (IS_ERR(addr)) {
1299 		devm_release_mem_region(dev, res->start, resource_size(res));
1300 		ret = PTR_ERR(addr);
1301 		drm_err(&xe->drm, "Failed to remap tile %d memory, errno %pe\n",
1302 			tile->id, ERR_PTR(ret));
1303 		return ret;
1304 	}
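	/*
	 * Record the host physical base of the remapped region so device pages
	 * can be translated back to device physical addresses, see
	 * xe_vram_region_page_to_dpa().
	 */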
1305 	vr->hpa_base = res->start;
1306 
1307 	drm_dbg(&xe->drm, "Added tile %d memory [%llx-%llx] to devm, remapped to %pr\n",
1308 		tile->id, vr->io_start, vr->io_start + vr->usable_size, res);
1309 	return 0;
1310 }
1311 #else
1312 int xe_svm_alloc_vram(struct xe_tile *tile,
1313 		      struct xe_svm_range *range,
1314 		      const struct drm_gpusvm_ctx *ctx)
1315 {
1316 	return -EOPNOTSUPP;
1317 }
1318 
1319 int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
1320 {
1321 	return 0;
1322 }
1323 
1324 struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
1325 {
1326 	return NULL;
1327 }
1328 #endif
1329 
1330 /**
1331  * xe_svm_flush() - SVM flush
1332  * @vm: The VM.
1333  *
1334  * Flush all SVM actions.
1335  */
1336 void xe_svm_flush(struct xe_vm *vm)
1337 {
1338 	if (xe_vm_in_fault_mode(vm))
1339 		flush_work(&vm->svm.garbage_collector.work);
1340 }
1341