1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2024 Intel Corporation
4  */
5 
6 #include <drm/drm_drv.h>
7 
8 #include "xe_bo.h"
9 #include "xe_gt_stats.h"
10 #include "xe_migrate.h"
11 #include "xe_module.h"
12 #include "xe_pm.h"
13 #include "xe_pt.h"
14 #include "xe_svm.h"
15 #include "xe_tile.h"
16 #include "xe_ttm_vram_mgr.h"
17 #include "xe_vm.h"
18 #include "xe_vm_types.h"
19 #include "xe_vram_types.h"
20 
21 static bool xe_svm_range_in_vram(struct xe_svm_range *range)
22 {
23 	/*
24 	 * Advisory-only check of whether the range is currently backed by
25 	 * VRAM memory.
26 	 */
27 
28 	struct drm_gpusvm_range_flags flags = {
29 		/* Pairs with WRITE_ONCE in drm_gpusvm.c */
30 		.__flags = READ_ONCE(range->base.flags.__flags),
31 	};
32 
33 	return flags.has_devmem_pages;
34 }
35 
36 static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
37 {
38 	/* Not reliable without notifier lock */
39 	return xe_svm_range_in_vram(range) && range->tile_present;
40 }
41 
42 static struct xe_vm *gpusvm_to_vm(struct drm_gpusvm *gpusvm)
43 {
44 	return container_of(gpusvm, struct xe_vm, svm.gpusvm);
45 }
46 
47 static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
48 {
49 	return gpusvm_to_vm(r->gpusvm);
50 }
51 
52 #define range_debug(r__, operation__)					\
53 	vm_dbg(&range_to_vm(&(r__)->base)->xe->drm,			\
54 	       "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
55 	       "start=0x%014lx, end=0x%014lx, size=%lu",		\
56 	       (operation__), range_to_vm(&(r__)->base)->usm.asid,	\
57 	       (r__)->base.gpusvm,					\
58 	       xe_svm_range_in_vram((r__)) ? 1 : 0,			\
59 	       xe_svm_range_has_vram_binding((r__)) ? 1 : 0,		\
60 	       (r__)->base.notifier_seq,				\
61 	       xe_svm_range_start((r__)), xe_svm_range_end((r__)),	\
62 	       xe_svm_range_size((r__)))
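
/*
 * Illustrative example (not taken from the original source): a call such as
 * range_debug(range, "PAGE FAULT") emits a debug line of the form
 * "PAGE FAULT: asid=1, gpusvm=<ptr>, vram=1,0, seqno=27,
 *  start=0x00007f40000000, end=0x00007f40200000, size=2097152",
 * where the two vram values are xe_svm_range_in_vram() and
 * xe_svm_range_has_vram_binding() respectively; the numbers are made up.
 */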
63 
64 void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
65 {
66 	range_debug(range, operation);
67 }
68 
69 static void *xe_svm_devm_owner(struct xe_device *xe)
70 {
71 	return xe;
72 }
73 
74 static struct drm_gpusvm_range *
75 xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
76 {
77 	struct xe_svm_range *range;
78 
79 	range = kzalloc(sizeof(*range), GFP_KERNEL);
80 	if (!range)
81 		return NULL;
82 
83 	INIT_LIST_HEAD(&range->garbage_collector_link);
84 	xe_vm_get(gpusvm_to_vm(gpusvm));
85 
86 	return &range->base;
87 }
88 
89 static void xe_svm_range_free(struct drm_gpusvm_range *range)
90 {
91 	xe_vm_put(range_to_vm(range));
92 	kfree(range);
93 }
94 
95 static void
96 xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
97 				   const struct mmu_notifier_range *mmu_range)
98 {
99 	struct xe_device *xe = vm->xe;
100 
101 	range_debug(range, "GARBAGE COLLECTOR ADD");
102 
103 	drm_gpusvm_range_set_unmapped(&range->base, mmu_range);
104 
105 	spin_lock(&vm->svm.garbage_collector.lock);
106 	if (list_empty(&range->garbage_collector_link))
107 		list_add_tail(&range->garbage_collector_link,
108 			      &vm->svm.garbage_collector.range_list);
109 	spin_unlock(&vm->svm.garbage_collector.lock);
110 
111 	queue_work(xe_device_get_root_tile(xe)->primary_gt->usm.pf_wq,
112 		   &vm->svm.garbage_collector.work);
113 }
114 
115 static u8
116 xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
117 				  const struct mmu_notifier_range *mmu_range,
118 				  u64 *adj_start, u64 *adj_end)
119 {
120 	struct xe_svm_range *range = to_xe_range(r);
121 	struct xe_device *xe = vm->xe;
122 	struct xe_tile *tile;
123 	u8 tile_mask = 0;
124 	u8 id;
125 
126 	xe_svm_assert_in_notifier(vm);
127 
128 	range_debug(range, "NOTIFIER");
129 
130 	/* Skip if already unmapped or if no binding exists */
131 	if (range->base.flags.unmapped || !range->tile_present)
132 		return 0;
133 
134 	range_debug(range, "NOTIFIER - EXECUTE");
135 
136 	/* Adjust invalidation to range boundaries */
137 	*adj_start = min(xe_svm_range_start(range), mmu_range->start);
138 	*adj_end = max(xe_svm_range_end(range), mmu_range->end);
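
	/*
	 * Example with illustrative addresses: an unmap of
	 * [0x7f40010000, 0x7f40030000) hitting a 2M range
	 * [0x7f40000000, 0x7f40200000) widens the invalidation to the whole
	 * 2M range, since the min()/max() above take the union of the two
	 * intervals.
	 */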
139 
140 	/*
141 	 * XXX: Ideally would zap PTEs in one shot in xe_svm_invalidate but the
142 	 * invalidation code can't correctly cope with sparse ranges or
143 	 * invalidations spanning multiple ranges.
144 	 */
145 	for_each_tile(tile, xe, id)
146 		if (xe_pt_zap_ptes_range(tile, vm, range)) {
147 			tile_mask |= BIT(id);
148 			/*
149 			 * WRITE_ONCE pairs with READ_ONCE in
150 			 * xe_vm_has_valid_gpu_mapping()
151 			 */
152 			WRITE_ONCE(range->tile_invalidated,
153 				   range->tile_invalidated | BIT(id));
154 		}
155 
156 	return tile_mask;
157 }
158 
159 static void
160 xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r,
161 				const struct mmu_notifier_range *mmu_range)
162 {
163 	struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
164 
165 	xe_svm_assert_in_notifier(vm);
166 
167 	drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx);
168 	if (!xe_vm_is_closed(vm) && mmu_range->event == MMU_NOTIFY_UNMAP)
169 		xe_svm_garbage_collector_add_range(vm, to_xe_range(r),
170 						   mmu_range);
171 }
172 
173 static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
174 			      struct drm_gpusvm_notifier *notifier,
175 			      const struct mmu_notifier_range *mmu_range)
176 {
177 	struct xe_vm *vm = gpusvm_to_vm(gpusvm);
178 	struct xe_device *xe = vm->xe;
179 	struct drm_gpusvm_range *r, *first;
180 	u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
181 	u8 tile_mask = 0;
182 	long err;
183 
184 	xe_svm_assert_in_notifier(vm);
185 
186 	vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm,
187 	       "INVALIDATE: asid=%u, gpusvm=%p, seqno=%lu, start=0x%016lx, end=0x%016lx, event=%d",
188 	       vm->usm.asid, gpusvm, notifier->notifier.invalidate_seq,
189 	       mmu_range->start, mmu_range->end, mmu_range->event);
190 
191 	/* Adjust invalidation to notifier boundaries */
192 	adj_start = max(drm_gpusvm_notifier_start(notifier), adj_start);
193 	adj_end = min(drm_gpusvm_notifier_end(notifier), adj_end);
194 
195 	first = drm_gpusvm_range_find(notifier, adj_start, adj_end);
196 	if (!first)
197 		return;
198 
199 	/*
200 	 * PTs may be getting destroyed, so it is not safe to touch them, but the
201 	 * PTs should already be invalidated at this point in time. Regardless,
202 	 * we still need to ensure any DMA mappings are unmapped here.
203 	 */
204 	if (xe_vm_is_closed(vm))
205 		goto range_notifier_event_end;
206 
207 	/*
208 	 * XXX: Less than ideal to always wait on VM's resv slots if an
209 	 * invalidation is not required. Could walk the range list twice to
210 	 * figure out if an invalidation is needed, but that is also not ideal.
211 	 */
212 	err = dma_resv_wait_timeout(xe_vm_resv(vm),
213 				    DMA_RESV_USAGE_BOOKKEEP,
214 				    false, MAX_SCHEDULE_TIMEOUT);
215 	XE_WARN_ON(err <= 0);
216 
217 	r = first;
218 	drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
219 		tile_mask |= xe_svm_range_notifier_event_begin(vm, r, mmu_range,
220 							       &adj_start,
221 							       &adj_end);
222 	if (!tile_mask)
223 		goto range_notifier_event_end;
224 
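	/*
	 * Make the PTE zaps performed above visible to the device before the
	 * TLB invalidation below is issued.
	 */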
225 	xe_device_wmb(xe);
226 
227 	err = xe_vm_range_tilemask_tlb_inval(vm, adj_start, adj_end, tile_mask);
228 	WARN_ON_ONCE(err);
229 
230 range_notifier_event_end:
231 	r = first;
232 	drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
233 		xe_svm_range_notifier_event_end(vm, r, mmu_range);
234 }
235 
236 static int __xe_svm_garbage_collector(struct xe_vm *vm,
237 				      struct xe_svm_range *range)
238 {
239 	struct dma_fence *fence;
240 
241 	range_debug(range, "GARBAGE COLLECTOR");
242 
243 	xe_vm_lock(vm, false);
244 	fence = xe_vm_range_unbind(vm, range);
245 	xe_vm_unlock(vm);
246 	if (IS_ERR(fence))
247 		return PTR_ERR(fence);
248 	dma_fence_put(fence);
249 
250 	drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);
251 
252 	return 0;
253 }
254 
255 static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64 range_end)
256 {
257 	struct xe_vma *vma;
258 	struct xe_vma_mem_attr default_attr = {
259 		.preferred_loc = {
260 			.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
261 			.migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
262 		},
263 		.atomic_access = DRM_XE_ATOMIC_UNDEFINED,
264 	};
265 	int err = 0;
266 
267 	vma = xe_vm_find_vma_by_addr(vm, range_start);
268 	if (!vma)
269 		return -EINVAL;
270 
271 	if (xe_vma_has_default_mem_attrs(vma))
272 		return 0;
273 
274 	vm_dbg(&vm->xe->drm, "Existing VMA start=0x%016llx, vma_end=0x%016llx",
275 	       xe_vma_start(vma), xe_vma_end(vma));
276 
277 	if (xe_vma_start(vma) == range_start && xe_vma_end(vma) == range_end) {
278 		default_attr.pat_index = vma->attr.default_pat_index;
279 		default_attr.default_pat_index = vma->attr.default_pat_index;
280 		vma->attr = default_attr;
281 	} else {
282 		vm_dbg(&vm->xe->drm, "Split VMA start=0x%016llx, vma_end=0x%016llx",
283 		       range_start, range_end);
284 		err = xe_vm_alloc_cpu_addr_mirror_vma(vm, range_start, range_end - range_start);
285 		if (err) {
286 			drm_warn(&vm->xe->drm, "VMA SPLIT failed: %pe\n", ERR_PTR(err));
287 			xe_vm_kill(vm, true);
288 			return err;
289 		}
290 	}
291 
292 	/*
293 	 * When called from xe_svm_handle_pagefault() the original VMA might have
294 	 * been changed; signal this so the caller looks up the VMA again.
295 	 */
296 	return -EAGAIN;
297 }
298 
299 static int xe_svm_garbage_collector(struct xe_vm *vm)
300 {
301 	struct xe_svm_range *range;
302 	u64 range_start;
303 	u64 range_end;
304 	int err, ret = 0;
305 
306 	lockdep_assert_held_write(&vm->lock);
307 
308 	if (xe_vm_is_closed_or_banned(vm))
309 		return -ENOENT;
310 
311 	spin_lock(&vm->svm.garbage_collector.lock);
312 	for (;;) {
313 		range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
314 						 typeof(*range),
315 						 garbage_collector_link);
316 		if (!range)
317 			break;
318 
319 		range_start = xe_svm_range_start(range);
320 		range_end = xe_svm_range_end(range);
321 
322 		list_del(&range->garbage_collector_link);
323 		spin_unlock(&vm->svm.garbage_collector.lock);
324 
325 		err = __xe_svm_garbage_collector(vm, range);
326 		if (err) {
327 			drm_warn(&vm->xe->drm,
328 				 "Garbage collection failed: %pe\n",
329 				 ERR_PTR(err));
330 			xe_vm_kill(vm, true);
331 			return err;
332 		}
333 
334 		err = xe_svm_range_set_default_attr(vm, range_start, range_end);
335 		if (err) {
336 			if (err == -EAGAIN)
337 				ret = -EAGAIN;
338 			else
339 				return err;
340 		}
341 
342 		spin_lock(&vm->svm.garbage_collector.lock);
343 	}
344 	spin_unlock(&vm->svm.garbage_collector.lock);
345 
346 	return ret;
347 }
348 
349 static void xe_svm_garbage_collector_work_func(struct work_struct *w)
350 {
351 	struct xe_vm *vm = container_of(w, struct xe_vm,
352 					svm.garbage_collector.work);
353 
354 	down_write(&vm->lock);
355 	xe_svm_garbage_collector(vm);
356 	up_write(&vm->lock);
357 }
358 
359 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
360 
361 static struct xe_vram_region *page_to_vr(struct page *page)
362 {
363 	return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
364 }
365 
366 static u64 xe_vram_region_page_to_dpa(struct xe_vram_region *vr,
367 				      struct page *page)
368 {
369 	u64 dpa;
370 	u64 pfn = page_to_pfn(page);
371 	u64 offset;
372 
373 	xe_assert(vr->xe, is_device_private_page(page));
374 	xe_assert(vr->xe, (pfn << PAGE_SHIFT) >= vr->hpa_base);
375 
376 	offset = (pfn << PAGE_SHIFT) - vr->hpa_base;
377 	dpa = vr->dpa_base + offset;
378 
379 	return dpa;
380 }
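
/*
 * Example of the calculation above, with illustrative values: for
 * vr->hpa_base = 0x400000000, vr->dpa_base = 0x0 and a device page at host
 * physical address 0x400800000, offset = 0x800000 and the returned DPA is
 * 0x800000.
 */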
381 
382 enum xe_svm_copy_dir {
383 	XE_SVM_COPY_TO_VRAM,
384 	XE_SVM_COPY_TO_SRAM,
385 };
386 
387 static int xe_svm_copy(struct page **pages,
388 		       struct drm_pagemap_addr *pagemap_addr,
389 		       unsigned long npages, const enum xe_svm_copy_dir dir)
390 {
391 	struct xe_vram_region *vr = NULL;
392 	struct xe_device *xe;
393 	struct dma_fence *fence = NULL;
394 	unsigned long i;
395 #define XE_VRAM_ADDR_INVALID	~0x0ull
396 	u64 vram_addr = XE_VRAM_ADDR_INVALID;
397 	int err = 0, pos = 0;
398 	bool sram = dir == XE_SVM_COPY_TO_SRAM;
399 
400 	/*
401 	 * This flow is complex: it locates physically contiguous device pages,
402 	 * derives the starting physical address, and performs a single GPU copy
403 	 * for every 8M chunk in a DMA address array. Both device pages and
404 	 * DMA addresses may be sparsely populated. If either is NULL, a copy is
405 	 * triggered based on the current search state. The last GPU copy is
406 	 * waited on to ensure all copies are complete.
407 	 */
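	/*
	 * Worked example (values made up): with 4K pages and an 8M chunk,
	 * 2048 contiguous device pages starting at pos trigger a copy when
	 * (i - pos) reaches 2048 (chunk), when a page breaks contiguity
	 * (!match), or at the final page (last); the incr used below folds a
	 * trailing contiguous page into the preceding copy.
	 */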
408 
409 	for (i = 0; i < npages; ++i) {
410 		struct page *spage = pages[i];
411 		struct dma_fence *__fence;
412 		u64 __vram_addr;
413 		bool match = false, chunk, last;
414 
415 #define XE_MIGRATE_CHUNK_SIZE	SZ_8M
416 		chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
417 		last = (i + 1) == npages;
418 
419 		/* No CPU page and no device pages queued to copy */
420 		if (!pagemap_addr[i].addr && vram_addr == XE_VRAM_ADDR_INVALID)
421 			continue;
422 
423 		if (!vr && spage) {
424 			vr = page_to_vr(spage);
425 			xe = vr->xe;
426 		}
427 		XE_WARN_ON(spage && page_to_vr(spage) != vr);
428 
429 		/*
430 		 * CPU page and device page valid, capture physical address on
431 		 * first device page, check if physical contiguous on subsequent
432 		 * device pages.
433 		 */
434 		if (pagemap_addr[i].addr && spage) {
435 			__vram_addr = xe_vram_region_page_to_dpa(vr, spage);
436 			if (vram_addr == XE_VRAM_ADDR_INVALID) {
437 				vram_addr = __vram_addr;
438 				pos = i;
439 			}
440 
441 			match = vram_addr + PAGE_SIZE * (i - pos) == __vram_addr;
442 			/* Expected with contiguous memory */
443 			xe_assert(vr->xe, match);
444 
445 			if (pagemap_addr[i].order) {
446 				i += NR_PAGES(pagemap_addr[i].order) - 1;
447 				chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
448 				last = (i + 1) == npages;
449 			}
450 		}
451 
452 		/*
453 		 * Mismatched physical address, 8M copy chunk, or last page -
454 		 * trigger a copy.
455 		 */
456 		if (!match || chunk || last) {
457 			/*
458 			 * Extra page for first copy if last page and matching
459 			 * physical address.
460 			 */
461 			int incr = (match && last) ? 1 : 0;
462 
463 			if (vram_addr != XE_VRAM_ADDR_INVALID) {
464 				if (sram) {
465 					vm_dbg(&xe->drm,
466 					       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
467 					       vram_addr,
468 					       (u64)pagemap_addr[pos].addr, i - pos + incr);
469 					__fence = xe_migrate_from_vram(vr->migrate,
470 								       i - pos + incr,
471 								       vram_addr,
472 								       &pagemap_addr[pos]);
473 				} else {
474 					vm_dbg(&xe->drm,
475 					       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
476 					       (u64)pagemap_addr[pos].addr, vram_addr,
477 					       i - pos + incr);
478 					__fence = xe_migrate_to_vram(vr->migrate,
479 								     i - pos + incr,
480 								     &pagemap_addr[pos],
481 								     vram_addr);
482 				}
483 				if (IS_ERR(__fence)) {
484 					err = PTR_ERR(__fence);
485 					goto err_out;
486 				}
487 
488 				dma_fence_put(fence);
489 				fence = __fence;
490 			}
491 
492 			/* Setup physical address of next device page */
493 			if (pagemap_addr[i].addr && spage) {
494 				vram_addr = __vram_addr;
495 				pos = i;
496 			} else {
497 				vram_addr = XE_VRAM_ADDR_INVALID;
498 			}
499 
500 			/* Extra mismatched device page, copy it */
501 			if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
502 				if (sram) {
503 					vm_dbg(&xe->drm,
504 					       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
505 					       vram_addr, (u64)pagemap_addr[pos].addr, 1);
506 					__fence = xe_migrate_from_vram(vr->migrate, 1,
507 								       vram_addr,
508 								       &pagemap_addr[pos]);
509 				} else {
510 					vm_dbg(&xe->drm,
511 					       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
512 					       (u64)pagemap_addr[pos].addr, vram_addr, 1);
513 					__fence = xe_migrate_to_vram(vr->migrate, 1,
514 								     &pagemap_addr[pos],
515 								     vram_addr);
516 				}
517 				if (IS_ERR(__fence)) {
518 					err = PTR_ERR(__fence);
519 					goto err_out;
520 				}
521 
522 				dma_fence_put(fence);
523 				fence = __fence;
524 			}
525 		}
526 	}
527 
528 err_out:
529 	/* Wait for all copies to complete */
530 	if (fence) {
531 		dma_fence_wait(fence, false);
532 		dma_fence_put(fence);
533 	}
534 
535 	return err;
536 #undef XE_MIGRATE_CHUNK_SIZE
537 #undef XE_VRAM_ADDR_INVALID
538 }
539 
540 static int xe_svm_copy_to_devmem(struct page **pages,
541 				 struct drm_pagemap_addr *pagemap_addr,
542 				 unsigned long npages)
543 {
544 	return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM);
545 }
546 
547 static int xe_svm_copy_to_ram(struct page **pages,
548 			      struct drm_pagemap_addr *pagemap_addr,
549 			      unsigned long npages)
550 {
551 	return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM);
552 }
553 
554 static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
555 {
556 	return container_of(devmem_allocation, struct xe_bo, devmem_allocation);
557 }
558 
559 static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
560 {
561 	struct xe_bo *bo = to_xe_bo(devmem_allocation);
562 	struct xe_device *xe = xe_bo_device(bo);
563 
564 	xe_bo_put_async(bo);
565 	xe_pm_runtime_put(xe);
566 }
567 
568 static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
569 {
570 	return PHYS_PFN(offset + vr->hpa_base);
571 }
572 
573 static struct drm_buddy *vram_to_buddy(struct xe_vram_region *vram)
574 {
575 	return &vram->ttm.mm;
576 }
577 
578 static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
579 				      unsigned long npages, unsigned long *pfn)
580 {
581 	struct xe_bo *bo = to_xe_bo(devmem_allocation);
582 	struct ttm_resource *res = bo->ttm.resource;
583 	struct list_head *blocks = &to_xe_ttm_vram_mgr_resource(res)->blocks;
584 	struct drm_buddy_block *block;
585 	int j = 0;
586 
587 	list_for_each_entry(block, blocks, link) {
588 		struct xe_vram_region *vr = block->private;
589 		struct drm_buddy *buddy = vram_to_buddy(vr);
590 		u64 block_pfn = block_offset_to_pfn(vr, drm_buddy_block_offset(block));
591 		int i;
592 
593 		for (i = 0; i < drm_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i)
594 			pfn[j++] = block_pfn + i;
595 	}
596 
597 	return 0;
598 }
599 
600 static const struct drm_pagemap_devmem_ops dpagemap_devmem_ops = {
601 	.devmem_release = xe_svm_devmem_release,
602 	.populate_devmem_pfn = xe_svm_populate_devmem_pfn,
603 	.copy_to_devmem = xe_svm_copy_to_devmem,
604 	.copy_to_ram = xe_svm_copy_to_ram,
605 };
606 
607 #endif
608 
609 static const struct drm_gpusvm_ops gpusvm_ops = {
610 	.range_alloc = xe_svm_range_alloc,
611 	.range_free = xe_svm_range_free,
612 	.invalidate = xe_svm_invalidate,
613 };
614 
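/*
 * Allowed SVM fault chunk sizes, listed largest first; drm_gpusvm is
 * expected to pick the largest size that fits the faulting CPU VMA
 * (assumption about drm_gpusvm's chunk selection, noted for clarity).
 */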
615 static const unsigned long fault_chunk_sizes[] = {
616 	SZ_2M,
617 	SZ_64K,
618 	SZ_4K,
619 };
620 
621 /**
622  * xe_svm_init() - SVM initialize
623  * @vm: The VM.
624  *
625  * Initialize SVM state which is embedded within the VM.
626  *
627  * Return: 0 on success, negative error code on error.
628  */
629 int xe_svm_init(struct xe_vm *vm)
630 {
631 	int err;
632 
633 	spin_lock_init(&vm->svm.garbage_collector.lock);
634 	INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
635 	INIT_WORK(&vm->svm.garbage_collector.work,
636 		  xe_svm_garbage_collector_work_func);
637 
638 	err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
639 			      current->mm, xe_svm_devm_owner(vm->xe), 0,
640 			      vm->size, xe_modparam.svm_notifier_size * SZ_1M,
641 			      &gpusvm_ops, fault_chunk_sizes,
642 			      ARRAY_SIZE(fault_chunk_sizes));
643 	if (err)
644 		return err;
645 
646 	drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
647 
648 	return 0;
649 }
650 
651 /**
652  * xe_svm_close() - SVM close
653  * @vm: The VM.
654  *
655  * Close SVM state (i.e., stop and flush all SVM actions).
656  */
657 void xe_svm_close(struct xe_vm *vm)
658 {
659 	xe_assert(vm->xe, xe_vm_is_closed(vm));
660 	flush_work(&vm->svm.garbage_collector.work);
661 }
662 
663 /**
664  * xe_svm_fini() - SVM finalize
665  * @vm: The VM.
666  *
667  * Finalize SVM state which is embedded within the VM.
668  */
669 void xe_svm_fini(struct xe_vm *vm)
670 {
671 	xe_assert(vm->xe, xe_vm_is_closed(vm));
672 
673 	drm_gpusvm_fini(&vm->svm.gpusvm);
674 }
675 
676 static bool xe_svm_range_is_valid(struct xe_svm_range *range,
677 				  struct xe_tile *tile,
678 				  bool devmem_only)
679 {
680 	return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
681 					    range->tile_invalidated) &&
682 		(!devmem_only || xe_svm_range_in_vram(range)));
683 }
684 
685 /**
 * xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
686  * @vm: xe_vm pointer
687  * @range: Pointer to the SVM range structure
688  *
689  * xe_svm_range_migrate_to_smem() checks whether the range has pages in
690  * VRAM and, if so, migrates them to SMEM.
691  */
692 void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
693 {
694 	if (xe_svm_range_in_vram(range))
695 		drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
696 }
697 
698 /**
699  * xe_svm_range_validate() - Check if the SVM range is valid
700  * @vm: xe_vm pointer
701  * @range: Pointer to the SVM range structure
702  * @tile_mask: Mask representing the tiles to be checked
703  * @devmem_preferred: if true, the range needs to be in devmem
704  *
705  * The xe_svm_range_validate() function checks if a range is
706  * valid and located in the desired memory region.
707  *
708  * Return: true if the range is valid, false otherwise
709  */
710 bool xe_svm_range_validate(struct xe_vm *vm,
711 			   struct xe_svm_range *range,
712 			   u8 tile_mask, bool devmem_preferred)
713 {
714 	bool ret;
715 
716 	xe_svm_notifier_lock(vm);
717 
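	/*
	 * Example of the check below with illustrative masks: for
	 * tile_present = 0b11, tile_invalidated = 0b10 and tile_mask = 0b01,
	 * the masked value is 0b01 == tile_mask, so the range is considered
	 * valid for tile 0 provided the devmem placement also matches.
	 */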
718 	ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask &&
719 	       (devmem_preferred == range->base.flags.has_devmem_pages);
720 
721 	xe_svm_notifier_unlock(vm);
722 
723 	return ret;
724 }
725 
726 /**
727  * xe_svm_find_vma_start() - Find start of CPU VMA
728  * @vm: xe_vm pointer
729  * @start: start address
730  * @end: end address
731  * @vma: Pointer to struct xe_vma
732  *
733  *
734  * This function searches for a CPU VMA within the specified
735  * range [start, end] in the given VM. It adjusts the range based on the
736  * xe_vma start and end addresses. If no CPU VMA is found, it returns ULONG_MAX.
737  *
738  * Return: The starting address of the VMA within the range,
739  * or ULONG_MAX if no VMA is found
740  */
741 u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *vma)
742 {
743 	return drm_gpusvm_find_vma_start(&vm->svm.gpusvm,
744 					 max(start, xe_vma_start(vma)),
745 					 min(end, xe_vma_end(vma)));
746 }
747 
748 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
749 static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
750 				      unsigned long start, unsigned long end,
751 				      struct mm_struct *mm,
752 				      unsigned long timeslice_ms)
753 {
754 	struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap);
755 	struct xe_device *xe = vr->xe;
756 	struct device *dev = xe->drm.dev;
757 	struct drm_buddy_block *block;
758 	struct list_head *blocks;
759 	struct xe_bo *bo;
760 	ktime_t time_end = 0;
761 	int err, idx;
762 
763 	if (!drm_dev_enter(&xe->drm, &idx))
764 		return -ENODEV;
765 
766 	xe_pm_runtime_get(xe);
767 
768  retry:
769 	bo = xe_bo_create_locked(vr->xe, NULL, NULL, end - start,
770 				 ttm_bo_type_device,
771 				 (IS_DGFX(xe) ? XE_BO_FLAG_VRAM(vr) : XE_BO_FLAG_SYSTEM) |
772 				 XE_BO_FLAG_CPU_ADDR_MIRROR);
773 	if (IS_ERR(bo)) {
774 		err = PTR_ERR(bo);
775 		if (xe_vm_validate_should_retry(NULL, err, &time_end))
776 			goto retry;
777 		goto out_pm_put;
778 	}
779 
780 	drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
781 				&dpagemap_devmem_ops, dpagemap, end - start);
782 
783 	blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
784 	list_for_each_entry(block, blocks, link)
785 		block->private = vr;
786 
787 	xe_bo_get(bo);
788 
789 	/* Ensure the device has a pm ref while there are device pages active. */
790 	xe_pm_runtime_get_noresume(xe);
791 	err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
792 					    start, end, timeslice_ms,
793 					    xe_svm_devm_owner(xe));
794 	if (err)
795 		xe_svm_devmem_release(&bo->devmem_allocation);
796 
797 	xe_bo_unlock(bo);
798 	xe_bo_put(bo);
799 
800 out_pm_put:
801 	xe_pm_runtime_put(xe);
802 	drm_dev_exit(idx);
803 
804 	return err;
805 }
806 #endif
807 
808 static bool supports_4K_migration(struct xe_device *xe)
809 {
810 	if (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
811 		return false;
812 
813 	return true;
814 }
815 
816 /**
817  * xe_svm_range_needs_migrate_to_vram() - SVM range needs migrate to VRAM or not
818  * @range: SVM range for which migration needs to be decided
819  * @vma: vma which has range
820  * @preferred_region_is_vram: preferred region for range is vram
821  *
822  * Return: True if the range needs migration and migration is supported, false otherwise
823  */
824 bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
825 					bool preferred_region_is_vram)
826 {
827 	struct xe_vm *vm = range_to_vm(&range->base);
828 	u64 range_size = xe_svm_range_size(range);
829 
830 	if (!range->base.flags.migrate_devmem || !preferred_region_is_vram)
831 		return false;
832 
833 	xe_assert(vm->xe, IS_DGFX(vm->xe));
834 
835 	if (preferred_region_is_vram && xe_svm_range_in_vram(range)) {
836 		drm_info(&vm->xe->drm, "Range is already in VRAM\n");
837 		return false;
838 	}
839 
840 	if (preferred_region_is_vram && range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
841 		drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
842 		return false;
843 	}
844 
845 	return true;
846 }
847 
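/*
 * Summary of the fault path below: run the garbage collector so the SVM
 * range view is current, find or insert the range covering the faulting
 * address, optionally migrate it to VRAM, get and DMA-map its pages, and
 * rebind the range, retrying when racing with CPU-side changes.
 */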
848 static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
849 				     struct xe_gt *gt, u64 fault_addr,
850 				     bool need_vram)
851 {
852 	struct drm_gpusvm_ctx ctx = {
853 		.read_only = xe_vma_read_only(vma),
854 		.devmem_possible = IS_DGFX(vm->xe) &&
855 			IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
856 		.check_pages_threshold = IS_DGFX(vm->xe) &&
857 			IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ? SZ_64K : 0,
858 		.devmem_only = need_vram && IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
859 		.timeslice_ms = need_vram && IS_DGFX(vm->xe) &&
860 			IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ?
861 			vm->xe->atomic_svm_timeslice_ms : 0,
862 	};
863 	struct xe_svm_range *range;
864 	struct dma_fence *fence;
865 	struct drm_pagemap *dpagemap;
866 	struct xe_tile *tile = gt_to_tile(gt);
867 	int migrate_try_count = ctx.devmem_only ? 3 : 1;
868 	ktime_t end = 0;
869 	int err;
870 
871 	lockdep_assert_held_write(&vm->lock);
872 	xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
873 
874 	xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, 1);
875 
876 retry:
877 	/* Always process UNMAPs first so the view of SVM ranges is current */
878 	err = xe_svm_garbage_collector(vm);
879 	if (err)
880 		return err;
881 
882 	range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
883 
884 	if (IS_ERR(range))
885 		return PTR_ERR(range);
886 
887 	if (ctx.devmem_only && !range->base.flags.migrate_devmem)
888 		return -EACCES;
889 
890 	if (xe_svm_range_is_valid(range, tile, ctx.devmem_only))
891 		return 0;
892 
893 	range_debug(range, "PAGE FAULT");
894 
895 	dpagemap = xe_vma_resolve_pagemap(vma, tile);
896 	if (--migrate_try_count >= 0 &&
897 	    xe_svm_range_needs_migrate_to_vram(range, vma, !!dpagemap || ctx.devmem_only)) {
898 		/* TODO: For multi-device, dpagemap will be used to find the
899 		 * remote tile and remote device. Will need to modify
900 		 * xe_svm_alloc_vram to use dpagemap for future multi-device
901 		 * support.
902 		 */
903 		err = xe_svm_alloc_vram(tile, range, &ctx);
904 		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
905 		if (err) {
906 			if (migrate_try_count || !ctx.devmem_only) {
907 				drm_dbg(&vm->xe->drm,
908 					"VRAM allocation failed, falling back to retrying fault, asid=%u, errno=%pe\n",
909 					vm->usm.asid, ERR_PTR(err));
910 				goto retry;
911 			} else {
912 				drm_err(&vm->xe->drm,
913 					"VRAM allocation failed, retry count exceeded, asid=%u, errno=%pe\n",
914 					vm->usm.asid, ERR_PTR(err));
915 				return err;
916 			}
917 		}
918 	}
919 
920 	range_debug(range, "GET PAGES");
921 	err = xe_svm_range_get_pages(vm, range, &ctx);
922 	/* Corner case where CPU mappings have changed */
923 	if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
924 		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
925 		if (migrate_try_count > 0 || !ctx.devmem_only) {
926 			drm_dbg(&vm->xe->drm,
927 				"Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
928 				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
929 			range_debug(range, "PAGE FAULT - RETRY PAGES");
930 			goto retry;
931 		} else {
932 			drm_err(&vm->xe->drm,
933 				"Get pages failed, retry count exceeded, asid=%u, gpusvm=%p, errno=%pe\n",
934 				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
935 		}
936 	}
937 	if (err) {
938 		range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
939 		goto err_out;
940 	}
941 
942 	range_debug(range, "PAGE FAULT - BIND");
943 
944 retry_bind:
945 	xe_vm_lock(vm, false);
946 	fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
947 	if (IS_ERR(fence)) {
948 		xe_vm_unlock(vm);
949 		err = PTR_ERR(fence);
950 		if (err == -EAGAIN) {
951 			ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
952 			range_debug(range, "PAGE FAULT - RETRY BIND");
953 			goto retry;
954 		}
955 		if (xe_vm_validate_should_retry(NULL, err, &end))
956 			goto retry_bind;
957 		goto err_out;
958 	}
959 	xe_vm_unlock(vm);
960 
961 	dma_fence_wait(fence, false);
962 	dma_fence_put(fence);
963 
964 err_out:
965 
966 	return err;
967 }
968 
969 /**
970  * xe_svm_handle_pagefault() - SVM handle page fault
971  * @vm: The VM.
972  * @vma: The CPU address mirror VMA.
973  * @gt: The gt upon which the fault occurred.
974  * @fault_addr: The GPU fault address.
975  * @atomic: The fault atomic access bit.
976  *
977  * Create GPU bindings for a SVM page fault. Optionally migrate to device
978  * memory.
979  *
980  * Return: 0 on success, negative error code on error.
981  */
982 int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
983 			    struct xe_gt *gt, u64 fault_addr,
984 			    bool atomic)
985 {
986 	int need_vram, ret;
987 retry:
988 	need_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic);
989 	if (need_vram < 0)
990 		return need_vram;
991 
992 	ret =  __xe_svm_handle_pagefault(vm, vma, gt, fault_addr,
993 					 need_vram ? true : false);
994 	if (ret == -EAGAIN) {
995 		/*
996 		 * Retry once on -EAGAIN to re-lookup the VMA, as the original VMA
997 		 * may have been split by xe_svm_range_set_default_attr.
998 		 */
999 		vma = xe_vm_find_vma_by_addr(vm, fault_addr);
1000 		if (!vma)
1001 			return -EINVAL;
1002 
1003 		goto retry;
1004 	}
1005 	return ret;
1006 }
1007 
1008 /**
1009  * xe_svm_has_mapping() - SVM has mappings
1010  * @vm: The VM.
1011  * @start: Start address.
1012  * @end: End address.
1013  *
1014  * Check if an address range has SVM mappings.
1015  *
1016  * Return: True if address range has a SVM mapping, False otherwise
1017  */
1018 bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
1019 {
1020 	return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
1021 }
1022 
1023 /**
1024  * xe_svm_unmap_address_range() - Unmap SVM mappings and ranges
1025  * @vm: The VM
1026  * @start: start addr
1027  * @end: end addr
1028  *
1029  * This function unmaps SVM ranges if the start or end address falls inside them.
1030  */
1031 void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
1032 {
1033 	struct drm_gpusvm_notifier *notifier, *next;
1034 
1035 	lockdep_assert_held_write(&vm->lock);
1036 
1037 	drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) {
1038 		struct drm_gpusvm_range *range, *__next;
1039 
1040 		drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
1041 			if (start > drm_gpusvm_range_start(range) ||
1042 			    end < drm_gpusvm_range_end(range)) {
1043 				if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
1044 					drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
1045 				drm_gpusvm_range_get(range);
1046 				__xe_svm_garbage_collector(vm, to_xe_range(range));
1047 				if (!list_empty(&to_xe_range(range)->garbage_collector_link)) {
1048 					spin_lock(&vm->svm.garbage_collector.lock);
1049 					list_del(&to_xe_range(range)->garbage_collector_link);
1050 					spin_unlock(&vm->svm.garbage_collector.lock);
1051 				}
1052 				drm_gpusvm_range_put(range);
1053 			}
1054 		}
1055 	}
1056 }
1057 
1058 /**
1059  * xe_svm_bo_evict() - SVM evict BO to system memory
1060  * @bo: BO to evict
1061  *
1062  * SVM evict BO to system memory. GPU SVM layer ensures all device pages
1063  * are evicted before returning.
1064  *
1065  * Return: 0 on success, standard error code otherwise
1066  */
1067 int xe_svm_bo_evict(struct xe_bo *bo)
1068 {
1069 	return drm_pagemap_evict_to_ram(&bo->devmem_allocation);
1070 }
1071 
1072 /**
1073  * xe_svm_range_find_or_insert() - Find or insert GPU SVM range
1074  * @vm: xe_vm pointer
1075  * @vma: Pointer to the CPU address mirror struct xe_vma
1076  * @vma:  Pointer to struct xe_vma which mirrors CPU
1077  * @ctx: GPU SVM context
1078  *
1079  * This function finds or inserts a newly allocated SVM range based on the
1080  * address.
1081  *
1082  * Return: Pointer to the SVM range on success, ERR_PTR() on failure.
1083  */
1084 struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
1085 						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
1086 {
1087 	struct drm_gpusvm_range *r;
1088 
1089 	r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
1090 					    xe_vma_start(vma), xe_vma_end(vma), ctx);
1091 	if (IS_ERR(r))
1092 		return ERR_CAST(r);
1093 
1094 	return to_xe_range(r);
1095 }
1096 
1097 /**
1098  * xe_svm_range_get_pages() - Get pages for a SVM range
1099  * @vm: Pointer to the struct xe_vm
1100  * @range: Pointer to the xe SVM range structure
1101  * @ctx: GPU SVM context
1102  *
1103  * This function gets pages for a SVM range and ensures they are mapped for
1104  * DMA access. In case of failure with -EOPNOTSUPP, it evicts the range.
1105  *
1106  * Return: 0 on success, negative error code on failure.
1107  */
1108 int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
1109 			   struct drm_gpusvm_ctx *ctx)
1110 {
1111 	int err = 0;
1112 
1113 	err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
1114 	if (err == -EOPNOTSUPP) {
1115 		range_debug(range, "PAGE FAULT - EVICT PAGES");
1116 		drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
1117 	}
1118 
1119 	return err;
1120 }
1121 
1122 /**
1123  * xe_svm_ranges_zap_ptes_in_range - clear ptes of svm ranges in input range
1124  * @vm: Pointer to the xe_vm structure
1125  * @start: Start of the input range
1126  * @end: End of the input range
1127  *
1128  * This function removes the page table entries (PTEs) associated
1129  * with the svm ranges within the given input start and end
1130  * with the SVM ranges within the given input start and end.
1131  *
1132  * Return: tile_mask indicating which GTs need to be TLB invalidated.
1133 u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
1134 {
1135 	struct drm_gpusvm_notifier *notifier;
1136 	struct xe_svm_range *range;
1137 	u64 adj_start, adj_end;
1138 	struct xe_tile *tile;
1139 	u8 tile_mask = 0;
1140 	u8 id;
1141 
1142 	lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
1143 		       lockdep_is_held_type(&vm->lock, 0));
1144 
1145 	drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) {
1146 		struct drm_gpusvm_range *r = NULL;
1147 
1148 		adj_start = max(start, drm_gpusvm_notifier_start(notifier));
1149 		adj_end = min(end, drm_gpusvm_notifier_end(notifier));
1150 		drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) {
1151 			range = to_xe_range(r);
1152 			for_each_tile(tile, vm->xe, id) {
1153 				if (xe_pt_zap_ptes_range(tile, vm, range)) {
1154 					tile_mask |= BIT(id);
1155 					/*
1156 					 * WRITE_ONCE pairs with READ_ONCE in
1157 					 * xe_vm_has_valid_gpu_mapping().
1158 					 * Must not fail after setting
1159 					 * tile_invalidated and before
1160 					 * TLB invalidation.
1161 					 */
1162 					WRITE_ONCE(range->tile_invalidated,
1163 						   range->tile_invalidated | BIT(id));
1164 				}
1165 			}
1166 		}
1167 	}
1168 
1169 	return tile_mask;
1170 }
1171 
1172 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
1173 
1174 static struct drm_pagemap *tile_local_pagemap(struct xe_tile *tile)
1175 {
1176 	return &tile->mem.vram->dpagemap;
1177 }
1178 
1179 /**
1180  * xe_vma_resolve_pagemap - Resolve the appropriate DRM pagemap for a VMA
1181  * @vma: Pointer to the xe_vma structure containing memory attributes
1182  * @tile: Pointer to the xe_tile structure used as fallback for VRAM mapping
1183  *
1184  * This function determines the correct DRM pagemap to use for a given VMA.
1185  * It first checks if a valid devmem_fd is provided in the VMA's preferred
1186  * location. If the devmem_fd is negative, it returns NULL, indicating no
1187  * pagemap is available and SMEM is to be used as the preferred location.
1188  * If the devmem_fd is equal to the default faulting
1189  * GT identifier, it returns the VRAM pagemap associated with the tile.
1190  *
1191  * Future support for multi-device configurations may use drm_pagemap_from_fd()
1192  * to resolve pagemaps from arbitrary file descriptors.
1193  *
1194  * Return: A pointer to the resolved drm_pagemap, or NULL if none is applicable.
1195  */
1196 struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
1197 {
1198 	s32 fd = (s32)vma->attr.preferred_loc.devmem_fd;
1199 
1200 	if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM)
1201 		return NULL;
1202 
1203 	if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE)
1204 		return IS_DGFX(tile_to_xe(tile)) ? tile_local_pagemap(tile) : NULL;
1205 
1206 	/* TODO: Support multi-device with drm_pagemap_from_fd(fd) */
1207 	return NULL;
1208 }
1209 
1210 /**
1211  * xe_svm_alloc_vram() - Allocate device memory pages for range,
1212  * migrating existing data.
1213  * @tile: tile to allocate vram from
1214  * @range: SVM range
1215  * @ctx: DRM GPU SVM context
1216  *
1217  * Return: 0 on success, error code on failure.
1218  */
1219 int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
1220 		      const struct drm_gpusvm_ctx *ctx)
1221 {
1222 	struct drm_pagemap *dpagemap;
1223 
1224 	xe_assert(tile_to_xe(tile), range->base.flags.migrate_devmem);
1225 	range_debug(range, "ALLOCATE VRAM");
1226 
1227 	dpagemap = tile_local_pagemap(tile);
1228 	return drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
1229 				       xe_svm_range_end(range),
1230 				       range->base.gpusvm->mm,
1231 				       ctx->timeslice_ms);
1232 }
1233 
1234 static struct drm_pagemap_addr
1235 xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
1236 			  struct device *dev,
1237 			  struct page *page,
1238 			  unsigned int order,
1239 			  enum dma_data_direction dir)
1240 {
1241 	struct device *pgmap_dev = dpagemap->dev;
1242 	enum drm_interconnect_protocol prot;
1243 	dma_addr_t addr;
1244 
1245 	if (pgmap_dev == dev) {
1246 		addr = xe_vram_region_page_to_dpa(page_to_vr(page), page);
1247 		prot = XE_INTERCONNECT_VRAM;
1248 	} else {
1249 		addr = DMA_MAPPING_ERROR;
1250 		prot = 0;
1251 	}
1252 
1253 	return drm_pagemap_addr_encode(addr, prot, order, dir);
1254 }
1255 
1256 static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
1257 	.device_map = xe_drm_pagemap_device_map,
1258 	.populate_mm = xe_drm_pagemap_populate_mm,
1259 };
1260 
1261 /**
1262  * xe_devm_add() - Remap and provide memmap backing for device memory
1263  * @tile: tile that the memory region belongs to
1264  * @vr: vram memory region to remap
1265  *
1266  * This remaps device memory to the host physical address space and creates
1267  * struct pages to back device memory.
1268  *
1269  * Return: 0 on success, standard error code otherwise
1270  */
1271 int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
1272 {
1273 	struct xe_device *xe = tile_to_xe(tile);
1274 	struct device *dev = &to_pci_dev(xe->drm.dev)->dev;
1275 	struct resource *res;
1276 	void *addr;
1277 	int ret;
1278 
1279 	res = devm_request_free_mem_region(dev, &iomem_resource,
1280 					   vr->usable_size);
1281 	if (IS_ERR(res)) {
1282 		ret = PTR_ERR(res);
1283 		return ret;
1284 	}
1285 
1286 	vr->pagemap.type = MEMORY_DEVICE_PRIVATE;
1287 	vr->pagemap.range.start = res->start;
1288 	vr->pagemap.range.end = res->end;
1289 	vr->pagemap.nr_range = 1;
1290 	vr->pagemap.ops = drm_pagemap_pagemap_ops_get();
1291 	vr->pagemap.owner = xe_svm_devm_owner(xe);
1292 	addr = devm_memremap_pages(dev, &vr->pagemap);
1293 
1294 	vr->dpagemap.dev = dev;
1295 	vr->dpagemap.ops = &xe_drm_pagemap_ops;
1296 
1297 	if (IS_ERR(addr)) {
1298 		devm_release_mem_region(dev, res->start, resource_size(res));
1299 		ret = PTR_ERR(addr);
1300 		drm_err(&xe->drm, "Failed to remap tile %d memory, errno %pe\n",
1301 			tile->id, ERR_PTR(ret));
1302 		return ret;
1303 	}
1304 	vr->hpa_base = res->start;
1305 
1306 	drm_dbg(&xe->drm, "Added tile %d memory [%llx-%llx] to devm, remapped to %pr\n",
1307 		tile->id, vr->io_start, vr->io_start + vr->usable_size, res);
1308 	return 0;
1309 }
1310 #else
1311 int xe_svm_alloc_vram(struct xe_tile *tile,
1312 		      struct xe_svm_range *range,
1313 		      const struct drm_gpusvm_ctx *ctx)
1314 {
1315 	return -EOPNOTSUPP;
1316 }
1317 
1318 int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
1319 {
1320 	return 0;
1321 }
1322 
1323 struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
1324 {
1325 	return NULL;
1326 }
1327 #endif
1328 
1329 /**
1330  * xe_svm_flush() - SVM flush
1331  * @vm: The VM.
1332  *
1333  * Flush all SVM actions.
1334  */
1335 void xe_svm_flush(struct xe_vm *vm)
1336 {
1337 	if (xe_vm_in_fault_mode(vm))
1338 		flush_work(&vm->svm.garbage_collector.work);
1339 }
1340