xref: /linux/drivers/gpu/drm/xe/xe_svm.c (revision 29042df3acdc7364af1c251b2a05f7c1c8fe0401)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2024 Intel Corporation
4  */
5 
6 #include <drm/drm_drv.h>
7 
8 #include "xe_bo.h"
9 #include "xe_gt_stats.h"
10 #include "xe_gt_tlb_invalidation.h"
11 #include "xe_migrate.h"
12 #include "xe_module.h"
13 #include "xe_pm.h"
14 #include "xe_pt.h"
15 #include "xe_svm.h"
16 #include "xe_tile.h"
17 #include "xe_ttm_vram_mgr.h"
18 #include "xe_vm.h"
19 #include "xe_vm_types.h"
20 #include "xe_vram_types.h"
21 
22 static bool xe_svm_range_in_vram(struct xe_svm_range *range)
23 {
24 	/*
25 	 * Advisory only check whether the range is currently backed by VRAM
26 	 * memory.
27 	 */
28 
29 	struct drm_gpusvm_range_flags flags = {
30 		/* Pairs with WRITE_ONCE in drm_gpusvm.c */
31 		.__flags = READ_ONCE(range->base.flags.__flags),
32 	};
33 
34 	return flags.has_devmem_pages;
35 }
36 
37 static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
38 {
39 	/* Not reliable without notifier lock */
40 	return xe_svm_range_in_vram(range) && range->tile_present;
41 }
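/*
 * Illustrative note (not from a call site in this file): because the two
 * helpers above are advisory, a caller that needs a stable answer re-checks
 * under the notifier lock, where the range flags cannot change, e.g.:
 *
 *	xe_svm_notifier_lock(vm);
 *	in_vram = xe_svm_range_in_vram(range);
 *	xe_svm_notifier_unlock(vm);
 *
 * xe_svm_range_validate() below follows this pattern.
 */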
42 
43 static struct xe_vm *gpusvm_to_vm(struct drm_gpusvm *gpusvm)
44 {
45 	return container_of(gpusvm, struct xe_vm, svm.gpusvm);
46 }
47 
48 static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
49 {
50 	return gpusvm_to_vm(r->gpusvm);
51 }
52 
53 #define range_debug(r__, operation__)					\
54 	vm_dbg(&range_to_vm(&(r__)->base)->xe->drm,			\
55 	       "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
56 	       "start=0x%014lx, end=0x%014lx, size=%lu",		\
57 	       (operation__), range_to_vm(&(r__)->base)->usm.asid,	\
58 	       (r__)->base.gpusvm,					\
59 	       xe_svm_range_in_vram((r__)) ? 1 : 0,			\
60 	       xe_svm_range_has_vram_binding((r__)) ? 1 : 0,		\
61 	       (r__)->base.notifier_seq,				\
62 	       xe_svm_range_start((r__)), xe_svm_range_end((r__)),	\
63 	       xe_svm_range_size((r__)))
64 
65 void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
66 {
67 	range_debug(range, operation);
68 }
69 
70 static void *xe_svm_devm_owner(struct xe_device *xe)
71 {
72 	return xe;
73 }
74 
75 static struct drm_gpusvm_range *
76 xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
77 {
78 	struct xe_svm_range *range;
79 
80 	range = kzalloc(sizeof(*range), GFP_KERNEL);
81 	if (!range)
82 		return NULL;
83 
84 	INIT_LIST_HEAD(&range->garbage_collector_link);
85 	xe_vm_get(gpusvm_to_vm(gpusvm));
86 
87 	return &range->base;
88 }
89 
90 static void xe_svm_range_free(struct drm_gpusvm_range *range)
91 {
92 	xe_vm_put(range_to_vm(range));
93 	kfree(range);
94 }
95 
96 static void
97 xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
98 				   const struct mmu_notifier_range *mmu_range)
99 {
100 	struct xe_device *xe = vm->xe;
101 
102 	range_debug(range, "GARBAGE COLLECTOR ADD");
103 
104 	drm_gpusvm_range_set_unmapped(&range->base, mmu_range);
105 
106 	spin_lock(&vm->svm.garbage_collector.lock);
107 	if (list_empty(&range->garbage_collector_link))
108 		list_add_tail(&range->garbage_collector_link,
109 			      &vm->svm.garbage_collector.range_list);
110 	spin_unlock(&vm->svm.garbage_collector.lock);
111 
112 	queue_work(xe_device_get_root_tile(xe)->primary_gt->usm.pf_wq,
113 		   &vm->svm.garbage_collector.work);
114 }
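/*
 * Descriptive note: ranges are only queued here and processed later because
 * this path runs in MMU notifier context, where the sleeping locks needed to
 * unbind (the VM lock and dma-resv) cannot be taken. The queue is drained by
 * xe_svm_garbage_collector_work_func() and by the page-fault handler before
 * it looks up ranges.
 */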
115 
116 static u8
117 xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
118 				  const struct mmu_notifier_range *mmu_range,
119 				  u64 *adj_start, u64 *adj_end)
120 {
121 	struct xe_svm_range *range = to_xe_range(r);
122 	struct xe_device *xe = vm->xe;
123 	struct xe_tile *tile;
124 	u8 tile_mask = 0;
125 	u8 id;
126 
127 	xe_svm_assert_in_notifier(vm);
128 
129 	range_debug(range, "NOTIFIER");
130 
131 	/* Skip if already unmapped or if no bindings exist */
132 	if (range->base.flags.unmapped || !range->tile_present)
133 		return 0;
134 
135 	range_debug(range, "NOTIFIER - EXECUTE");
136 
137 	/* Adjust invalidation to range boundaries */
138 	*adj_start = min(xe_svm_range_start(range), mmu_range->start);
139 	*adj_end = max(xe_svm_range_end(range), mmu_range->end);
140 
141 	/*
142 	 * XXX: Ideally would zap PTEs in one shot in xe_svm_invalidate but the
143 	 * invalidation code can't correctly cope with sparse ranges or
144 	 * invalidations spanning multiple ranges.
145 	 */
146 	for_each_tile(tile, xe, id)
147 		if (xe_pt_zap_ptes_range(tile, vm, range)) {
148 			tile_mask |= BIT(id);
149 			/*
150 			 * WRITE_ONCE pairs with READ_ONCE in
151 			 * xe_vm_has_valid_gpu_mapping()
152 			 */
153 			WRITE_ONCE(range->tile_invalidated,
154 				   range->tile_invalidated | BIT(id));
155 		}
156 
157 	return tile_mask;
158 }
159 
160 static void
161 xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r,
162 				const struct mmu_notifier_range *mmu_range)
163 {
164 	struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
165 
166 	xe_svm_assert_in_notifier(vm);
167 
168 	drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx);
169 	if (!xe_vm_is_closed(vm) && mmu_range->event == MMU_NOTIFY_UNMAP)
170 		xe_svm_garbage_collector_add_range(vm, to_xe_range(r),
171 						   mmu_range);
172 }
173 
174 static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
175 			      struct drm_gpusvm_notifier *notifier,
176 			      const struct mmu_notifier_range *mmu_range)
177 {
178 	struct xe_vm *vm = gpusvm_to_vm(gpusvm);
179 	struct xe_device *xe = vm->xe;
180 	struct drm_gpusvm_range *r, *first;
181 	u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
182 	u8 tile_mask = 0;
183 	long err;
184 
185 	xe_svm_assert_in_notifier(vm);
186 
187 	vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm,
188 	       "INVALIDATE: asid=%u, gpusvm=%p, seqno=%lu, start=0x%016lx, end=0x%016lx, event=%d",
189 	       vm->usm.asid, gpusvm, notifier->notifier.invalidate_seq,
190 	       mmu_range->start, mmu_range->end, mmu_range->event);
191 
192 	/* Adjust invalidation to notifier boundaries */
193 	adj_start = max(drm_gpusvm_notifier_start(notifier), adj_start);
194 	adj_end = min(drm_gpusvm_notifier_end(notifier), adj_end);
195 
196 	first = drm_gpusvm_range_find(notifier, adj_start, adj_end);
197 	if (!first)
198 		return;
199 
200 	/*
201 	 * PTs may be getting destroyed, so it is not safe to touch them, but
202 	 * they should already be invalidated at this point in time. Regardless,
203 	 * we still need to ensure any DMA mappings are unmapped here.
204 	 */
205 	if (xe_vm_is_closed(vm))
206 		goto range_notifier_event_end;
207 
208 	/*
209 	 * XXX: Less than ideal to always wait on VM's resv slots if an
210 	 * invalidation is not required. Could walk range list twice to figure
211 	 * out whether an invalidation is needed, but that is also not ideal.
212 	 */
213 	err = dma_resv_wait_timeout(xe_vm_resv(vm),
214 				    DMA_RESV_USAGE_BOOKKEEP,
215 				    false, MAX_SCHEDULE_TIMEOUT);
216 	XE_WARN_ON(err <= 0);
217 
218 	r = first;
219 	drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
220 		tile_mask |= xe_svm_range_notifier_event_begin(vm, r, mmu_range,
221 							       &adj_start,
222 							       &adj_end);
223 	if (!tile_mask)
224 		goto range_notifier_event_end;
225 
226 	xe_device_wmb(xe);
227 
228 	err = xe_vm_range_tilemask_tlb_invalidation(vm, adj_start, adj_end, tile_mask);
229 	WARN_ON_ONCE(err);
230 
231 range_notifier_event_end:
232 	r = first;
233 	drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
234 		xe_svm_range_notifier_event_end(vm, r, mmu_range);
235 }
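/*
 * Recap of the invalidation sequence above (descriptive only):
 * 1. Clamp the CPU invalidation range to the notifier boundaries.
 * 2. Wait on the VM's bookkeep dma-resv slots so in-flight binds settle.
 * 3. Zap PTEs per tile for each affected range, accumulating a tile mask.
 * 4. Issue a write barrier followed by a ranged TLB invalidation.
 * 5. Unmap DMA pages and, on MMU_NOTIFY_UNMAP events, queue ranges for
 *    garbage collection.
 */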
236 
237 static int __xe_svm_garbage_collector(struct xe_vm *vm,
238 				      struct xe_svm_range *range)
239 {
240 	struct dma_fence *fence;
241 
242 	range_debug(range, "GARBAGE COLLECTOR");
243 
244 	xe_vm_lock(vm, false);
245 	fence = xe_vm_range_unbind(vm, range);
246 	xe_vm_unlock(vm);
247 	if (IS_ERR(fence))
248 		return PTR_ERR(fence);
249 	dma_fence_put(fence);
250 
251 	drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);
252 
253 	return 0;
254 }
255 
256 static int xe_svm_garbage_collector(struct xe_vm *vm)
257 {
258 	struct xe_svm_range *range;
259 	int err;
260 
261 	lockdep_assert_held_write(&vm->lock);
262 
263 	if (xe_vm_is_closed_or_banned(vm))
264 		return -ENOENT;
265 
266 	spin_lock(&vm->svm.garbage_collector.lock);
267 	for (;;) {
268 		range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
269 						 typeof(*range),
270 						 garbage_collector_link);
271 		if (!range)
272 			break;
273 
274 		list_del(&range->garbage_collector_link);
275 		spin_unlock(&vm->svm.garbage_collector.lock);
276 
277 		err = __xe_svm_garbage_collector(vm, range);
278 		if (err) {
279 			drm_warn(&vm->xe->drm,
280 				 "Garbage collection failed: %pe\n",
281 				 ERR_PTR(err));
282 			xe_vm_kill(vm, true);
283 			return err;
284 		}
285 
286 		spin_lock(&vm->svm.garbage_collector.lock);
287 	}
288 	spin_unlock(&vm->svm.garbage_collector.lock);
289 
290 	return 0;
291 }
292 
293 static void xe_svm_garbage_collector_work_func(struct work_struct *w)
294 {
295 	struct xe_vm *vm = container_of(w, struct xe_vm,
296 					svm.garbage_collector.work);
297 
298 	down_write(&vm->lock);
299 	xe_svm_garbage_collector(vm);
300 	up_write(&vm->lock);
301 }
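/*
 * Descriptive note: xe_svm_garbage_collector() drops the spinlock around each
 * __xe_svm_garbage_collector() call because unbinding takes the VM dma-resv
 * lock, which may sleep; the list is therefore re-checked from the head on
 * every iteration rather than walked with list_for_each_entry_safe().
 */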
302 
303 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
304 
305 static struct xe_vram_region *page_to_vr(struct page *page)
306 {
307 	return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
308 }
309 
310 static u64 xe_vram_region_page_to_dpa(struct xe_vram_region *vr,
311 				      struct page *page)
312 {
313 	u64 dpa;
314 	u64 pfn = page_to_pfn(page);
315 	u64 offset;
316 
317 	xe_assert(vr->xe, is_device_private_page(page));
318 	xe_assert(vr->xe, (pfn << PAGE_SHIFT) >= vr->hpa_base);
319 
320 	offset = (pfn << PAGE_SHIFT) - vr->hpa_base;
321 	dpa = vr->dpa_base + offset;
322 
323 	return dpa;
324 }
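/*
 * Worked example with made-up numbers: if vr->hpa_base == 0x200000000 and
 * vr->dpa_base == 0x0, a device-private page whose host physical address
 * (pfn << PAGE_SHIFT) is 0x200040000 yields offset 0x40000 and hence
 * DPA 0x40000.
 */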
325 
326 enum xe_svm_copy_dir {
327 	XE_SVM_COPY_TO_VRAM,
328 	XE_SVM_COPY_TO_SRAM,
329 };
330 
331 static int xe_svm_copy(struct page **pages,
332 		       struct drm_pagemap_addr *pagemap_addr,
333 		       unsigned long npages, const enum xe_svm_copy_dir dir)
334 {
335 	struct xe_vram_region *vr = NULL;
336 	struct xe_device *xe;
337 	struct dma_fence *fence = NULL;
338 	unsigned long i;
339 #define XE_VRAM_ADDR_INVALID	~0x0ull
340 	u64 vram_addr = XE_VRAM_ADDR_INVALID;
341 	int err = 0, pos = 0;
342 	bool sram = dir == XE_SVM_COPY_TO_SRAM;
343 
344 	/*
345 	 * This flow is complex: it locates physically contiguous device pages,
346 	 * derives the starting physical address, and performs a single GPU copy
347 	 * for every 8M chunk in a DMA address array. Both device pages and
348 	 * DMA addresses may be sparsely populated. If either is NULL, a copy is
349 	 * triggered based on the current search state. The last GPU copy is
350 	 * waited on to ensure all copies are complete.
351 	 */
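	/*
	 * Worked example (hypothetical values): with three physically
	 * contiguous device pages at DPA 0x0, 0x1000 and 0x2000 (4K PAGE_SIZE)
	 * and valid DMA addresses for all of them, the loop records
	 * vram_addr = 0x0 and pos = 0 at i = 0, confirms contiguity at i = 1
	 * and i = 2, and issues a single three-page copy when the last-page
	 * condition fires (i - pos + incr == 3).
	 */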
352 
353 	for (i = 0; i < npages; ++i) {
354 		struct page *spage = pages[i];
355 		struct dma_fence *__fence;
356 		u64 __vram_addr;
357 		bool match = false, chunk, last;
358 
359 #define XE_MIGRATE_CHUNK_SIZE	SZ_8M
360 		chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
361 		last = (i + 1) == npages;
362 
363 		/* No CPU page and no device pages queued to copy */
364 		if (!pagemap_addr[i].addr && vram_addr == XE_VRAM_ADDR_INVALID)
365 			continue;
366 
367 		if (!vr && spage) {
368 			vr = page_to_vr(spage);
369 			xe = vr->xe;
370 		}
371 		XE_WARN_ON(spage && page_to_vr(spage) != vr);
372 
373 		/*
374 		 * CPU page and device page valid, capture physical address on
375 		 * first device page, check if physical contiguous on subsequent
376 		 * device pages.
377 		 */
378 		if (pagemap_addr[i].addr && spage) {
379 			__vram_addr = xe_vram_region_page_to_dpa(vr, spage);
380 			if (vram_addr == XE_VRAM_ADDR_INVALID) {
381 				vram_addr = __vram_addr;
382 				pos = i;
383 			}
384 
385 			match = vram_addr + PAGE_SIZE * (i - pos) == __vram_addr;
386 			/* Expected with contiguous memory */
387 			xe_assert(vr->xe, match);
388 
389 			if (pagemap_addr[i].order) {
390 				i += NR_PAGES(pagemap_addr[i].order) - 1;
391 				chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
392 				last = (i + 1) == npages;
393 			}
394 		}
395 
396 		/*
397 		 * Mismatched physical address, 8M copy chunk, or last page -
398 		 * trigger a copy.
399 		 */
400 		if (!match || chunk || last) {
401 			/*
402 			 * Extra page for first copy if last page and matching
403 			 * physical address.
404 			 */
405 			int incr = (match && last) ? 1 : 0;
406 
407 			if (vram_addr != XE_VRAM_ADDR_INVALID) {
408 				if (sram) {
409 					vm_dbg(&xe->drm,
410 					       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
411 					       vram_addr,
412 					       (u64)pagemap_addr[pos].addr, i - pos + incr);
413 					__fence = xe_migrate_from_vram(vr->migrate,
414 								       i - pos + incr,
415 								       vram_addr,
416 								       &pagemap_addr[pos]);
417 				} else {
418 					vm_dbg(&xe->drm,
419 					       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
420 					       (u64)pagemap_addr[pos].addr, vram_addr,
421 					       i - pos + incr);
422 					__fence = xe_migrate_to_vram(vr->migrate,
423 								     i - pos + incr,
424 								     &pagemap_addr[pos],
425 								     vram_addr);
426 				}
427 				if (IS_ERR(__fence)) {
428 					err = PTR_ERR(__fence);
429 					goto err_out;
430 				}
431 
432 				dma_fence_put(fence);
433 				fence = __fence;
434 			}
435 
436 			/* Setup physical address of next device page */
437 			if (pagemap_addr[i].addr && spage) {
438 				vram_addr = __vram_addr;
439 				pos = i;
440 			} else {
441 				vram_addr = XE_VRAM_ADDR_INVALID;
442 			}
443 
444 			/* Extra mismatched device page, copy it */
445 			if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
446 				if (sram) {
447 					vm_dbg(&xe->drm,
448 					       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
449 					       vram_addr, (u64)pagemap_addr[pos].addr, 1);
450 					__fence = xe_migrate_from_vram(vr->migrate, 1,
451 								       vram_addr,
452 								       &pagemap_addr[pos]);
453 				} else {
454 					vm_dbg(&xe->drm,
455 					       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
456 					       (u64)pagemap_addr[pos].addr, vram_addr, 1);
457 					__fence = xe_migrate_to_vram(vr->migrate, 1,
458 								     &pagemap_addr[pos],
459 								     vram_addr);
460 				}
461 				if (IS_ERR(__fence)) {
462 					err = PTR_ERR(__fence);
463 					goto err_out;
464 				}
465 
466 				dma_fence_put(fence);
467 				fence = __fence;
468 			}
469 		}
470 	}
471 
472 err_out:
473 	/* Wait for all copies to complete */
474 	if (fence) {
475 		dma_fence_wait(fence, false);
476 		dma_fence_put(fence);
477 	}
478 
479 	return err;
480 #undef XE_MIGRATE_CHUNK_SIZE
481 #undef XE_VRAM_ADDR_INVALID
482 }
483 
484 static int xe_svm_copy_to_devmem(struct page **pages,
485 				 struct drm_pagemap_addr *pagemap_addr,
486 				 unsigned long npages)
487 {
488 	return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM);
489 }
490 
491 static int xe_svm_copy_to_ram(struct page **pages,
492 			      struct drm_pagemap_addr *pagemap_addr,
493 			      unsigned long npages)
494 {
495 	return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM);
496 }
497 
498 static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
499 {
500 	return container_of(devmem_allocation, struct xe_bo, devmem_allocation);
501 }
502 
503 static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
504 {
505 	struct xe_bo *bo = to_xe_bo(devmem_allocation);
506 	struct xe_device *xe = xe_bo_device(bo);
507 
508 	xe_bo_put_async(bo);
509 	xe_pm_runtime_put(xe);
510 }
511 
512 static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
513 {
514 	return PHYS_PFN(offset + vr->hpa_base);
515 }
516 
517 static struct drm_buddy *vram_to_buddy(struct xe_vram_region *vram)
518 {
519 	return &vram->ttm.mm;
520 }
521 
522 static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
523 				      unsigned long npages, unsigned long *pfn)
524 {
525 	struct xe_bo *bo = to_xe_bo(devmem_allocation);
526 	struct ttm_resource *res = bo->ttm.resource;
527 	struct list_head *blocks = &to_xe_ttm_vram_mgr_resource(res)->blocks;
528 	struct drm_buddy_block *block;
529 	int j = 0;
530 
531 	list_for_each_entry(block, blocks, link) {
532 		struct xe_vram_region *vr = block->private;
533 		struct drm_buddy *buddy = vram_to_buddy(vr);
534 		u64 block_pfn = block_offset_to_pfn(vr, drm_buddy_block_offset(block));
535 		int i;
536 
537 		for (i = 0; i < drm_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i)
538 			pfn[j++] = block_pfn + i;
539 	}
540 
541 	return 0;
542 }
543 
544 static const struct drm_pagemap_devmem_ops dpagemap_devmem_ops = {
545 	.devmem_release = xe_svm_devmem_release,
546 	.populate_devmem_pfn = xe_svm_populate_devmem_pfn,
547 	.copy_to_devmem = xe_svm_copy_to_devmem,
548 	.copy_to_ram = xe_svm_copy_to_ram,
549 };
550 
551 #endif
552 
553 static const struct drm_gpusvm_ops gpusvm_ops = {
554 	.range_alloc = xe_svm_range_alloc,
555 	.range_free = xe_svm_range_free,
556 	.invalidate = xe_svm_invalidate,
557 };
558 
559 static const unsigned long fault_chunk_sizes[] = {
560 	SZ_2M,
561 	SZ_64K,
562 	SZ_4K,
563 };
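/*
 * Note (descriptive): the chunk sizes are ordered largest first, so larger
 * SVM ranges are preferred when the fault address, its alignment and the
 * backing CPU VMA allow it, falling back to 64K and finally 4K.
 */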
564 
565 /**
566  * xe_svm_init() - SVM initialize
567  * @vm: The VM.
568  *
569  * Initialize SVM state which is embedded within the VM.
570  *
571  * Return: 0 on success, negative error code on error.
572  */
573 int xe_svm_init(struct xe_vm *vm)
574 {
575 	int err;
576 
577 	spin_lock_init(&vm->svm.garbage_collector.lock);
578 	INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
579 	INIT_WORK(&vm->svm.garbage_collector.work,
580 		  xe_svm_garbage_collector_work_func);
581 
582 	err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
583 			      current->mm, xe_svm_devm_owner(vm->xe), 0,
584 			      vm->size, xe_modparam.svm_notifier_size * SZ_1M,
585 			      &gpusvm_ops, fault_chunk_sizes,
586 			      ARRAY_SIZE(fault_chunk_sizes));
587 	if (err)
588 		return err;
589 
590 	drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
591 
592 	return 0;
593 }
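/*
 * Illustrative lifecycle (a sketch; the real call sites live in the VM
 * create/close/destroy paths, outside this file):
 *
 *	err = xe_svm_init(vm);	// during VM creation
 *	...
 *	xe_svm_close(vm);	// once the VM has been marked closed
 *	xe_svm_fini(vm);	// when the VM is torn down
 */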
594 
595 /**
596  * xe_svm_close() - SVM close
597  * @vm: The VM.
598  *
599  * Close SVM state (i.e., stop and flush all SVM actions).
600  */
601 void xe_svm_close(struct xe_vm *vm)
602 {
603 	xe_assert(vm->xe, xe_vm_is_closed(vm));
604 	flush_work(&vm->svm.garbage_collector.work);
605 }
606 
607 /**
608  * xe_svm_fini() - SVM finalize
609  * @vm: The VM.
610  *
611  * Finalize SVM state which is embedded within the VM.
612  */
613 void xe_svm_fini(struct xe_vm *vm)
614 {
615 	xe_assert(vm->xe, xe_vm_is_closed(vm));
616 
617 	drm_gpusvm_fini(&vm->svm.gpusvm);
618 }
619 
620 static bool xe_svm_range_is_valid(struct xe_svm_range *range,
621 				  struct xe_tile *tile,
622 				  bool devmem_only)
623 {
624 	return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
625 					    range->tile_invalidated) &&
626 		(!devmem_only || xe_svm_range_in_vram(range)));
627 }
628 
629 /**
630  * xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
631  * @vm: xe_vm pointer
632  * @range: Pointer to the SVM range structure
633  *
634  * Check whether the range has pages in VRAM and, if so, migrate them to SMEM.
635  */
636 void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
637 {
638 	if (xe_svm_range_in_vram(range))
639 		drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
640 }
641 
642 /**
643  * xe_svm_range_validate() - Check if the SVM range is valid
644  * @vm: xe_vm pointer
645  * @range: Pointer to the SVM range structure
646  * @tile_mask: Mask representing the tiles to be checked
647  * @devmem_preferred: if true, the range needs to be in devmem
648  *
649  * The xe_svm_range_validate() function checks if a range is
650  * valid and located in the desired memory region.
651  *
652  * Return: true if the range is valid, false otherwise
653  */
654 bool xe_svm_range_validate(struct xe_vm *vm,
655 			   struct xe_svm_range *range,
656 			   u8 tile_mask, bool devmem_preferred)
657 {
658 	bool ret;
659 
660 	xe_svm_notifier_lock(vm);
661 
662 	ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask &&
663 	       (devmem_preferred == range->base.flags.has_devmem_pages);
664 
665 	xe_svm_notifier_unlock(vm);
666 
667 	return ret;
668 }
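/*
 * Example usage (hypothetical caller): a prefetch-style path can skip ranges
 * that are already mapped with the desired placement inside its range loop:
 *
 *	if (xe_svm_range_validate(vm, range, BIT(tile->id), wants_vram))
 *		continue;	// nothing to do for this range
 *
 * "wants_vram" is a placeholder for the caller's placement decision.
 */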
669 
670 /**
671  * xe_svm_find_vma_start() - Find start of CPU VMA
672  * @vm: xe_vm pointer
673  * @start: start address
674  * @end: end address
675  * @vma: Pointer to struct xe_vma
676  *
677  *
678  * This function searches for a CPU VMA within the specified
679  * range [start, end] in the given VM. It adjusts the range based on the
680  * xe_vma start and end addresses. If no CPU VMA is found, it returns ULONG_MAX.
681  *
682  * Return: The starting address of the VMA within the range,
683  * or ULONG_MAX if no VMA is found
684  */
685 u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *vma)
686 {
687 	return drm_gpusvm_find_vma_start(&vm->svm.gpusvm,
688 					 max(start, xe_vma_start(vma)),
689 					 min(end, xe_vma_end(vma)));
690 }
691 
692 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
693 static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
694 				      unsigned long start, unsigned long end,
695 				      struct mm_struct *mm,
696 				      unsigned long timeslice_ms)
697 {
698 	struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap);
699 	struct xe_device *xe = vr->xe;
700 	struct device *dev = xe->drm.dev;
701 	struct drm_buddy_block *block;
702 	struct list_head *blocks;
703 	struct xe_bo *bo;
704 	ktime_t time_end = 0;
705 	int err, idx;
706 
707 	if (!drm_dev_enter(&xe->drm, &idx))
708 		return -ENODEV;
709 
710 	xe_pm_runtime_get(xe);
711 
712  retry:
713 	bo = xe_bo_create_locked(vr->xe, NULL, NULL, end - start,
714 				 ttm_bo_type_device,
715 				 (IS_DGFX(xe) ? XE_BO_FLAG_VRAM(vr) : XE_BO_FLAG_SYSTEM) |
716 				 XE_BO_FLAG_CPU_ADDR_MIRROR);
717 	if (IS_ERR(bo)) {
718 		err = PTR_ERR(bo);
719 		if (xe_vm_validate_should_retry(NULL, err, &time_end))
720 			goto retry;
721 		goto out_pm_put;
722 	}
723 
724 	drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
725 				&dpagemap_devmem_ops, dpagemap, end - start);
726 
727 	blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
728 	list_for_each_entry(block, blocks, link)
729 		block->private = vr;
730 
731 	xe_bo_get(bo);
732 
733 	/* Ensure the device has a pm ref while there are device pages active. */
734 	xe_pm_runtime_get_noresume(xe);
735 	err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
736 					    start, end, timeslice_ms,
737 					    xe_svm_devm_owner(xe));
738 	if (err)
739 		xe_svm_devmem_release(&bo->devmem_allocation);
740 
741 	xe_bo_unlock(bo);
742 	xe_bo_put(bo);
743 
744 out_pm_put:
745 	xe_pm_runtime_put(xe);
746 	drm_dev_exit(idx);
747 
748 	return err;
749 }
750 #endif
751 
752 static bool supports_4K_migration(struct xe_device *xe)
753 {
754 	if (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
755 		return false;
756 
757 	return true;
758 }
759 
760 /**
761  * xe_svm_range_needs_migrate_to_vram() - Decide whether an SVM range needs migration to VRAM
762  * @range: SVM range for which migration needs to be decided
763  * @vma: vma which has range
764  * @preferred_region_is_vram: preferred region for range is vram
765  *
766  * Return: True if the range needs migration and migration is supported, false otherwise
767  */
768 bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
769 					bool preferred_region_is_vram)
770 {
771 	struct xe_vm *vm = range_to_vm(&range->base);
772 	u64 range_size = xe_svm_range_size(range);
773 
774 	if (!range->base.flags.migrate_devmem || !preferred_region_is_vram)
775 		return false;
776 
777 	xe_assert(vm->xe, IS_DGFX(vm->xe));
778 
779 	if (preferred_region_is_vram && xe_svm_range_in_vram(range)) {
780 		drm_info(&vm->xe->drm, "Range is already in VRAM\n");
781 		return false;
782 	}
783 
784 	if (preferred_region_is_vram && range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
785 		drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
786 		return false;
787 	}
788 
789 	return true;
790 }
791 
792 /**
793  * xe_svm_handle_pagefault() - SVM handle page fault
794  * @vm: The VM.
795  * @vma: The CPU address mirror VMA.
796  * @gt: The gt upon which the fault occurred.
797  * @fault_addr: The GPU fault address.
798  * @atomic: The fault atomic access bit.
799  *
800  * Create GPU bindings for a SVM page fault. Optionally migrate to device
801  * memory.
802  *
803  * Return: 0 on success, negative error code on error.
804  */
805 int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
806 			    struct xe_gt *gt, u64 fault_addr,
807 			    bool atomic)
808 {
809 	struct drm_gpusvm_ctx ctx = {
810 		.read_only = xe_vma_read_only(vma),
811 		.devmem_possible = IS_DGFX(vm->xe) &&
812 			IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
813 		.check_pages_threshold = IS_DGFX(vm->xe) &&
814 			IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ? SZ_64K : 0,
815 		.devmem_only = atomic && IS_DGFX(vm->xe) &&
816 			IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
817 		.timeslice_ms = atomic && IS_DGFX(vm->xe) &&
818 			IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ?
819 			vm->xe->atomic_svm_timeslice_ms : 0,
820 	};
821 	struct xe_svm_range *range;
822 	struct dma_fence *fence;
823 	struct xe_tile *tile = gt_to_tile(gt);
824 	int migrate_try_count = ctx.devmem_only ? 3 : 1;
825 	ktime_t end = 0;
826 	int err;
827 
828 	lockdep_assert_held_write(&vm->lock);
829 	xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
830 
831 	xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, 1);
832 
833 retry:
834 	/* Always process UNMAPs first so the view of SVM ranges is current */
835 	err = xe_svm_garbage_collector(vm);
836 	if (err)
837 		return err;
838 
839 	range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
840 
841 	if (IS_ERR(range))
842 		return PTR_ERR(range);
843 
844 	if (ctx.devmem_only && !range->base.flags.migrate_devmem)
845 		return -EACCES;
846 
847 	if (xe_svm_range_is_valid(range, tile, ctx.devmem_only))
848 		return 0;
849 
850 	range_debug(range, "PAGE FAULT");
851 
852 	if (--migrate_try_count >= 0 &&
853 	    xe_svm_range_needs_migrate_to_vram(range, vma, IS_DGFX(vm->xe))) {
854 		err = xe_svm_alloc_vram(tile, range, &ctx);
855 		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
856 		if (err) {
857 			if (migrate_try_count || !ctx.devmem_only) {
858 				drm_dbg(&vm->xe->drm,
859 					"VRAM allocation failed, falling back to retrying fault, asid=%u, errno=%pe\n",
860 					vm->usm.asid, ERR_PTR(err));
861 				goto retry;
862 			} else {
863 				drm_err(&vm->xe->drm,
864 					"VRAM allocation failed, retry count exceeded, asid=%u, errno=%pe\n",
865 					vm->usm.asid, ERR_PTR(err));
866 				return err;
867 			}
868 		}
869 	}
870 
871 	range_debug(range, "GET PAGES");
872 	err = xe_svm_range_get_pages(vm, range, &ctx);
873 	/* Corner where CPU mappings have changed */
874 	if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
875 		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
876 		if (migrate_try_count > 0 || !ctx.devmem_only) {
877 			drm_dbg(&vm->xe->drm,
878 				"Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
879 				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
880 			range_debug(range, "PAGE FAULT - RETRY PAGES");
881 			goto retry;
882 		} else {
883 			drm_err(&vm->xe->drm,
884 				"Get pages failed, retry count exceeded, asid=%u, gpusvm=%p, errno=%pe\n",
885 				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
886 		}
887 	}
888 	if (err) {
889 		range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
890 		goto err_out;
891 	}
892 
893 	range_debug(range, "PAGE FAULT - BIND");
894 
895 retry_bind:
896 	xe_vm_lock(vm, false);
897 	fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
898 	if (IS_ERR(fence)) {
899 		xe_vm_unlock(vm);
900 		err = PTR_ERR(fence);
901 		if (err == -EAGAIN) {
902 			ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
903 			range_debug(range, "PAGE FAULT - RETRY BIND");
904 			goto retry;
905 		}
906 		if (xe_vm_validate_should_retry(NULL, err, &end))
907 			goto retry_bind;
908 		goto err_out;
909 	}
910 	xe_vm_unlock(vm);
911 
912 	dma_fence_wait(fence, false);
913 	dma_fence_put(fence);
914 
915 err_out:
916 
917 	return err;
918 }
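/*
 * Recap of the retry strategy above (descriptive only): VRAM allocation and
 * page collection failures fall back to a full retry while migrate_try_count
 * allows it, or unconditionally when device memory is not strictly required;
 * a -EAGAIN from the rebind restarts from the top (including garbage
 * collection), while other rebind errors either retry just the bind or fail
 * the fault.
 */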
919 
920 /**
921  * xe_svm_has_mapping() - SVM has mappings
922  * @vm: The VM.
923  * @start: Start address.
924  * @end: End address.
925  *
926  * Check if an address range has SVM mappings.
927  *
928  * Return: True if address range has a SVM mapping, False otherwise
929  */
930 bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
931 {
932 	return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
933 }
934 
935 /**
936  * xe_svm_bo_evict() - SVM evict BO to system memory
937  * @bo: BO to evict
938  *
939  * SVM evict BO to system memory. GPU SVM layer ensures all device pages
940  * are evicted before returning.
941  *
942  * Return: 0 on success, standard error code otherwise
943  */
944 int xe_svm_bo_evict(struct xe_bo *bo)
945 {
946 	return drm_pagemap_evict_to_ram(&bo->devmem_allocation);
947 }
948 
949 /**
950  * xe_svm_range_find_or_insert() - Find or insert GPU SVM range
951  * @vm: xe_vm pointer
952  * @addr: address for which range needs to be found/inserted
953  * @vma:  Pointer to struct xe_vma which mirrors CPU
954  * @ctx: GPU SVM context
955  *
956  * This function finds or inserts a newly allocated SVM range based on the
957  * address.
958  *
959  * Return: Pointer to the SVM range on success, ERR_PTR() on failure.
960  */
961 struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
962 						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
963 {
964 	struct drm_gpusvm_range *r;
965 
966 	r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
967 					    xe_vma_start(vma), xe_vma_end(vma), ctx);
968 	if (IS_ERR(r))
969 		return ERR_CAST(r);
970 
971 	return to_xe_range(r);
972 }
973 
974 /**
975  * xe_svm_range_get_pages() - Get pages for a SVM range
976  * @vm: Pointer to the struct xe_vm
977  * @range: Pointer to the xe SVM range structure
978  * @ctx: GPU SVM context
979  *
980  * This function gets pages for a SVM range and ensures they are mapped for
981  * DMA access. In case of failure with -EOPNOTSUPP, it evicts the range.
982  *
983  * Return: 0 on success, negative error code on failure.
984  */
985 int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
986 			   struct drm_gpusvm_ctx *ctx)
987 {
988 	int err = 0;
989 
990 	err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
991 	if (err == -EOPNOTSUPP) {
992 		range_debug(range, "PAGE FAULT - EVICT PAGES");
993 		drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
994 	}
995 
996 	return err;
997 }
998 
999 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
1000 
1001 static struct drm_pagemap *tile_local_pagemap(struct xe_tile *tile)
1002 {
1003 	return &tile->mem.vram->dpagemap;
1004 }
1005 
1006 /**
1007  * xe_svm_alloc_vram() - Allocate device memory pages for range,
1008  * migrating existing data.
1009  * @tile: tile to allocate vram from
1010  * @range: SVM range
1011  * @ctx: DRM GPU SVM context
1012  *
1013  * Return: 0 on success, error code on failure.
1014  */
1015 int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
1016 		      const struct drm_gpusvm_ctx *ctx)
1017 {
1018 	struct drm_pagemap *dpagemap;
1019 
1020 	xe_assert(tile_to_xe(tile), range->base.flags.migrate_devmem);
1021 	range_debug(range, "ALLOCATE VRAM");
1022 
1023 	dpagemap = tile_local_pagemap(tile);
1024 	return drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
1025 				       xe_svm_range_end(range),
1026 				       range->base.gpusvm->mm,
1027 				       ctx->timeslice_ms);
1028 }
1029 
1030 static struct drm_pagemap_addr
1031 xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
1032 			  struct device *dev,
1033 			  struct page *page,
1034 			  unsigned int order,
1035 			  enum dma_data_direction dir)
1036 {
1037 	struct device *pgmap_dev = dpagemap->dev;
1038 	enum drm_interconnect_protocol prot;
1039 	dma_addr_t addr;
1040 
1041 	if (pgmap_dev == dev) {
1042 		addr = xe_vram_region_page_to_dpa(page_to_vr(page), page);
1043 		prot = XE_INTERCONNECT_VRAM;
1044 	} else {
1045 		addr = DMA_MAPPING_ERROR;
1046 		prot = 0;
1047 	}
1048 
1049 	return drm_pagemap_addr_encode(addr, prot, order, dir);
1050 }
1051 
1052 static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
1053 	.device_map = xe_drm_pagemap_device_map,
1054 	.populate_mm = xe_drm_pagemap_populate_mm,
1055 };
1056 
1057 /**
1058  * xe_devm_add() - Remap and provide memmap backing for device memory
1059  * @tile: tile that the memory region belongs to
1060  * @vr: vram memory region to remap
1061  *
1062  * This remaps device memory into the host physical address space and creates
1063  * struct pages to back the device memory.
1064  *
1065  * Return: 0 on success, standard error code otherwise
1066  */
1067 int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
1068 {
1069 	struct xe_device *xe = tile_to_xe(tile);
1070 	struct device *dev = &to_pci_dev(xe->drm.dev)->dev;
1071 	struct resource *res;
1072 	void *addr;
1073 	int ret;
1074 
1075 	res = devm_request_free_mem_region(dev, &iomem_resource,
1076 					   vr->usable_size);
1077 	if (IS_ERR(res)) {
1078 		ret = PTR_ERR(res);
1079 		return ret;
1080 	}
1081 
1082 	vr->pagemap.type = MEMORY_DEVICE_PRIVATE;
1083 	vr->pagemap.range.start = res->start;
1084 	vr->pagemap.range.end = res->end;
1085 	vr->pagemap.nr_range = 1;
1086 	vr->pagemap.ops = drm_pagemap_pagemap_ops_get();
1087 	vr->pagemap.owner = xe_svm_devm_owner(xe);
1088 	addr = devm_memremap_pages(dev, &vr->pagemap);
1089 
1090 	vr->dpagemap.dev = dev;
1091 	vr->dpagemap.ops = &xe_drm_pagemap_ops;
1092 
1093 	if (IS_ERR(addr)) {
1094 		devm_release_mem_region(dev, res->start, resource_size(res));
1095 		ret = PTR_ERR(addr);
1096 		drm_err(&xe->drm, "Failed to remap tile %d memory, errno %pe\n",
1097 			tile->id, ERR_PTR(ret));
1098 		return ret;
1099 	}
1100 	vr->hpa_base = res->start;
1101 
1102 	drm_dbg(&xe->drm, "Added tile %d memory [%llx-%llx] to devm, remapped to %pr\n",
1103 		tile->id, vr->io_start, vr->io_start + vr->usable_size, res);
1104 	return 0;
1105 }
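/*
 * Note: vr->hpa_base recorded above is the value xe_vram_region_page_to_dpa()
 * subtracts from a device-private page's host physical address, keeping the
 * remapped resource and the DPA arithmetic consistent.
 */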
1106 #else
1107 int xe_svm_alloc_vram(struct xe_tile *tile,
1108 		      struct xe_svm_range *range,
1109 		      const struct drm_gpusvm_ctx *ctx)
1110 {
1111 	return -EOPNOTSUPP;
1112 }
1113 
1114 int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
1115 {
1116 	return 0;
1117 }
1118 #endif
1119 
1120 /**
1121  * xe_svm_flush() - SVM flush
1122  * @vm: The VM.
1123  *
1124  * Flush all SVM actions.
1125  */
1126 void xe_svm_flush(struct xe_vm *vm)
1127 {
1128 	if (xe_vm_in_fault_mode(vm))
1129 		flush_work(&vm->svm.garbage_collector.work);
1130 }
1131