xref: /linux/drivers/gpu/drm/xe/xe_svm.c (revision 917b10d90990fd2138b5dbc2d22cfa428c070ade)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2024 Intel Corporation
4  */
5 
6 #include "xe_bo.h"
7 #include "xe_gt_stats.h"
8 #include "xe_gt_tlb_invalidation.h"
9 #include "xe_migrate.h"
10 #include "xe_module.h"
11 #include "xe_pt.h"
12 #include "xe_svm.h"
13 #include "xe_ttm_vram_mgr.h"
14 #include "xe_vm.h"
15 #include "xe_vm_types.h"
16 
17 static bool xe_svm_range_in_vram(struct xe_svm_range *range)
18 {
19 	/*
20 	 * Advisory-only check of whether the range is currently backed by
21 	 * VRAM memory.
22 	 */
23 
24 	struct drm_gpusvm_range_flags flags = {
25 		/* Pairs with WRITE_ONCE in drm_gpusvm.c */
26 		.__flags = READ_ONCE(range->base.flags.__flags),
27 	};
28 
29 	return flags.has_devmem_pages;
30 }
31 
32 static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
33 {
34 	/* Not reliable without notifier lock */
35 	return xe_svm_range_in_vram(range) && range->tile_present;
36 }
37 
38 static struct xe_vm *gpusvm_to_vm(struct drm_gpusvm *gpusvm)
39 {
40 	return container_of(gpusvm, struct xe_vm, svm.gpusvm);
41 }
42 
43 static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
44 {
45 	return gpusvm_to_vm(r->gpusvm);
46 }
47 
48 #define range_debug(r__, operation__)					\
49 	vm_dbg(&range_to_vm(&(r__)->base)->xe->drm,			\
50 	       "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
51 	       "start=0x%014lx, end=0x%014lx, size=%lu",		\
52 	       (operation__), range_to_vm(&(r__)->base)->usm.asid,	\
53 	       (r__)->base.gpusvm,					\
54 	       xe_svm_range_in_vram((r__)) ? 1 : 0,			\
55 	       xe_svm_range_has_vram_binding((r__)) ? 1 : 0,		\
56 	       (r__)->base.notifier_seq,				\
57 	       xe_svm_range_start((r__)), xe_svm_range_end((r__)),	\
58 	       xe_svm_range_size((r__)))
59 
60 void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
61 {
62 	range_debug(range, operation);
63 }
64 
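/*
 * Owner cookie for device-private pages: the same value is used as the
 * pagemap owner in xe_devm_add() and passed to drm_gpusvm_init(), so that
 * device pages belonging to this xe device can be matched back to it.
 */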
65 static void *xe_svm_devm_owner(struct xe_device *xe)
66 {
67 	return xe;
68 }
69 
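/*
 * Range allocation callback for drm_gpusvm. Takes a VM reference which is
 * paired with the xe_vm_put() in xe_svm_range_free().
 */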
70 static struct drm_gpusvm_range *
71 xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
72 {
73 	struct xe_svm_range *range;
74 
75 	range = kzalloc(sizeof(*range), GFP_KERNEL);
76 	if (!range)
77 		return NULL;
78 
79 	INIT_LIST_HEAD(&range->garbage_collector_link);
80 	xe_vm_get(gpusvm_to_vm(gpusvm));
81 
82 	return &range->base;
83 }
84 
85 static void xe_svm_range_free(struct drm_gpusvm_range *range)
86 {
87 	xe_vm_put(range_to_vm(range));
88 	kfree(range);
89 }
90 
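/*
 * Mark the range as unmapped, queue it on the VM's garbage collector list
 * (if not already queued) and kick the garbage collector worker on the
 * primary GT's page fault workqueue.
 */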
91 static void
92 xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
93 				   const struct mmu_notifier_range *mmu_range)
94 {
95 	struct xe_device *xe = vm->xe;
96 
97 	range_debug(range, "GARBAGE COLLECTOR ADD");
98 
99 	drm_gpusvm_range_set_unmapped(&range->base, mmu_range);
100 
101 	spin_lock(&vm->svm.garbage_collector.lock);
102 	if (list_empty(&range->garbage_collector_link))
103 		list_add_tail(&range->garbage_collector_link,
104 			      &vm->svm.garbage_collector.range_list);
105 	spin_unlock(&vm->svm.garbage_collector.lock);
106 
107 	queue_work(xe_device_get_root_tile(xe)->primary_gt->usm.pf_wq,
108 		   &vm->svm.garbage_collector.work);
109 }
110 
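/*
 * Zap the GPU PTEs for @r on every tile with a binding, widening
 * [*adj_start, *adj_end) to cover the whole range. Ranges that are already
 * unmapped or have no binding are skipped. Returns a mask of tiles whose
 * PTEs were zapped and therefore need a TLB invalidation.
 */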
111 static u8
112 xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
113 				  const struct mmu_notifier_range *mmu_range,
114 				  u64 *adj_start, u64 *adj_end)
115 {
116 	struct xe_svm_range *range = to_xe_range(r);
117 	struct xe_device *xe = vm->xe;
118 	struct xe_tile *tile;
119 	u8 tile_mask = 0;
120 	u8 id;
121 
122 	xe_svm_assert_in_notifier(vm);
123 
124 	range_debug(range, "NOTIFIER");
125 
126 	/* Skip if already unmapped or if no bindings exist */
127 	if (range->base.flags.unmapped || !range->tile_present)
128 		return 0;
129 
130 	range_debug(range, "NOTIFIER - EXECUTE");
131 
132 	/* Adjust invalidation to range boundaries */
133 	*adj_start = min(xe_svm_range_start(range), mmu_range->start);
134 	*adj_end = max(xe_svm_range_end(range), mmu_range->end);
135 
136 	/*
137 	 * XXX: Ideally would zap PTEs in one shot in xe_svm_invalidate but the
138 	 * invalidation code can't correctly cope with sparse ranges or
139 	 * invalidations spanning multiple ranges.
140 	 */
141 	for_each_tile(tile, xe, id)
142 		if (xe_pt_zap_ptes_range(tile, vm, range)) {
143 			tile_mask |= BIT(id);
144 			/*
145 			 * WRITE_ONCE pairs with READ_ONCE in
146 			 * xe_vm_has_valid_gpu_mapping()
147 			 */
148 			WRITE_ONCE(range->tile_invalidated,
149 				   range->tile_invalidated | BIT(id));
150 		}
151 
152 	return tile_mask;
153 }
154 
155 static void
156 xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r,
157 				const struct mmu_notifier_range *mmu_range)
158 {
159 	struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
160 
161 	xe_svm_assert_in_notifier(vm);
162 
163 	drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx);
164 	if (!xe_vm_is_closed(vm) && mmu_range->event == MMU_NOTIFY_UNMAP)
165 		xe_svm_garbage_collector_add_range(vm, to_xe_range(r),
166 						   mmu_range);
167 }
168 
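/*
 * GPU SVM invalidation callback, called from the MMU notifier path (see
 * xe_svm_assert_in_notifier()). Zaps PTEs for all ranges overlapping the
 * notifier range, issues a TLB invalidation for the affected tiles, then
 * unmaps the ranges' pages and, on MMU_NOTIFY_UNMAP, queues them for
 * garbage collection.
 */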
169 static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
170 			      struct drm_gpusvm_notifier *notifier,
171 			      const struct mmu_notifier_range *mmu_range)
172 {
173 	struct xe_vm *vm = gpusvm_to_vm(gpusvm);
174 	struct xe_device *xe = vm->xe;
175 	struct drm_gpusvm_range *r, *first;
176 	u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
177 	u8 tile_mask = 0;
178 	long err;
179 
180 	xe_svm_assert_in_notifier(vm);
181 
182 	vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm,
183 	       "INVALIDATE: asid=%u, gpusvm=%p, seqno=%lu, start=0x%016lx, end=0x%016lx, event=%d",
184 	       vm->usm.asid, gpusvm, notifier->notifier.invalidate_seq,
185 	       mmu_range->start, mmu_range->end, mmu_range->event);
186 
187 	/* Adjust invalidation to notifier boundaries */
188 	adj_start = max(drm_gpusvm_notifier_start(notifier), adj_start);
189 	adj_end = min(drm_gpusvm_notifier_end(notifier), adj_end);
190 
191 	first = drm_gpusvm_range_find(notifier, adj_start, adj_end);
192 	if (!first)
193 		return;
194 
195 	/*
196 	 * PTs may be getting destroyed, so it is not safe to touch them, but
197 	 * the PTs should already be invalidated at this point in time.
198 	 * Regardless, we still need to ensure any DMA mappings are unmapped here.
199 	 */
200 	if (xe_vm_is_closed(vm))
201 		goto range_notifier_event_end;
202 
203 	/*
204 	 * XXX: Less than ideal to always wait on VM's resv slots if an
205 	 * invalidation is not required. Could walk the range list twice to
206 	 * figure out whether an invalidation is needed, but that is also not ideal.
207 	 */
208 	err = dma_resv_wait_timeout(xe_vm_resv(vm),
209 				    DMA_RESV_USAGE_BOOKKEEP,
210 				    false, MAX_SCHEDULE_TIMEOUT);
211 	XE_WARN_ON(err <= 0);
212 
213 	r = first;
214 	drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
215 		tile_mask |= xe_svm_range_notifier_event_begin(vm, r, mmu_range,
216 							       &adj_start,
217 							       &adj_end);
218 	if (!tile_mask)
219 		goto range_notifier_event_end;
220 
221 	xe_device_wmb(xe);
222 
223 	err = xe_vm_range_tilemask_tlb_invalidation(vm, adj_start, adj_end, tile_mask);
224 	WARN_ON_ONCE(err);
225 
226 range_notifier_event_end:
227 	r = first;
228 	drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
229 		xe_svm_range_notifier_event_end(vm, r, mmu_range);
230 }
231 
232 static int __xe_svm_garbage_collector(struct xe_vm *vm,
233 				      struct xe_svm_range *range)
234 {
235 	struct dma_fence *fence;
236 
237 	range_debug(range, "GARBAGE COLLECTOR");
238 
239 	xe_vm_lock(vm, false);
240 	fence = xe_vm_range_unbind(vm, range);
241 	xe_vm_unlock(vm);
242 	if (IS_ERR(fence))
243 		return PTR_ERR(fence);
244 	dma_fence_put(fence);
245 
246 	drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);
247 
248 	return 0;
249 }
250 
251 static int xe_svm_garbage_collector(struct xe_vm *vm)
252 {
253 	struct xe_svm_range *range;
254 	int err;
255 
256 	lockdep_assert_held_write(&vm->lock);
257 
258 	if (xe_vm_is_closed_or_banned(vm))
259 		return -ENOENT;
260 
261 	spin_lock(&vm->svm.garbage_collector.lock);
262 	for (;;) {
263 		range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
264 						 typeof(*range),
265 						 garbage_collector_link);
266 		if (!range)
267 			break;
268 
269 		list_del(&range->garbage_collector_link);
270 		spin_unlock(&vm->svm.garbage_collector.lock);
271 
272 		err = __xe_svm_garbage_collector(vm, range);
273 		if (err) {
274 			drm_warn(&vm->xe->drm,
275 				 "Garbage collection failed: %pe\n",
276 				 ERR_PTR(err));
277 			xe_vm_kill(vm, true);
278 			return err;
279 		}
280 
281 		spin_lock(&vm->svm.garbage_collector.lock);
282 	}
283 	spin_unlock(&vm->svm.garbage_collector.lock);
284 
285 	return 0;
286 }
287 
288 static void xe_svm_garbage_collector_work_func(struct work_struct *w)
289 {
290 	struct xe_vm *vm = container_of(w, struct xe_vm,
291 					svm.garbage_collector.work);
292 
293 	down_write(&vm->lock);
294 	xe_svm_garbage_collector(vm);
295 	up_write(&vm->lock);
296 }
297 
298 #if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
299 
300 static struct xe_vram_region *page_to_vr(struct page *page)
301 {
302 	return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
303 }
304 
305 static struct xe_tile *vr_to_tile(struct xe_vram_region *vr)
306 {
307 	return container_of(vr, struct xe_tile, mem.vram);
308 }
309 
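/*
 * Convert a device-private page to its device physical address (DPA) by
 * translating the page's offset from the VRAM region's host physical base.
 */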
310 static u64 xe_vram_region_page_to_dpa(struct xe_vram_region *vr,
311 				      struct page *page)
312 {
313 	u64 dpa;
314 	struct xe_tile *tile = vr_to_tile(vr);
315 	u64 pfn = page_to_pfn(page);
316 	u64 offset;
317 
318 	xe_tile_assert(tile, is_device_private_page(page));
319 	xe_tile_assert(tile, (pfn << PAGE_SHIFT) >= vr->hpa_base);
320 
321 	offset = (pfn << PAGE_SHIFT) - vr->hpa_base;
322 	dpa = vr->dpa_base + offset;
323 
324 	return dpa;
325 }
326 
327 enum xe_svm_copy_dir {
328 	XE_SVM_COPY_TO_VRAM,
329 	XE_SVM_COPY_TO_SRAM,
330 };
331 
332 static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
333 		       unsigned long npages, const enum xe_svm_copy_dir dir)
334 {
335 	struct xe_vram_region *vr = NULL;
336 	struct xe_tile *tile;
337 	struct dma_fence *fence = NULL;
338 	unsigned long i;
339 #define XE_VRAM_ADDR_INVALID	~0x0ull
340 	u64 vram_addr = XE_VRAM_ADDR_INVALID;
341 	int err = 0, pos = 0;
342 	bool sram = dir == XE_SVM_COPY_TO_SRAM;
343 
344 	/*
345 	 * This flow is complex: it locates physically contiguous device pages,
346 	 * derives the starting physical address, and performs a single GPU copy
347 	 * for every 8M chunk in a DMA address array. Both device pages and
348 	 * DMA addresses may be sparsely populated. If either is NULL, a copy is
349 	 * triggered based on the current search state. The last GPU copy is
350 	 * waited on to ensure all copies are complete.
351 	 */
352 
353 	for (i = 0; i < npages; ++i) {
354 		struct page *spage = pages[i];
355 		struct dma_fence *__fence;
356 		u64 __vram_addr;
357 		bool match = false, chunk, last;
358 
359 #define XE_MIGRATE_CHUNK_SIZE	SZ_8M
360 		chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
361 		last = (i + 1) == npages;
362 
363 		/* No CPU page and no device pages queued to copy */
364 		if (!dma_addr[i] && vram_addr == XE_VRAM_ADDR_INVALID)
365 			continue;
366 
367 		if (!vr && spage) {
368 			vr = page_to_vr(spage);
369 			tile = vr_to_tile(vr);
370 		}
371 		XE_WARN_ON(spage && page_to_vr(spage) != vr);
372 
373 		/*
374 		 * CPU page and device page valid, capture physical address on
375 		 * first device page, check if physical contiguous on subsequent
376 		 * device pages.
377 		 */
378 		if (dma_addr[i] && spage) {
379 			__vram_addr = xe_vram_region_page_to_dpa(vr, spage);
380 			if (vram_addr == XE_VRAM_ADDR_INVALID) {
381 				vram_addr = __vram_addr;
382 				pos = i;
383 			}
384 
385 			match = vram_addr + PAGE_SIZE * (i - pos) == __vram_addr;
386 		}
387 
388 		/*
389 		 * Mismatched physical address, 8M copy chunk, or last page -
390 		 * trigger a copy.
391 		 */
392 		if (!match || chunk || last) {
393 			/*
394 			 * Include the last page in this copy if it continues
395 			 * the current contiguous run of device pages.
396 			 */
397 			int incr = (match && last) ? 1 : 0;
398 
399 			if (vram_addr != XE_VRAM_ADDR_INVALID) {
400 				if (sram) {
401 					vm_dbg(&tile->xe->drm,
402 					       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
403 					       vram_addr, (u64)dma_addr[pos], i - pos + incr);
404 					__fence = xe_migrate_from_vram(tile->migrate,
405 								       i - pos + incr,
406 								       vram_addr,
407 								       dma_addr + pos);
408 				} else {
409 					vm_dbg(&tile->xe->drm,
410 					       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
411 					       (u64)dma_addr[pos], vram_addr, i - pos + incr);
412 					__fence = xe_migrate_to_vram(tile->migrate,
413 								     i - pos + incr,
414 								     dma_addr + pos,
415 								     vram_addr);
416 				}
417 				if (IS_ERR(__fence)) {
418 					err = PTR_ERR(__fence);
419 					goto err_out;
420 				}
421 
422 				dma_fence_put(fence);
423 				fence = __fence;
424 			}
425 
426 			/* Setup physical address of next device page */
427 			if (dma_addr[i] && spage) {
428 				vram_addr = __vram_addr;
429 				pos = i;
430 			} else {
431 				vram_addr = XE_VRAM_ADDR_INVALID;
432 			}
433 
434 			/* Extra mismatched device page, copy it */
435 			if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
436 				if (sram) {
437 					vm_dbg(&tile->xe->drm,
438 					       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
439 					       vram_addr, (u64)dma_addr[pos], 1);
440 					__fence = xe_migrate_from_vram(tile->migrate, 1,
441 								       vram_addr,
442 								       dma_addr + pos);
443 				} else {
444 					vm_dbg(&tile->xe->drm,
445 					       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
446 					       (u64)dma_addr[pos], vram_addr, 1);
447 					__fence = xe_migrate_to_vram(tile->migrate, 1,
448 								     dma_addr + pos,
449 								     vram_addr);
450 				}
451 				if (IS_ERR(__fence)) {
452 					err = PTR_ERR(__fence);
453 					goto err_out;
454 				}
455 
456 				dma_fence_put(fence);
457 				fence = __fence;
458 			}
459 		}
460 	}
461 
462 err_out:
463 	/* Wait for all copies to complete */
464 	if (fence) {
465 		dma_fence_wait(fence, false);
466 		dma_fence_put(fence);
467 	}
468 
469 	return err;
470 #undef XE_MIGRATE_CHUNK_SIZE
471 #undef XE_VRAM_ADDR_INVALID
472 }
473 
474 static int xe_svm_copy_to_devmem(struct page **pages, dma_addr_t *dma_addr,
475 				 unsigned long npages)
476 {
477 	return xe_svm_copy(pages, dma_addr, npages, XE_SVM_COPY_TO_VRAM);
478 }
479 
480 static int xe_svm_copy_to_ram(struct page **pages, dma_addr_t *dma_addr,
481 			      unsigned long npages)
482 {
483 	return xe_svm_copy(pages, dma_addr, npages, XE_SVM_COPY_TO_SRAM);
484 }
485 
486 static struct xe_bo *to_xe_bo(struct drm_gpusvm_devmem *devmem_allocation)
487 {
488 	return container_of(devmem_allocation, struct xe_bo, devmem_allocation);
489 }
490 
491 static void xe_svm_devmem_release(struct drm_gpusvm_devmem *devmem_allocation)
492 {
493 	struct xe_bo *bo = to_xe_bo(devmem_allocation);
494 
495 	xe_bo_put_async(bo);
496 }
497 
498 static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
499 {
500 	return PHYS_PFN(offset + vr->hpa_base);
501 }
502 
503 static struct drm_buddy *tile_to_buddy(struct xe_tile *tile)
504 {
505 	return &tile->mem.vram.ttm.mm;
506 }
507 
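/*
 * Populate the PFN array for a devmem allocation by walking the buddy
 * blocks backing the BO and converting each block offset to host PFNs.
 */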
508 static int xe_svm_populate_devmem_pfn(struct drm_gpusvm_devmem *devmem_allocation,
509 				      unsigned long npages, unsigned long *pfn)
510 {
511 	struct xe_bo *bo = to_xe_bo(devmem_allocation);
512 	struct ttm_resource *res = bo->ttm.resource;
513 	struct list_head *blocks = &to_xe_ttm_vram_mgr_resource(res)->blocks;
514 	struct drm_buddy_block *block;
515 	int j = 0;
516 
517 	list_for_each_entry(block, blocks, link) {
518 		struct xe_vram_region *vr = block->private;
519 		struct xe_tile *tile = vr_to_tile(vr);
520 		struct drm_buddy *buddy = tile_to_buddy(tile);
521 		u64 block_pfn = block_offset_to_pfn(vr, drm_buddy_block_offset(block));
522 		int i;
523 
524 		for (i = 0; i < drm_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i)
525 			pfn[j++] = block_pfn + i;
526 	}
527 
528 	return 0;
529 }
530 
531 static const struct drm_gpusvm_devmem_ops gpusvm_devmem_ops = {
532 	.devmem_release = xe_svm_devmem_release,
533 	.populate_devmem_pfn = xe_svm_populate_devmem_pfn,
534 	.copy_to_devmem = xe_svm_copy_to_devmem,
535 	.copy_to_ram = xe_svm_copy_to_ram,
536 };
537 
538 #endif
539 
540 static const struct drm_gpusvm_ops gpusvm_ops = {
541 	.range_alloc = xe_svm_range_alloc,
542 	.range_free = xe_svm_range_free,
543 	.invalidate = xe_svm_invalidate,
544 };
545 
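/* SVM range chunk sizes passed to drm_gpusvm_init(), ordered largest first */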
546 static const unsigned long fault_chunk_sizes[] = {
547 	SZ_2M,
548 	SZ_64K,
549 	SZ_4K,
550 };
551 
552 /**
553  * xe_svm_init() - SVM initialize
554  * @vm: The VM.
555  *
556  * Initialize SVM state which is embedded within the VM.
557  *
558  * Return: 0 on success, negative error code on error.
559  */
560 int xe_svm_init(struct xe_vm *vm)
561 {
562 	int err;
563 
564 	spin_lock_init(&vm->svm.garbage_collector.lock);
565 	INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
566 	INIT_WORK(&vm->svm.garbage_collector.work,
567 		  xe_svm_garbage_collector_work_func);
568 
569 	err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
570 			      current->mm, xe_svm_devm_owner(vm->xe), 0,
571 			      vm->size, xe_modparam.svm_notifier_size * SZ_1M,
572 			      &gpusvm_ops, fault_chunk_sizes,
573 			      ARRAY_SIZE(fault_chunk_sizes));
574 	if (err)
575 		return err;
576 
577 	drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
578 
579 	return 0;
580 }
581 
582 /**
583  * xe_svm_close() - SVM close
584  * @vm: The VM.
585  *
586  * Close SVM state (i.e., stop and flush all SVM actions).
587  */
588 void xe_svm_close(struct xe_vm *vm)
589 {
590 	xe_assert(vm->xe, xe_vm_is_closed(vm));
591 	flush_work(&vm->svm.garbage_collector.work);
592 }
593 
594 /**
595  * xe_svm_fini() - SVM finalize
596  * @vm: The VM.
597  *
598  * Finalize SVM state which is embedded within the VM.
599  */
600 void xe_svm_fini(struct xe_vm *vm)
601 {
602 	xe_assert(vm->xe, xe_vm_is_closed(vm));
603 
604 	drm_gpusvm_fini(&vm->svm.gpusvm);
605 }
606 
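/*
 * A range is considered valid for @tile if it has a GPU mapping that has not
 * been invalidated and, when @devmem_only is set, is backed by VRAM. The
 * VRAM check is advisory only (see xe_svm_range_in_vram()).
 */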
607 static bool xe_svm_range_is_valid(struct xe_svm_range *range,
608 				  struct xe_tile *tile,
609 				  bool devmem_only)
610 {
611 	return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
612 					    range->tile_invalidated) &&
613 		(!devmem_only || xe_svm_range_in_vram(range)));
614 }
615 
616 /**
617  * xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
618  * @vm: xe_vm pointer
619  * @range: Pointer to the SVM range structure
620  *
621  * Check if the range has pages in VRAM and, if so, migrate them to SMEM.
622  */
623 void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
624 {
625 	if (xe_svm_range_in_vram(range))
626 		drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
627 }
628 
629 /**
630  * xe_svm_range_validate() - Check if the SVM range is valid
631  * @vm: xe_vm pointer
632  * @range: Pointer to the SVM range structure
633  * @tile_mask: Mask representing the tiles to be checked
634  * @devmem_preferred: if true, the range needs to be in devmem
635  *
636  * The xe_svm_range_validate() function checks if a range is
637  * valid and located in the desired memory region.
638  *
639  * Return: true if the range is valid, false otherwise
640  */
641 bool xe_svm_range_validate(struct xe_vm *vm,
642 			   struct xe_svm_range *range,
643 			   u8 tile_mask, bool devmem_preferred)
644 {
645 	bool ret;
646 
647 	xe_svm_notifier_lock(vm);
648 
649 	ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask &&
650 	       (devmem_preferred == range->base.flags.has_devmem_pages);
651 
652 	xe_svm_notifier_unlock(vm);
653 
654 	return ret;
655 }
656 
657 /**
658  * xe_svm_find_vma_start() - Find start of CPU VMA
659  * @vm: xe_vm pointer
660  * @start: start address
661  * @end: end address
662  * @vma: Pointer to struct xe_vma
663  *
664  * This function searches for a CPU VMA within the specified range
665  * [start, end] in the given VM. It adjusts the range based on the xe_vma
666  * start and end addresses. If no CPU VMA is found, it returns ULONG_MAX.
668  *
669  * Return: The starting address of the VMA within the range,
670  * or ULONG_MAX if no VMA is found
671  */
672 u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *vma)
673 {
674 	return drm_gpusvm_find_vma_start(&vm->svm.gpusvm,
675 					 max(start, xe_vma_start(vma)),
676 					 min(end, xe_vma_end(vma)));
677 }
678 
679 #if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
680 static struct xe_vram_region *tile_to_vr(struct xe_tile *tile)
681 {
682 	return &tile->mem.vram;
683 }
684 
685 /**
686  * xe_svm_alloc_vram() - Allocate device memory pages for range,
687  * migrating existing data.
688  * @vm: The VM.
689  * @tile: tile to allocate vram from
690  * @range: SVM range
691  * @ctx: DRM GPU SVM context
692  *
693  * Return: 0 on success, error code on failure.
694  */
695 int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
696 		      struct xe_svm_range *range,
697 		      const struct drm_gpusvm_ctx *ctx)
698 {
699 	struct mm_struct *mm = vm->svm.gpusvm.mm;
700 	struct xe_vram_region *vr = tile_to_vr(tile);
701 	struct drm_buddy_block *block;
702 	struct list_head *blocks;
703 	struct xe_bo *bo;
704 	ktime_t end = 0;
705 	int err;
706 
707 	range_debug(range, "ALLOCATE VRAM");
708 
709 	if (!mmget_not_zero(mm))
710 		return -EFAULT;
711 	mmap_read_lock(mm);
712 
713 retry:
714 	bo = xe_bo_create_locked(tile_to_xe(tile), NULL, NULL,
715 				 xe_svm_range_size(range),
716 				 ttm_bo_type_device,
717 				 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
718 				 XE_BO_FLAG_CPU_ADDR_MIRROR);
719 	if (IS_ERR(bo)) {
720 		err = PTR_ERR(bo);
721 		if (xe_vm_validate_should_retry(NULL, err, &end))
722 			goto retry;
723 		goto unlock;
724 	}
725 
726 	drm_gpusvm_devmem_init(&bo->devmem_allocation,
727 			       vm->xe->drm.dev, mm,
728 			       &gpusvm_devmem_ops,
729 			       &tile->mem.vram.dpagemap,
730 			       xe_svm_range_size(range));
731 
732 	blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
733 	list_for_each_entry(block, blocks, link)
734 		block->private = vr;
735 
736 	xe_bo_get(bo);
737 	err = drm_gpusvm_migrate_to_devmem(&vm->svm.gpusvm, &range->base,
738 					   &bo->devmem_allocation, ctx);
739 	if (err)
740 		xe_svm_devmem_release(&bo->devmem_allocation);
741 
742 	xe_bo_unlock(bo);
743 	xe_bo_put(bo);
744 
745 unlock:
746 	mmap_read_unlock(mm);
747 	mmput(mm);
748 
749 	return err;
750 }
751 #endif
752 
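/* Platforms that require 64K VRAM pages cannot migrate SZ_4K ranges */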
753 static bool supports_4K_migration(struct xe_device *xe)
754 {
755 	if (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
756 		return false;
757 
758 	return true;
759 }
760 
761 /**
762  * xe_svm_range_needs_migrate_to_vram() - Check if SVM range needs migration to VRAM
763  * @range: SVM range for which migration needs to be decided
764  * @vma: vma which has range
765  * @preferred_region_is_vram: preferred region for range is vram
766  *
767  * Return: True if the range needs migration and migration is supported, false otherwise
768  */
769 bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
770 					bool preferred_region_is_vram)
771 {
772 	struct xe_vm *vm = range_to_vm(&range->base);
773 	u64 range_size = xe_svm_range_size(range);
774 
775 	if (!range->base.flags.migrate_devmem || !preferred_region_is_vram)
776 		return false;
777 
778 	xe_assert(vm->xe, IS_DGFX(vm->xe));
779 
780 	if (preferred_region_is_vram && xe_svm_range_in_vram(range)) {
781 		drm_info(&vm->xe->drm, "Range is already in VRAM\n");
782 		return false;
783 	}
784 
785 	if (preferred_region_is_vram && range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
786 		drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
787 		return false;
788 	}
789 
790 	return true;
791 }
792 
793 /**
794  * xe_svm_handle_pagefault() - SVM handle page fault
795  * @vm: The VM.
796  * @vma: The CPU address mirror VMA.
797  * @gt: The gt upon which the fault occurred.
798  * @fault_addr: The GPU fault address.
799  * @atomic: The fault atomic access bit.
800  *
801  * Create GPU bindings for a SVM page fault. Optionally migrate to device
802  * memory.
803  *
804  * Return: 0 on success, negative error code on error.
805  */
806 int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
807 			    struct xe_gt *gt, u64 fault_addr,
808 			    bool atomic)
809 {
810 	struct drm_gpusvm_ctx ctx = {
811 		.read_only = xe_vma_read_only(vma),
812 		.devmem_possible = IS_DGFX(vm->xe) &&
813 			IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
814 		.check_pages_threshold = IS_DGFX(vm->xe) &&
815 			IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? SZ_64K : 0,
816 		.devmem_only = atomic && IS_DGFX(vm->xe) &&
817 			IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
818 		.timeslice_ms = atomic && IS_DGFX(vm->xe) &&
819 			IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ?
820 			vm->xe->atomic_svm_timeslice_ms : 0,
821 	};
822 	struct xe_svm_range *range;
823 	struct dma_fence *fence;
824 	struct xe_tile *tile = gt_to_tile(gt);
825 	int migrate_try_count = ctx.devmem_only ? 3 : 1;
826 	ktime_t end = 0;
827 	int err;
828 
829 	lockdep_assert_held_write(&vm->lock);
830 	xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
831 
832 	xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, 1);
833 
834 retry:
835 	/* Always process UNMAPs first so the view of SVM ranges is current */
836 	err = xe_svm_garbage_collector(vm);
837 	if (err)
838 		return err;
839 
840 	range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
841 
842 	if (IS_ERR(range))
843 		return PTR_ERR(range);
844 
845 	if (ctx.devmem_only && !range->base.flags.migrate_devmem)
846 		return -EACCES;
847 
848 	if (xe_svm_range_is_valid(range, tile, ctx.devmem_only))
849 		return 0;
850 
851 	range_debug(range, "PAGE FAULT");
852 
853 	if (--migrate_try_count >= 0 &&
854 	    xe_svm_range_needs_migrate_to_vram(range, vma, IS_DGFX(vm->xe))) {
855 		err = xe_svm_alloc_vram(vm, tile, range, &ctx);
856 		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
857 		if (err) {
858 			if (migrate_try_count || !ctx.devmem_only) {
859 				drm_dbg(&vm->xe->drm,
860 					"VRAM allocation failed, falling back to retrying fault, asid=%u, errno=%pe\n",
861 					vm->usm.asid, ERR_PTR(err));
862 				goto retry;
863 			} else {
864 				drm_err(&vm->xe->drm,
865 					"VRAM allocation failed, retry count exceeded, asid=%u, errno=%pe\n",
866 					vm->usm.asid, ERR_PTR(err));
867 				return err;
868 			}
869 		}
870 	}
871 
872 	range_debug(range, "GET PAGES");
873 	err = xe_svm_range_get_pages(vm, range, &ctx);
874 	/* Corner case where CPU mappings have changed */
875 	if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
876 		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
877 		if (migrate_try_count > 0 || !ctx.devmem_only) {
878 			drm_dbg(&vm->xe->drm,
879 				"Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
880 				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
881 			range_debug(range, "PAGE FAULT - RETRY PAGES");
882 			goto retry;
883 		} else {
884 			drm_err(&vm->xe->drm,
885 				"Get pages failed, retry count exceeded, asid=%u, gpusvm=%p, errno=%pe\n",
886 				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
887 		}
888 	}
889 	if (err) {
890 		range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
891 		goto err_out;
892 	}
893 
894 	range_debug(range, "PAGE FAULT - BIND");
895 
896 retry_bind:
897 	xe_vm_lock(vm, false);
898 	fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
899 	if (IS_ERR(fence)) {
900 		xe_vm_unlock(vm);
901 		err = PTR_ERR(fence);
902 		if (err == -EAGAIN) {
903 			ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
904 			range_debug(range, "PAGE FAULT - RETRY BIND");
905 			goto retry;
906 		}
907 		if (xe_vm_validate_should_retry(NULL, err, &end))
908 			goto retry_bind;
909 		goto err_out;
910 	}
911 	xe_vm_unlock(vm);
912 
913 	dma_fence_wait(fence, false);
914 	dma_fence_put(fence);
915 
916 err_out:
917 
918 	return err;
919 }
920 
921 /**
922  * xe_svm_has_mapping() - SVM has mappings
923  * @vm: The VM.
924  * @start: Start address.
925  * @end: End address.
926  *
927  * Check if an address range has SVM mappings.
928  *
929  * Return: True if address range has a SVM mapping, False otherwise
930  */
931 bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
932 {
933 	return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
934 }
935 
936 /**
937  * xe_svm_bo_evict() - SVM evict BO to system memory
938  * @bo: BO to evict
939  *
940  * SVM evict BO to system memory. GPU SVM layer ensures all device pages
941  * are evicted before returning.
942  *
943  * Return: 0 on success, standard error code otherwise
944  */
945 int xe_svm_bo_evict(struct xe_bo *bo)
946 {
947 	return drm_gpusvm_evict_to_ram(&bo->devmem_allocation);
948 }
949 
950 /**
951  * xe_svm_range_find_or_insert() - Find or insert GPU SVM range
952  * @vm: xe_vm pointer
953  * @addr: address for which range needs to be found/inserted
954  * @vma: Pointer to struct xe_vma which mirrors the CPU address space
955  * @ctx: GPU SVM context
956  *
957  * This function finds or inserts a newly allocated SVM range based on the
958  * address.
959  *
960  * Return: Pointer to the SVM range on success, ERR_PTR() on failure.
961  */
962 struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
963 						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
964 {
965 	struct drm_gpusvm_range *r;
966 
967 	r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
968 					    xe_vma_start(vma), xe_vma_end(vma), ctx);
969 	if (IS_ERR(r))
970 		return ERR_PTR(PTR_ERR(r));
971 
972 	return to_xe_range(r);
973 }
974 
975 /**
976  * xe_svm_range_get_pages() - Get pages for a SVM range
977  * @vm: Pointer to the struct xe_vm
978  * @range: Pointer to the xe SVM range structure
979  * @ctx: GPU SVM context
980  *
981  * This function gets pages for a SVM range and ensures they are mapped for
982  * DMA access. In case of failure with -EOPNOTSUPP, it evicts the range.
983  *
984  * Return: 0 on success, negative error code on failure.
985  */
986 int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
987 			   struct drm_gpusvm_ctx *ctx)
988 {
989 	int err = 0;
990 
991 	err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
992 	if (err == -EOPNOTSUPP) {
993 		range_debug(range, "PAGE FAULT - EVICT PAGES");
994 		drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
995 	}
996 
997 	return err;
998 }
999 
1000 #if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
1001 
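/*
 * drm_pagemap device_map callback: if the mapping device is the VRAM
 * region's own device, return the page's DPA with the VRAM interconnect
 * protocol; otherwise return DMA_MAPPING_ERROR since no peer interconnect
 * is supported here.
 */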
1002 static struct drm_pagemap_device_addr
1003 xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
1004 			  struct device *dev,
1005 			  struct page *page,
1006 			  unsigned int order,
1007 			  enum dma_data_direction dir)
1008 {
1009 	struct device *pgmap_dev = dpagemap->dev;
1010 	enum drm_interconnect_protocol prot;
1011 	dma_addr_t addr;
1012 
1013 	if (pgmap_dev == dev) {
1014 		addr = xe_vram_region_page_to_dpa(page_to_vr(page), page);
1015 		prot = XE_INTERCONNECT_VRAM;
1016 	} else {
1017 		addr = DMA_MAPPING_ERROR;
1018 		prot = 0;
1019 	}
1020 
1021 	return drm_pagemap_device_addr_encode(addr, prot, order, dir);
1022 }
1023 
1024 static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
1025 	.device_map = xe_drm_pagemap_device_map,
1026 };
1027 
1028 /**
1029  * xe_devm_add() - Remap and provide memmap backing for device memory
1030  * @tile: tile that the memory region belongs to
1031  * @vr: vram memory region to remap
1032  *
1033  * This remaps device memory into the host physical address space and
1034  * creates struct pages to back the device memory.
1035  *
1036  * Return: 0 on success, standard error code otherwise
1037  */
1038 int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
1039 {
1040 	struct xe_device *xe = tile_to_xe(tile);
1041 	struct device *dev = &to_pci_dev(xe->drm.dev)->dev;
1042 	struct resource *res;
1043 	void *addr;
1044 	int ret;
1045 
1046 	res = devm_request_free_mem_region(dev, &iomem_resource,
1047 					   vr->usable_size);
1048 	if (IS_ERR(res)) {
1049 		ret = PTR_ERR(res);
1050 		return ret;
1051 	}
1052 
1053 	vr->pagemap.type = MEMORY_DEVICE_PRIVATE;
1054 	vr->pagemap.range.start = res->start;
1055 	vr->pagemap.range.end = res->end;
1056 	vr->pagemap.nr_range = 1;
1057 	vr->pagemap.ops = drm_gpusvm_pagemap_ops_get();
1058 	vr->pagemap.owner = xe_svm_devm_owner(xe);
1059 	addr = devm_memremap_pages(dev, &vr->pagemap);
1060 
1061 	vr->dpagemap.dev = dev;
1062 	vr->dpagemap.ops = &xe_drm_pagemap_ops;
1063 
1064 	if (IS_ERR(addr)) {
1065 		devm_release_mem_region(dev, res->start, resource_size(res));
1066 		ret = PTR_ERR(addr);
1067 		drm_err(&xe->drm, "Failed to remap tile %d memory, errno %pe\n",
1068 			tile->id, ERR_PTR(ret));
1069 		return ret;
1070 	}
1071 	vr->hpa_base = res->start;
1072 
1073 	drm_dbg(&xe->drm, "Added tile %d memory [%llx-%llx] to devm, remapped to %pr\n",
1074 		tile->id, vr->io_start, vr->io_start + vr->usable_size, res);
1075 	return 0;
1076 }
1077 #else
1078 int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
1079 		      struct xe_svm_range *range,
1080 		      const struct drm_gpusvm_ctx *ctx)
1081 {
1082 	return -EOPNOTSUPP;
1083 }
1084 
1085 int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
1086 {
1087 	return 0;
1088 }
1089 #endif
1090 
1091 /**
1092  * xe_svm_flush() - SVM flush
1093  * @vm: The VM.
1094  *
1095  * Flush all SVM actions.
1096  */
1097 void xe_svm_flush(struct xe_vm *vm)
1098 {
1099 	if (xe_vm_in_fault_mode(vm))
1100 		flush_work(&vm->svm.garbage_collector.work);
1101 }
1102