xref: /linux/drivers/gpu/drm/xe/xe_bo.c (revision dd08ebf6c3525a7ea2186e636df064ea47281987)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 
7 #include "xe_bo.h"
8 
9 #include <linux/dma-buf.h>
10 
11 #include <drm/drm_drv.h>
12 #include <drm/drm_gem_ttm_helper.h>
13 #include <drm/ttm/ttm_device.h>
14 #include <drm/ttm/ttm_placement.h>
15 #include <drm/ttm/ttm_tt.h>
16 #include <drm/xe_drm.h>
17 
18 #include "xe_device.h"
19 #include "xe_dma_buf.h"
20 #include "xe_ggtt.h"
21 #include "xe_gt.h"
22 #include "xe_map.h"
23 #include "xe_migrate.h"
24 #include "xe_preempt_fence.h"
25 #include "xe_res_cursor.h"
26 #include "xe_trace.h"
27 #include "xe_vm.h"
28 
29 static const struct ttm_place sys_placement_flags = {
30 	.fpfn = 0,
31 	.lpfn = 0,
32 	.mem_type = XE_PL_SYSTEM,
33 	.flags = 0,
34 };
35 
36 static struct ttm_placement sys_placement = {
37 	.num_placement = 1,
38 	.placement = &sys_placement_flags,
39 	.num_busy_placement = 1,
40 	.busy_placement = &sys_placement_flags,
41 };
42 
43 bool mem_type_is_vram(u32 mem_type)
44 {
45 	return mem_type >= XE_PL_VRAM0;
46 }
47 
48 static bool resource_is_vram(struct ttm_resource *res)
49 {
50 	return mem_type_is_vram(res->mem_type);
51 }
52 
53 bool xe_bo_is_vram(struct xe_bo *bo)
54 {
55 	return resource_is_vram(bo->ttm.resource);
56 }
57 
58 static bool xe_bo_is_user(struct xe_bo *bo)
59 {
60 	return bo->flags & XE_BO_CREATE_USER_BIT;
61 }
62 
63 static struct xe_gt *
64 mem_type_to_gt(struct xe_device *xe, u32 mem_type)
65 {
66 	XE_BUG_ON(!mem_type_is_vram(mem_type));
67 
68 	return xe_device_get_gt(xe, mem_type - XE_PL_VRAM0);
69 }
70 
71 static void try_add_system(struct xe_bo *bo, struct ttm_place *places,
72 			   u32 bo_flags, u32 *c)
73 {
74 	if (bo_flags & XE_BO_CREATE_SYSTEM_BIT) {
75 		places[*c] = (struct ttm_place) {
76 			.mem_type = XE_PL_TT,
77 		};
78 		*c += 1;
79 
80 		if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
81 			bo->props.preferred_mem_type = XE_PL_TT;
82 	}
83 }
84 
85 static void try_add_vram0(struct xe_device *xe, struct xe_bo *bo,
86 			  struct ttm_place *places, u32 bo_flags, u32 *c)
87 {
88 	struct xe_gt *gt;
89 
90 	if (bo_flags & XE_BO_CREATE_VRAM0_BIT) {
91 		gt = mem_type_to_gt(xe, XE_PL_VRAM0);
92 		XE_BUG_ON(!gt->mem.vram.size);
93 
94 		places[*c] = (struct ttm_place) {
95 			.mem_type = XE_PL_VRAM0,
96 			/*
97 			 * For eviction / restore on suspend / resume, objects
98 			 * pinned in VRAM must be contiguous.
99 			 */
100 			.flags = bo_flags & (XE_BO_CREATE_PINNED_BIT |
101 					     XE_BO_CREATE_GGTT_BIT) ?
102 				TTM_PL_FLAG_CONTIGUOUS : 0,
103 		};
104 		*c += 1;
105 
106 		if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
107 			bo->props.preferred_mem_type = XE_PL_VRAM0;
108 	}
109 }
110 
111 static void try_add_vram1(struct xe_device *xe, struct xe_bo *bo,
112 			  struct ttm_place *places, u32 bo_flags, u32 *c)
113 {
114 	struct xe_gt *gt;
115 
116 	if (bo_flags & XE_BO_CREATE_VRAM1_BIT) {
117 		gt = mem_type_to_gt(xe, XE_PL_VRAM1);
118 		XE_BUG_ON(!gt->mem.vram.size);
119 
120 		places[*c] = (struct ttm_place) {
121 			.mem_type = XE_PL_VRAM1,
122 			/*
123 			 * For eviction / restore on suspend / resume, objects
124 			 * pinned in VRAM must be contiguous.
125 			 */
126 			.flags = bo_flags & (XE_BO_CREATE_PINNED_BIT |
127 					     XE_BO_CREATE_GGTT_BIT) ?
128 				TTM_PL_FLAG_CONTIGUOUS : 0,
129 		};
130 		*c += 1;
131 
132 		if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
133 			bo->props.preferred_mem_type = XE_PL_VRAM1;
134 	}
135 }
136 
137 static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
138 				       u32 bo_flags)
139 {
140 	struct ttm_place *places = bo->placements;
141 	u32 c = 0;
142 
143 	bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;
144 
145 	/* The order of placements should indicate preferred location */
146 
147 	if (bo->props.preferred_mem_class == XE_MEM_REGION_CLASS_SYSMEM) {
148 		try_add_system(bo, places, bo_flags, &c);
149 		if (bo->props.preferred_gt == XE_GT1) {
150 			try_add_vram1(xe, bo, places, bo_flags, &c);
151 			try_add_vram0(xe, bo, places, bo_flags, &c);
152 		} else {
153 			try_add_vram0(xe, bo, places, bo_flags, &c);
154 			try_add_vram1(xe, bo, places, bo_flags, &c);
155 		}
156 	} else if (bo->props.preferred_gt == XE_GT1) {
157 		try_add_vram1(xe, bo, places, bo_flags, &c);
158 		try_add_vram0(xe, bo, places, bo_flags, &c);
159 		try_add_system(bo, places, bo_flags, &c);
160 	} else {
161 		try_add_vram0(xe, bo, places, bo_flags, &c);
162 		try_add_vram1(xe, bo, places, bo_flags, &c);
163 		try_add_system(bo, places, bo_flags, &c);
164 	}
165 
166 	if (!c)
167 		return -EINVAL;
168 
169 	bo->placement = (struct ttm_placement) {
170 		.num_placement = c,
171 		.placement = places,
172 		.num_busy_placement = c,
173 		.busy_placement = places,
174 	};
175 
176 	return 0;
177 }
178 
179 int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
180 			      u32 bo_flags)
181 {
182 	xe_bo_assert_held(bo);
183 	return __xe_bo_placement_for_flags(xe, bo, bo_flags);
184 }
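/*
 * Illustrative sketch, not part of this file: a caller typically builds the
 * placement list under the bo lock. The names xe and bo and the flag choice
 * below are assumptions about the caller, not code from this driver.
 *
 *	struct ww_acquire_ctx ww;
 *	int err;
 *
 *	err = xe_bo_lock(bo, &ww, 0, true);
 *	if (err)
 *		return err;
 *	err = xe_bo_placement_for_flags(xe, bo, XE_BO_CREATE_VRAM0_BIT |
 *					XE_BO_CREATE_SYSTEM_BIT);
 *	xe_bo_unlock(bo, &ww);
 *
 * With no preferred mem class or gt set, the resulting list is ordered VRAM0
 * first and XE_PL_TT second, following the preference rules above.
 */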
185 
186 static void xe_evict_flags(struct ttm_buffer_object *tbo,
187 			   struct ttm_placement *placement)
188 {
189 	struct xe_bo *bo;
190 
191 	if (!xe_bo_is_xe_bo(tbo)) {
192 		/* Don't handle scatter gather BOs */
193 		if (tbo->type == ttm_bo_type_sg) {
194 			placement->num_placement = 0;
195 			placement->num_busy_placement = 0;
196 			return;
197 		}
198 
199 		*placement = sys_placement;
200 		return;
201 	}
202 
203 	/*
204 	 * For xe, sg bos that are evicted to system just trigger a
205 	 * rebind of the sg list upon subsequent validation to XE_PL_TT.
206 	 */
207 
208 	bo = ttm_to_xe_bo(tbo);
209 	switch (tbo->resource->mem_type) {
210 	case XE_PL_VRAM0:
211 	case XE_PL_VRAM1:
212 	case XE_PL_TT:
213 	default:
214 		/* for now kick out to system */
215 		*placement = sys_placement;
216 		break;
217 	}
218 }
219 
220 struct xe_ttm_tt {
221 	struct ttm_tt ttm;
222 	struct device *dev;
223 	struct sg_table sgt;
224 	struct sg_table *sg;
225 };
226 
227 static int xe_tt_map_sg(struct ttm_tt *tt)
228 {
229 	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
230 	unsigned long num_pages = tt->num_pages;
231 	int ret;
232 
233 	XE_BUG_ON(tt->page_flags & TTM_TT_FLAG_EXTERNAL);
234 
235 	if (xe_tt->sg)
236 		return 0;
237 
238 	ret = sg_alloc_table_from_pages(&xe_tt->sgt, tt->pages, num_pages,
239 					0, (u64)num_pages << PAGE_SHIFT,
240 					GFP_KERNEL);
241 	if (ret)
242 		return ret;
243 
244 	xe_tt->sg = &xe_tt->sgt;
245 	ret = dma_map_sgtable(xe_tt->dev, xe_tt->sg, DMA_BIDIRECTIONAL,
246 			      DMA_ATTR_SKIP_CPU_SYNC);
247 	if (ret) {
248 		sg_free_table(xe_tt->sg);
249 		xe_tt->sg = NULL;
250 		return ret;
251 	}
252 
253 	return 0;
254 }
255 
256 struct sg_table *xe_bo_get_sg(struct xe_bo *bo)
257 {
258 	struct ttm_tt *tt = bo->ttm.ttm;
259 	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
260 
261 	return xe_tt->sg;
262 }
263 
264 static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
265 				       u32 page_flags)
266 {
267 	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
268 	struct xe_device *xe = xe_bo_device(bo);
269 	struct xe_ttm_tt *tt;
270 	int err;
271 
272 	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
273 	if (!tt)
274 		return NULL;
275 
276 	tt->dev = xe->drm.dev;
277 
278 	/* TODO: Select caching mode */
279 	err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags,
280 			  bo->flags & XE_BO_SCANOUT_BIT ? ttm_write_combined : ttm_cached,
281 			  DIV_ROUND_UP(xe_device_ccs_bytes(xe_bo_device(bo),
282 							   bo->ttm.base.size),
283 				       PAGE_SIZE));
284 	if (err) {
285 		kfree(tt);
286 		return NULL;
287 	}
288 
289 	return &tt->ttm;
290 }
291 
292 static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt,
293 			      struct ttm_operation_ctx *ctx)
294 {
295 	int err;
296 
297 	/*
298 	 * dma-bufs are not populated with pages, and the dma-
299 	 * addresses are set up when moved to XE_PL_TT.
300 	 */
301 	if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
302 		return 0;
303 
304 	err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx);
305 	if (err)
306 		return err;
307 
308 	/* A follow-up may move this to xe_bo_move() when the BO moves to XE_PL_TT */
309 	err = xe_tt_map_sg(tt);
310 	if (err)
311 		ttm_pool_free(&ttm_dev->pool, tt);
312 
313 	return err;
314 }
315 
316 static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt)
317 {
318 	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
319 
320 	if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
321 		return;
322 
323 	if (xe_tt->sg) {
324 		dma_unmap_sgtable(xe_tt->dev, xe_tt->sg,
325 				  DMA_BIDIRECTIONAL, 0);
326 		sg_free_table(xe_tt->sg);
327 		xe_tt->sg = NULL;
328 	}
329 
330 	ttm_pool_free(&ttm_dev->pool, tt);
331 }
332 
333 static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt)
334 {
335 	ttm_tt_fini(tt);
336 	kfree(tt);
337 }
338 
339 static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
340 				 struct ttm_resource *mem)
341 {
342 	struct xe_device *xe = ttm_to_xe_device(bdev);
343 	struct xe_gt *gt;
344 
345 	switch (mem->mem_type) {
346 	case XE_PL_SYSTEM:
347 	case XE_PL_TT:
348 		return 0;
349 	case XE_PL_VRAM0:
350 	case XE_PL_VRAM1:
351 		gt = mem_type_to_gt(xe, mem->mem_type);
352 		mem->bus.offset = mem->start << PAGE_SHIFT;
353 
354 		if (gt->mem.vram.mapping &&
355 		    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
356 			mem->bus.addr = (u8 *)gt->mem.vram.mapping +
357 				mem->bus.offset;
358 
359 		mem->bus.offset += gt->mem.vram.io_start;
360 		mem->bus.is_iomem = true;
361 
362 #if !defined(CONFIG_X86)
363 		mem->bus.caching = ttm_write_combined;
364 #endif
365 		break;
366 	default:
367 		return -EINVAL;
368 	}
369 	return 0;
370 }
371 
372 static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
373 				const struct ttm_operation_ctx *ctx)
374 {
375 	struct dma_resv_iter cursor;
376 	struct dma_fence *fence;
377 	struct xe_vma *vma;
378 	int ret = 0;
379 
380 	dma_resv_assert_held(bo->ttm.base.resv);
381 
382 	if (!xe_device_in_fault_mode(xe) && !list_empty(&bo->vmas)) {
383 		dma_resv_iter_begin(&cursor, bo->ttm.base.resv,
384 				    DMA_RESV_USAGE_BOOKKEEP);
385 		dma_resv_for_each_fence_unlocked(&cursor, fence)
386 			dma_fence_enable_sw_signaling(fence);
387 		dma_resv_iter_end(&cursor);
388 	}
389 
390 	list_for_each_entry(vma, &bo->vmas, bo_link) {
391 		struct xe_vm *vm = vma->vm;
392 
393 		trace_xe_vma_evict(vma);
394 
395 		if (xe_vm_in_fault_mode(vm)) {
396 			/* Wait for pending binds / unbinds. */
397 			long timeout;
398 
399 			if (ctx->no_wait_gpu &&
400 			    !dma_resv_test_signaled(bo->ttm.base.resv,
401 						    DMA_RESV_USAGE_BOOKKEEP))
402 				return -EBUSY;
403 
404 			timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
405 							DMA_RESV_USAGE_BOOKKEEP,
406 							ctx->interruptible,
407 							MAX_SCHEDULE_TIMEOUT);
408 			if (timeout > 0) {
409 				ret = xe_vm_invalidate_vma(vma);
410 				XE_WARN_ON(ret);
411 			} else if (!timeout) {
412 				ret = -ETIME;
413 			} else {
414 				ret = timeout;
415 			}
416 
417 		} else {
418 			bool vm_resv_locked = false;
419 			struct xe_vm *vm = vma->vm;
420 
421 			/*
422 			 * We need to put the vma on the vm's rebind_list,
423 			 * but need the vm resv to do so. If we can't verify
424 			 * that we indeed have it locked, put the vma on the
425 			 * vm's notifier.rebind_list instead and scoop it up later.
426 			 */
427 			if (dma_resv_trylock(&vm->resv))
428 				vm_resv_locked = true;
429 			else if (ctx->resv != &vm->resv) {
430 				spin_lock(&vm->notifier.list_lock);
431 				list_move_tail(&vma->notifier.rebind_link,
432 					       &vm->notifier.rebind_list);
433 				spin_unlock(&vm->notifier.list_lock);
434 				continue;
435 			}
436 
437 			xe_vm_assert_held(vm);
438 			if (list_empty(&vma->rebind_link) && vma->gt_present)
439 				list_add_tail(&vma->rebind_link, &vm->rebind_list);
440 
441 			if (vm_resv_locked)
442 				dma_resv_unlock(&vm->resv);
443 		}
444 	}
445 
446 	return ret;
447 }
448 
449 /*
450  * The dma-buf map_attachment() / unmap_attachment() is hooked up here.
451  * Note that unmapping the attachment is deferred to the next
452  * map_attachment time, or to bo destroy (after idling), whichever comes first.
453  * This is to avoid syncing before unmap_attachment(), assuming that the
454  * caller relies on idling the reservation object before moving the
455  * backing store out. Should that assumption not hold, then we will be able
456  * to unconditionally call unmap_attachment() when moving out to system.
457  */
458 static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo,
459 			     struct ttm_resource *old_res,
460 			     struct ttm_resource *new_res)
461 {
462 	struct dma_buf_attachment *attach = ttm_bo->base.import_attach;
463 	struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt,
464 					       ttm);
465 	struct sg_table *sg;
466 
467 	XE_BUG_ON(!attach);
468 	XE_BUG_ON(!ttm_bo->ttm);
469 
470 	if (new_res->mem_type == XE_PL_SYSTEM)
471 		goto out;
472 
473 	if (ttm_bo->sg) {
474 		dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL);
475 		ttm_bo->sg = NULL;
476 	}
477 
478 	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
479 	if (IS_ERR(sg))
480 		return PTR_ERR(sg);
481 
482 	ttm_bo->sg = sg;
483 	xe_tt->sg = sg;
484 
485 out:
486 	ttm_bo_move_null(ttm_bo, new_res);
487 
488 	return 0;
489 }
490 
491 /**
492  * xe_bo_move_notify - Notify subsystems of a pending move
493  * @bo: The buffer object
494  * @ctx: The struct ttm_operation_ctx controlling locking and waits.
495  *
496  * This function notifies subsystems of an upcoming buffer move.
497  * Upon receiving such a notification, subsystems should schedule
498  * halting access to the underlying pages and optionally add a fence
499  * to the buffer object's dma_resv object, which signals when access is
500  * stopped. The caller will wait on all dma_resv fences before
501  * starting the move.
502  *
503  * A subsystem may commence access to the object after obtaining
504  * bindings to the new backing memory under the object lock.
505  *
506  * Return: 0 on success, -EINTR or -ERESTARTSYS if interrupted in fault mode,
507  * negative error code on error.
508  */
509 static int xe_bo_move_notify(struct xe_bo *bo,
510 			     const struct ttm_operation_ctx *ctx)
511 {
512 	struct ttm_buffer_object *ttm_bo = &bo->ttm;
513 	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
514 	int ret;
515 
516 	/*
517 	 * If this starts to call into many components, consider
518 	 * using a notification chain here.
519 	 */
520 
521 	if (xe_bo_is_pinned(bo))
522 		return -EINVAL;
523 
524 	xe_bo_vunmap(bo);
525 	ret = xe_bo_trigger_rebind(xe, bo, ctx);
526 	if (ret)
527 		return ret;
528 
529 	/* Don't call move_notify() for imported dma-bufs. */
530 	if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach)
531 		dma_buf_move_notify(ttm_bo->base.dma_buf);
532 
533 	return 0;
534 }
535 
536 static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
537 		      struct ttm_operation_ctx *ctx,
538 		      struct ttm_resource *new_mem,
539 		      struct ttm_place *hop)
540 {
541 	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
542 	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
543 	struct ttm_resource *old_mem = ttm_bo->resource;
544 	struct ttm_tt *ttm = ttm_bo->ttm;
545 	struct xe_gt *gt = NULL;
546 	struct dma_fence *fence;
547 	bool move_lacks_source;
548 	bool needs_clear;
549 	int ret = 0;
550 
551 	if (!old_mem) {
552 		if (new_mem->mem_type != TTM_PL_SYSTEM) {
553 			hop->mem_type = TTM_PL_SYSTEM;
554 			hop->flags = TTM_PL_FLAG_TEMPORARY;
555 			ret = -EMULTIHOP;
556 			goto out;
557 		}
558 
559 		ttm_bo_move_null(ttm_bo, new_mem);
560 		goto out;
561 	}
562 
563 	if (ttm_bo->type == ttm_bo_type_sg) {
564 		ret = xe_bo_move_notify(bo, ctx);
565 		if (!ret)
566 			ret = xe_bo_move_dmabuf(ttm_bo, old_mem, new_mem);
567 		goto out;
568 	}
569 
570 	move_lacks_source = !resource_is_vram(old_mem) &&
571 		(!ttm || !ttm_tt_is_populated(ttm));
572 
573 	needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) ||
574 		(!ttm && ttm_bo->type == ttm_bo_type_device);
575 
576 	if ((move_lacks_source && !needs_clear) ||
577 	    (old_mem->mem_type == XE_PL_SYSTEM &&
578 	     new_mem->mem_type == XE_PL_TT)) {
579 		ttm_bo_move_null(ttm_bo, new_mem);
580 		goto out;
581 	}
582 
583 	if (!move_lacks_source && !xe_bo_is_pinned(bo)) {
584 		ret = xe_bo_move_notify(bo, ctx);
585 		if (ret)
586 			goto out;
587 	}
588 
589 	if (old_mem->mem_type == XE_PL_TT &&
590 	    new_mem->mem_type == XE_PL_SYSTEM) {
591 		long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
592 						     DMA_RESV_USAGE_BOOKKEEP,
593 						     true,
594 						     MAX_SCHEDULE_TIMEOUT);
595 		if (timeout < 0) {
596 			ret = timeout;
597 			goto out;
598 		}
599 		ttm_bo_move_null(ttm_bo, new_mem);
600 		goto out;
601 	}
602 
603 	if (!move_lacks_source &&
604 	    ((old_mem->mem_type == XE_PL_SYSTEM && resource_is_vram(new_mem)) ||
605 	     (resource_is_vram(old_mem) &&
606 	      new_mem->mem_type == XE_PL_SYSTEM))) {
607 		hop->fpfn = 0;
608 		hop->lpfn = 0;
609 		hop->mem_type = XE_PL_TT;
610 		hop->flags = TTM_PL_FLAG_TEMPORARY;
611 		ret = -EMULTIHOP;
612 		goto out;
613 	}
614 
615 	if (bo->gt)
616 		gt = bo->gt;
617 	else if (resource_is_vram(new_mem))
618 		gt = mem_type_to_gt(xe, new_mem->mem_type);
619 	else if (resource_is_vram(old_mem))
620 		gt = mem_type_to_gt(xe, old_mem->mem_type);
621 
622 	XE_BUG_ON(!gt);
623 	XE_BUG_ON(!gt->migrate);
624 
625 	trace_xe_bo_move(bo);
626 	xe_device_mem_access_get(xe);
627 
628 	if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
629 		/*
630 		 * Kernel memory that is pinned should only be moved on suspend
631 		 * / resume; some of the pinned memory is required for the
632 		 * device to resume / use the GPU to move other evicted memory
633 		 * (user memory) around. This could likely be optimized a bit
634 		 * further by finding the minimum set of pinned memory required
635 		 * for resume, but for simplicity we do a memcpy for all pinned
636 		 * memory.
637 		 */
638 		ret = xe_bo_vmap(bo);
639 		if (!ret) {
640 			ret = ttm_bo_move_memcpy(ttm_bo, ctx, new_mem);
641 
642 			/* Create a new VMAP once the kernel BO is back in VRAM */
643 			if (!ret && resource_is_vram(new_mem)) {
644 				void *new_addr = gt->mem.vram.mapping +
645 					(new_mem->start << PAGE_SHIFT);
646 
647 				XE_BUG_ON(new_mem->start !=
648 					  bo->placements->fpfn);
649 
650 				iosys_map_set_vaddr_iomem(&bo->vmap, new_addr);
651 			}
652 		}
653 	} else {
654 		if (move_lacks_source)
655 			fence = xe_migrate_clear(gt->migrate, bo, new_mem, 0);
656 		else
657 			fence = xe_migrate_copy(gt->migrate, bo, old_mem, new_mem);
658 		if (IS_ERR(fence)) {
659 			ret = PTR_ERR(fence);
660 			xe_device_mem_access_put(xe);
661 			goto out;
662 		}
663 		ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict, true,
664 						new_mem);
665 		dma_fence_put(fence);
666 	}
667 
668 	xe_device_mem_access_put(xe);
669 	trace_printk("new_mem->mem_type=%d\n", new_mem->mem_type);
670 
671 out:
672 	return ret;
673 
674 }
675 
676 static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
677 				       unsigned long page_offset)
678 {
679 	struct xe_device *xe = ttm_to_xe_device(bo->bdev);
680 	struct xe_gt *gt = mem_type_to_gt(xe, bo->resource->mem_type);
681 	struct xe_res_cursor cursor;
682 
683 	xe_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
684 	return (gt->mem.vram.io_start + cursor.start) >> PAGE_SHIFT;
685 }
686 
687 static void __xe_bo_vunmap(struct xe_bo *bo);
688 
689 /*
690  * TODO: Move this function to TTM so we don't rely on how TTM does its
691  * locking, thereby abusing TTM internals.
692  */
693 static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo)
694 {
695 	bool locked;
696 
697 	XE_WARN_ON(kref_read(&ttm_bo->kref));
698 
699 	/*
700 	 * We can typically only race with TTM trylocking under the
701 	 * lru_lock, which will immediately be unlocked again since
702 	 * the ttm_bo refcount is zero at this point. So trylocking *should*
703 	 * always succeed here, as long as we hold the lru lock.
704 	 */
705 	spin_lock(&ttm_bo->bdev->lru_lock);
706 	locked = dma_resv_trylock(ttm_bo->base.resv);
707 	spin_unlock(&ttm_bo->bdev->lru_lock);
708 	XE_WARN_ON(!locked);
709 
710 	return locked;
711 }
712 
713 static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
714 {
715 	struct dma_resv_iter cursor;
716 	struct dma_fence *fence;
717 	struct dma_fence *replacement = NULL;
718 	struct xe_bo *bo;
719 
720 	if (!xe_bo_is_xe_bo(ttm_bo))
721 		return;
722 
723 	bo = ttm_to_xe_bo(ttm_bo);
724 	XE_WARN_ON(bo->created && kref_read(&ttm_bo->base.refcount));
725 
726 	/*
727 	 * Corner case where TTM fails to allocate memory and this BO's resv
728 	 * still points to the VM's resv.
729 	 */
730 	if (ttm_bo->base.resv != &ttm_bo->base._resv)
731 		return;
732 
733 	if (!xe_ttm_bo_lock_in_destructor(ttm_bo))
734 		return;
735 
736 	/*
737 	 * Scrub the preempt fences if any. The unbind fence is already
738 	 * attached to the resv.
739 	 * TODO: Don't do this for external bos once we scrub them after
740 	 * unbind.
741 	 */
742 	dma_resv_for_each_fence(&cursor, ttm_bo->base.resv,
743 				DMA_RESV_USAGE_BOOKKEEP, fence) {
744 		if (xe_fence_is_xe_preempt(fence) &&
745 		    !dma_fence_is_signaled(fence)) {
746 			if (!replacement)
747 				replacement = dma_fence_get_stub();
748 
749 			dma_resv_replace_fences(ttm_bo->base.resv,
750 						fence->context,
751 						replacement,
752 						DMA_RESV_USAGE_BOOKKEEP);
753 		}
754 	}
755 	dma_fence_put(replacement);
756 
757 	dma_resv_unlock(ttm_bo->base.resv);
758 }
759 
760 static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
761 {
762 	if (!xe_bo_is_xe_bo(ttm_bo))
763 		return;
764 
765 	/*
766 	 * Object is idle and about to be destroyed. Release the
767 	 * dma-buf attachment.
768 	 */
769 	if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) {
770 		struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm,
771 						       struct xe_ttm_tt, ttm);
772 
773 		dma_buf_unmap_attachment(ttm_bo->base.import_attach, ttm_bo->sg,
774 					 DMA_BIDIRECTIONAL);
775 		ttm_bo->sg = NULL;
776 		xe_tt->sg = NULL;
777 	}
778 }
779 
780 struct ttm_device_funcs xe_ttm_funcs = {
781 	.ttm_tt_create = xe_ttm_tt_create,
782 	.ttm_tt_populate = xe_ttm_tt_populate,
783 	.ttm_tt_unpopulate = xe_ttm_tt_unpopulate,
784 	.ttm_tt_destroy = xe_ttm_tt_destroy,
785 	.evict_flags = xe_evict_flags,
786 	.move = xe_bo_move,
787 	.io_mem_reserve = xe_ttm_io_mem_reserve,
788 	.io_mem_pfn = xe_ttm_io_mem_pfn,
789 	.release_notify = xe_ttm_bo_release_notify,
790 	.eviction_valuable = ttm_bo_eviction_valuable,
791 	.delete_mem_notify = xe_ttm_bo_delete_mem_notify,
792 };
793 
794 static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
795 {
796 	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
797 
798 	if (bo->ttm.base.import_attach)
799 		drm_prime_gem_destroy(&bo->ttm.base, NULL);
800 	drm_gem_object_release(&bo->ttm.base);
801 
802 	WARN_ON(!list_empty(&bo->vmas));
803 
804 	if (bo->ggtt_node.size)
805 		xe_ggtt_remove_bo(bo->gt->mem.ggtt, bo);
806 
807 	if (bo->vm && xe_bo_is_user(bo))
808 		xe_vm_put(bo->vm);
809 
810 	kfree(bo);
811 }
812 
813 static void xe_gem_object_free(struct drm_gem_object *obj)
814 {
815 	/* Our BO reference counting scheme works as follows:
816 	 *
817 	 * The gem object kref is typically used throughout the driver,
818 	 * and the gem object holds a ttm_buffer_object refcount, so
819 	 * that when the last gem object reference is put, which is when
820 	 * we end up in this function, we also put that ttm_buffer_object
821 	 * refcount. Anything using gem interfaces is then no longer
822 	 * allowed to access the object in a way that requires a gem
823 	 * refcount, including locking the object.
824 	 *
825 	 * Driver TTM callbacks are allowed to use the ttm_buffer_object
826 	 * refcount directly if needed.
827 	 */
828 	__xe_bo_vunmap(gem_to_xe_bo(obj));
829 	ttm_bo_put(container_of(obj, struct ttm_buffer_object, base));
830 }
831 
832 static bool should_migrate_to_system(struct xe_bo *bo)
833 {
834 	struct xe_device *xe = xe_bo_device(bo);
835 
836 	return xe_device_in_fault_mode(xe) && bo->props.cpu_atomic;
837 }
838 
839 static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
840 {
841 	struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
842 	struct drm_device *ddev = tbo->base.dev;
843 	vm_fault_t ret;
844 	int idx, r = 0;
845 
846 	ret = ttm_bo_vm_reserve(tbo, vmf);
847 	if (ret)
848 		return ret;
849 
850 	if (drm_dev_enter(ddev, &idx)) {
851 		struct xe_bo *bo = ttm_to_xe_bo(tbo);
852 
853 		trace_xe_bo_cpu_fault(bo);
854 
855 		if (should_migrate_to_system(bo)) {
856 			r = xe_bo_migrate(bo, XE_PL_TT);
857 			if (r == -EBUSY || r == -ERESTARTSYS || r == -EINTR)
858 				ret = VM_FAULT_NOPAGE;
859 			else if (r)
860 				ret = VM_FAULT_SIGBUS;
861 		}
862 		if (!ret)
863 			ret = ttm_bo_vm_fault_reserved(vmf,
864 						       vmf->vma->vm_page_prot,
865 						       TTM_BO_VM_NUM_PREFAULT);
866 
867 		drm_dev_exit(idx);
868 	} else {
869 		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
870 	}
871 	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
872 		return ret;
873 
874 	dma_resv_unlock(tbo->base.resv);
875 	return ret;
876 }
877 
878 static const struct vm_operations_struct xe_gem_vm_ops = {
879 	.fault = xe_gem_fault,
880 	.open = ttm_bo_vm_open,
881 	.close = ttm_bo_vm_close,
882 	.access = ttm_bo_vm_access
883 };
884 
885 static const struct drm_gem_object_funcs xe_gem_object_funcs = {
886 	.free = xe_gem_object_free,
887 	.mmap = drm_gem_ttm_mmap,
888 	.export = xe_gem_prime_export,
889 	.vm_ops = &xe_gem_vm_ops,
890 };
891 
892 /**
893  * xe_bo_alloc - Allocate storage for a struct xe_bo
894  *
895  * This function is intended to allocate storage to be used for input
896  * to __xe_bo_create_locked(), in the case a pointer to the bo to be
897  * created is needed before the call to __xe_bo_create_locked().
898  * If __xe_bo_create_locked() ends up never being called, then the
899  * storage allocated with this function needs to be freed using
900  * xe_bo_free().
901  *
902  * Return: A pointer to an uninitialized struct xe_bo on success,
903  * ERR_PTR(-ENOMEM) on error.
904  */
905 struct xe_bo *xe_bo_alloc(void)
906 {
907 	struct xe_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
908 
909 	if (!bo)
910 		return ERR_PTR(-ENOMEM);
911 
912 	return bo;
913 }
914 
915 /**
916  * xe_bo_free - Free storage allocated using xe_bo_alloc()
917  * @bo: The buffer object storage.
918  *
919  * Refer to xe_bo_alloc() documentation for valid use-cases.
920  */
921 void xe_bo_free(struct xe_bo *bo)
922 {
923 	kfree(bo);
924 }
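/*
 * Illustrative sketch, not from this file, of the xe_bo_alloc() /
 * xe_bo_free() contract described above; the precondition check is purely
 * hypothetical, and xe, gt, resv, size, type and flags are assumed to exist
 * in the caller.
 *
 *	struct xe_bo *bo = xe_bo_alloc();
 *
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 *	if (some_hypothetical_precondition_failed) {
 *		xe_bo_free(bo);
 *		return -EINVAL;
 *	}
 *
 *	bo = __xe_bo_create_locked(xe, bo, gt, resv, size, type, flags);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 * The storage is freed with xe_bo_free() only on the path where it never
 * reaches __xe_bo_create_locked().
 */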
925 
926 struct xe_bo *__xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
927 				    struct xe_gt *gt, struct dma_resv *resv,
928 				    size_t size, enum ttm_bo_type type,
929 				    u32 flags)
930 {
931 	struct ttm_operation_ctx ctx = {
932 		.interruptible = true,
933 		.no_wait_gpu = false,
934 	};
935 	struct ttm_placement *placement;
936 	uint32_t alignment;
937 	int err;
938 
939 	/* Only kernel objects should set GT */
940 	XE_BUG_ON(gt && type != ttm_bo_type_kernel);
941 
942 	if (!bo) {
943 		bo = xe_bo_alloc();
944 		if (IS_ERR(bo))
945 			return bo;
946 	}
947 
948 	if (flags & (XE_BO_CREATE_VRAM0_BIT | XE_BO_CREATE_VRAM1_BIT) &&
949 	    !(flags & XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT) &&
950 	    xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) {
951 		size = ALIGN(size, SZ_64K);
952 		flags |= XE_BO_INTERNAL_64K;
953 		alignment = SZ_64K >> PAGE_SHIFT;
954 	} else {
955 		alignment = SZ_4K >> PAGE_SHIFT;
956 	}
957 
958 	bo->gt = gt;
959 	bo->size = size;
960 	bo->flags = flags;
961 	bo->ttm.base.funcs = &xe_gem_object_funcs;
962 	bo->props.preferred_mem_class = XE_BO_PROPS_INVALID;
963 	bo->props.preferred_gt = XE_BO_PROPS_INVALID;
964 	bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;
965 	bo->ttm.priority = DRM_XE_VMA_PRIORITY_NORMAL;
966 	INIT_LIST_HEAD(&bo->vmas);
967 	INIT_LIST_HEAD(&bo->pinned_link);
968 
969 	drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size);
970 
971 	if (resv) {
972 		ctx.allow_res_evict = true;
973 		ctx.resv = resv;
974 	}
975 
976 	err = __xe_bo_placement_for_flags(xe, bo, bo->flags);
977 	if (WARN_ON(err))
978 		return ERR_PTR(err);
979 
980 	/* Defer populating type_sg bos */
981 	placement = (type == ttm_bo_type_sg ||
982 		     bo->flags & XE_BO_DEFER_BACKING) ? &sys_placement :
983 		&bo->placement;
984 	err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type,
985 				   placement, alignment,
986 				   &ctx, NULL, resv, xe_ttm_bo_destroy);
987 	if (err)
988 		return ERR_PTR(err);
989 
990 	bo->created = true;
991 	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
992 
993 	return bo;
994 }
995 
996 struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_gt *gt,
997 				  struct xe_vm *vm, size_t size,
998 				  enum ttm_bo_type type, u32 flags)
999 {
1000 	struct xe_bo *bo;
1001 	int err;
1002 
1003 	if (vm)
1004 		xe_vm_assert_held(vm);
1005 	bo = __xe_bo_create_locked(xe, NULL, gt, vm ? &vm->resv : NULL, size,
1006 				   type, flags);
1007 	if (IS_ERR(bo))
1008 		return bo;
1009 
1010 	if (vm && xe_bo_is_user(bo))
1011 		xe_vm_get(vm);
1012 	bo->vm = vm;
1013 
1014 	if (flags & XE_BO_CREATE_GGTT_BIT) {
1015 		XE_BUG_ON(!gt);
1016 
1017 		err = xe_ggtt_insert_bo(gt->mem.ggtt, bo);
1018 		if (err)
1019 			goto err_unlock_put_bo;
1020 	}
1021 
1022 	return bo;
1023 
1024 err_unlock_put_bo:
1025 	xe_bo_unlock_vm_held(bo);
1026 	xe_bo_put(bo);
1027 	return ERR_PTR(err);
1028 }
1029 
1030 struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_gt *gt,
1031 			   struct xe_vm *vm, size_t size,
1032 			   enum ttm_bo_type type, u32 flags)
1033 {
1034 	struct xe_bo *bo = xe_bo_create_locked(xe, gt, vm, size, type, flags);
1035 
1036 	if (!IS_ERR(bo))
1037 		xe_bo_unlock_vm_held(bo);
1038 
1039 	return bo;
1040 }
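/*
 * Illustrative sketch, not from this file: creating and releasing a simple
 * system-memory bo without a vm, mirroring the flag usage in
 * xe_gem_create_ioctl() below. xe is assumed to exist in the caller and the
 * size is only an example.
 *
 *	struct xe_bo *bo;
 *
 *	bo = xe_bo_create(xe, NULL, NULL, SZ_2M, ttm_bo_type_device,
 *			  XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_USER_BIT);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 *	...
 *
 *	xe_bo_put(bo);
 */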
1041 
1042 struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_gt *gt,
1043 				   struct xe_vm *vm, size_t size,
1044 				   enum ttm_bo_type type, u32 flags)
1045 {
1046 	struct xe_bo *bo = xe_bo_create_locked(xe, gt, vm, size, type, flags);
1047 	int err;
1048 
1049 	if (IS_ERR(bo))
1050 		return bo;
1051 
1052 	err = xe_bo_pin(bo);
1053 	if (err)
1054 		goto err_put;
1055 
1056 	err = xe_bo_vmap(bo);
1057 	if (err)
1058 		goto err_unpin;
1059 
1060 	xe_bo_unlock_vm_held(bo);
1061 
1062 	return bo;
1063 
1064 err_unpin:
1065 	xe_bo_unpin(bo);
1066 err_put:
1067 	xe_bo_unlock_vm_held(bo);
1068 	xe_bo_put(bo);
1069 	return ERR_PTR(err);
1070 }
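/*
 * Illustrative sketch, not from this file: a kernel object created, pinned
 * and mapped in one call, then filled through the iosys_map, much like
 * xe_bo_create_from_data() below. xe, gt and data are assumed to exist in
 * the caller.
 *
 *	struct xe_bo *bo;
 *
 *	bo = xe_bo_create_pin_map(xe, gt, NULL, SZ_4K, ttm_bo_type_kernel,
 *				  XE_BO_CREATE_VRAM_IF_DGFX(gt) |
 *				  XE_BO_CREATE_GGTT_BIT);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 *	xe_map_memcpy_to(xe, &bo->vmap, 0, data, SZ_4K);
 */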
1071 
1072 struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_gt *gt,
1073 				     const void *data, size_t size,
1074 				     enum ttm_bo_type type, u32 flags)
1075 {
1076 	struct xe_bo *bo = xe_bo_create_pin_map(xe, gt, NULL,
1077 						ALIGN(size, PAGE_SIZE),
1078 						type, flags);
1079 	if (IS_ERR(bo))
1080 		return bo;
1081 
1082 	xe_map_memcpy_to(xe, &bo->vmap, 0, data, size);
1083 
1084 	return bo;
1085 }
1086 
1087 /*
1088  * XXX: This is in the VM bind data path, likely should calculate this once and
1089  * store, with a recalculation if the BO is moved.
1090  */
1091 static uint64_t vram_region_io_offset(struct xe_bo *bo)
1092 {
1093 	struct xe_device *xe = xe_bo_device(bo);
1094 	struct xe_gt *gt = mem_type_to_gt(xe, bo->ttm.resource->mem_type);
1095 
1096 	return gt->mem.vram.io_start - xe->mem.vram.io_start;
1097 }
1098 
1099 /**
1100  * xe_bo_pin_external - pin an external BO
1101  * @bo: buffer object to be pinned
1102  *
1103  * Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
1104  * BO. Unique call compared to xe_bo_pin as this function has its own set of
1105  * asserts and code to ensure evict / restore on suspend / resume.
1106  *
1107  * Returns 0 for success, negative error code otherwise.
1108  */
1109 int xe_bo_pin_external(struct xe_bo *bo)
1110 {
1111 	struct xe_device *xe = xe_bo_device(bo);
1112 	int err;
1113 
1114 	XE_BUG_ON(bo->vm);
1115 	XE_BUG_ON(!xe_bo_is_user(bo));
1116 
1117 	if (!xe_bo_is_pinned(bo)) {
1118 		err = xe_bo_validate(bo, NULL, false);
1119 		if (err)
1120 			return err;
1121 
1122 		if (xe_bo_is_vram(bo)) {
1123 			spin_lock(&xe->pinned.lock);
1124 			list_add_tail(&bo->pinned_link,
1125 				      &xe->pinned.external_vram);
1126 			spin_unlock(&xe->pinned.lock);
1127 		}
1128 	}
1129 
1130 	ttm_bo_pin(&bo->ttm);
1131 
1132 	/*
1133 	 * FIXME: If we always use the reserve / unreserve functions for locking
1134 	 * we do not need this.
1135 	 */
1136 	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
1137 
1138 	return 0;
1139 }
1140 
1141 int xe_bo_pin(struct xe_bo *bo)
1142 {
1143 	struct xe_device *xe = xe_bo_device(bo);
1144 	int err;
1145 
1146 	/* We currently don't expect user BOs to be pinned */
1147 	XE_BUG_ON(xe_bo_is_user(bo));
1148 
1149 	/* Pinned object must be in GGTT or have pinned flag */
1150 	XE_BUG_ON(!(bo->flags & (XE_BO_CREATE_PINNED_BIT |
1151 				 XE_BO_CREATE_GGTT_BIT)));
1152 
1153 	/*
1154 	 * No reason we can't support pinning imported dma-bufs; we just don't
1155 	 * expect to pin an imported dma-buf.
1156 	 */
1157 	XE_BUG_ON(bo->ttm.base.import_attach);
1158 
1159 	/* We only expect at most 1 pin */
1160 	XE_BUG_ON(xe_bo_is_pinned(bo));
1161 
1162 	err = xe_bo_validate(bo, NULL, false);
1163 	if (err)
1164 		return err;
1165 
1166 	/*
1167 	 * For pinned objects on DGFX, we expect these objects to be in
1168 	 * contiguous VRAM memory, which is required for eviction / restore
1169 	 * during suspend / resume (force restore to the same physical address).
1170 	 */
1171 	if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
1172 	    bo->flags & XE_BO_INTERNAL_TEST)) {
1173 		struct ttm_place *place = &(bo->placements[0]);
1174 		bool lmem;
1175 
1176 		XE_BUG_ON(!(place->flags & TTM_PL_FLAG_CONTIGUOUS));
1177 		XE_BUG_ON(!mem_type_is_vram(place->mem_type));
1178 
1179 		place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE, &lmem) -
1180 			       vram_region_io_offset(bo)) >> PAGE_SHIFT;
1181 		place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
1182 
1183 		spin_lock(&xe->pinned.lock);
1184 		list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
1185 		spin_unlock(&xe->pinned.lock);
1186 	}
1187 
1188 	ttm_bo_pin(&bo->ttm);
1189 
1190 	/*
1191 	 * FIXME: If we always use the reserve / unreserve functions for locking
1192 	 * we do not need this.
1193 	 */
1194 	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
1195 
1196 	return 0;
1197 }
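/*
 * Illustrative sketch, not from this file: pinning a kernel bo created with
 * XE_BO_CREATE_PINNED_BIT. The bo lock is assumed to be held by the caller,
 * and bo / err are assumed to exist there.
 *
 *	err = xe_bo_pin(bo);
 *	if (err)
 *		return err;
 *
 *	(the bo now stays resident, and on DGFX is restored to the same
 *	 physical VRAM address across suspend / resume)
 *
 *	xe_bo_unpin(bo);
 */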
1198 
1199 /**
1200  * xe_bo_unpin_external - unpin an external BO
1201  * @bo: buffer object to be unpinned
1202  *
1203  * Unpin an external (not tied to a VM, can be exported via dma-buf / prime FD)
1204  * BO. Unique call compared to xe_bo_unpin as this function has its own set of
1205  * asserts and code to ensure evict / restore on suspend / resume.
1208  */
1209 void xe_bo_unpin_external(struct xe_bo *bo)
1210 {
1211 	struct xe_device *xe = xe_bo_device(bo);
1212 
1213 	XE_BUG_ON(bo->vm);
1214 	XE_BUG_ON(!xe_bo_is_pinned(bo));
1215 	XE_BUG_ON(!xe_bo_is_user(bo));
1216 
1217 	if (bo->ttm.pin_count == 1 && !list_empty(&bo->pinned_link)) {
1218 		spin_lock(&xe->pinned.lock);
1219 		list_del_init(&bo->pinned_link);
1220 		spin_unlock(&xe->pinned.lock);
1221 	}
1222 
1223 	ttm_bo_unpin(&bo->ttm);
1224 
1225 	/*
1226 	 * FIXME: If we always use the reserve / unreserve functions for locking
1227 	 * we do not need this.
1228 	 */
1229 	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
1230 }
1231 
1232 void xe_bo_unpin(struct xe_bo *bo)
1233 {
1234 	struct xe_device *xe = xe_bo_device(bo);
1235 
1236 	XE_BUG_ON(bo->ttm.base.import_attach);
1237 	XE_BUG_ON(!xe_bo_is_pinned(bo));
1238 
1239 	if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
1240 	    bo->flags & XE_BO_INTERNAL_TEST)) {
1241 		XE_BUG_ON(list_empty(&bo->pinned_link));
1242 
1243 		spin_lock(&xe->pinned.lock);
1244 		list_del_init(&bo->pinned_link);
1245 		spin_unlock(&xe->pinned.lock);
1246 	}
1247 
1248 	ttm_bo_unpin(&bo->ttm);
1249 }
1250 
1251 /**
1252  * xe_bo_validate() - Make sure the bo is in an allowed placement
1253  * @bo: The bo,
1254  * @vm: Pointer to the vm the bo shares a locked dma_resv object with, or
1255  *      NULL. Used together with @allow_res_evict.
1256  * @allow_res_evict: Whether it's allowed to evict bos sharing @vm's
1257  *                   reservation object.
1258  *
1259  * Make sure the bo is in allowed placement, migrating it if necessary. If
1260  * needed, other bos will be evicted. If bos selected for eviction share
1261  * the @vm's reservation object, they can be evicted iff @allow_res_evict is
1262  * set to true, otherwise they will be bypassed.
1263  *
1264  * Return: 0 on success, negative error code on failure. May return
1265  * -EINTR or -ERESTARTSYS if internal waits are interrupted by a signal.
1266  */
1267 int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
1268 {
1269 	struct ttm_operation_ctx ctx = {
1270 		.interruptible = true,
1271 		.no_wait_gpu = false,
1272 	};
1273 
1274 	if (vm) {
1275 		lockdep_assert_held(&vm->lock);
1276 		xe_vm_assert_held(vm);
1277 
1278 		ctx.allow_res_evict = allow_res_evict;
1279 		ctx.resv = &vm->resv;
1280 	}
1281 
1282 	return ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
1283 }
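/*
 * Illustrative sketch, not from this file: validating a vm-private bo while
 * the caller already holds both vm->lock and the vm's dma_resv; bo, vm and
 * err are assumed to exist in the caller.
 *
 *	err = xe_bo_validate(bo, vm, true);
 *	if (err)
 *		return err;
 *
 * An -EINTR or -ERESTARTSYS here only means an internal wait was interrupted
 * by a signal; the caller may retry.
 */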
1284 
1285 bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo)
1286 {
1287 	if (bo->destroy == &xe_ttm_bo_destroy)
1288 		return true;
1289 
1290 	return false;
1291 }
1292 
1293 dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset,
1294 		      size_t page_size, bool *is_lmem)
1295 {
1296 	struct xe_res_cursor cur;
1297 	u64 page;
1298 
1299 	if (!READ_ONCE(bo->ttm.pin_count))
1300 		xe_bo_assert_held(bo);
1301 
1302 	XE_BUG_ON(page_size > PAGE_SIZE);
1303 	page = offset >> PAGE_SHIFT;
1304 	offset &= (PAGE_SIZE - 1);
1305 
1306 	*is_lmem = xe_bo_is_vram(bo);
1307 
1308 	if (!*is_lmem) {
1309 		XE_BUG_ON(!bo->ttm.ttm);
1310 
1311 		xe_res_first_sg(xe_bo_get_sg(bo), page << PAGE_SHIFT,
1312 				page_size, &cur);
1313 		return xe_res_dma(&cur) + offset;
1314 	} else {
1315 		struct xe_res_cursor cur;
1316 
1317 		xe_res_first(bo->ttm.resource, page << PAGE_SHIFT,
1318 			     page_size, &cur);
1319 		return cur.start + offset + vram_region_io_offset(bo);
1320 	}
1321 }
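/*
 * Illustrative sketch, not from this file: looking up the address of the
 * first page of a bo that is either pinned or locked by the caller (bo is
 * assumed to exist there).
 *
 *	bool is_lmem;
 *	dma_addr_t addr = xe_bo_addr(bo, 0, PAGE_SIZE, &is_lmem);
 *
 * is_lmem reports whether the bo is currently in VRAM, in which case addr is
 * a VRAM offset rather than a system DMA address.
 */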
1322 
1323 int xe_bo_vmap(struct xe_bo *bo)
1324 {
1325 	void *virtual;
1326 	bool is_iomem;
1327 	int ret;
1328 
1329 	xe_bo_assert_held(bo);
1330 
1331 	if (!iosys_map_is_null(&bo->vmap))
1332 		return 0;
1333 
1334 	/*
1335 	 * We use this more or less deprecated interface for now since
1336 	 * ttm_bo_vmap() doesn't offer the optimization of kmapping
1337 	 * single page bos, which is done here.
1338 	 * TODO: Fix up ttm_bo_vmap to do that, or fix up ttm_bo_kmap
1339 	 * to use struct iosys_map.
1340 	 */
1341 	ret = ttm_bo_kmap(&bo->ttm, 0, bo->size >> PAGE_SHIFT, &bo->kmap);
1342 	if (ret)
1343 		return ret;
1344 
1345 	virtual = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
1346 	if (is_iomem)
1347 		iosys_map_set_vaddr_iomem(&bo->vmap, (void __iomem *)virtual);
1348 	else
1349 		iosys_map_set_vaddr(&bo->vmap, virtual);
1350 
1351 	return 0;
1352 }
1353 
1354 static void __xe_bo_vunmap(struct xe_bo *bo)
1355 {
1356 	if (!iosys_map_is_null(&bo->vmap)) {
1357 		iosys_map_clear(&bo->vmap);
1358 		ttm_bo_kunmap(&bo->kmap);
1359 	}
1360 }
1361 
1362 void xe_bo_vunmap(struct xe_bo *bo)
1363 {
1364 	xe_bo_assert_held(bo);
1365 	__xe_bo_vunmap(bo);
1366 }
1367 
1368 int xe_gem_create_ioctl(struct drm_device *dev, void *data,
1369 			struct drm_file *file)
1370 {
1371 	struct xe_device *xe = to_xe_device(dev);
1372 	struct xe_file *xef = to_xe_file(file);
1373 	struct drm_xe_gem_create *args = data;
1374 	struct ww_acquire_ctx ww;
1375 	struct xe_vm *vm = NULL;
1376 	struct xe_bo *bo;
1377 	unsigned bo_flags = XE_BO_CREATE_USER_BIT;
1378 	u32 handle;
1379 	int err;
1380 
1381 	if (XE_IOCTL_ERR(xe, args->extensions))
1382 		return -EINVAL;
1383 
1384 	if (XE_IOCTL_ERR(xe, args->flags &
1385 			 ~(XE_GEM_CREATE_FLAG_DEFER_BACKING |
1386 			   XE_GEM_CREATE_FLAG_SCANOUT |
1387 			   xe->info.mem_region_mask)))
1388 		return -EINVAL;
1389 
1390 	/* at least one memory type must be specified */
1391 	if (XE_IOCTL_ERR(xe, !(args->flags & xe->info.mem_region_mask)))
1392 		return -EINVAL;
1393 
1394 	if (XE_IOCTL_ERR(xe, args->handle))
1395 		return -EINVAL;
1396 
1397 	if (XE_IOCTL_ERR(xe, args->size > SIZE_MAX))
1398 		return -EINVAL;
1399 
1400 	if (XE_IOCTL_ERR(xe, args->size & ~PAGE_MASK))
1401 		return -EINVAL;
1402 
1403 	if (args->vm_id) {
1404 		vm = xe_vm_lookup(xef, args->vm_id);
1405 		if (XE_IOCTL_ERR(xe, !vm))
1406 			return -ENOENT;
1407 		err = xe_vm_lock(vm, &ww, 0, true);
1408 		if (err) {
1409 			xe_vm_put(vm);
1410 			return err;
1411 		}
1412 	}
1413 
1414 	if (args->flags & XE_GEM_CREATE_FLAG_DEFER_BACKING)
1415 		bo_flags |= XE_BO_DEFER_BACKING;
1416 
1417 	if (args->flags & XE_GEM_CREATE_FLAG_SCANOUT)
1418 		bo_flags |= XE_BO_SCANOUT_BIT;
1419 
1420 	bo_flags |= args->flags << (ffs(XE_BO_CREATE_SYSTEM_BIT) - 1);
1421 	bo = xe_bo_create(xe, NULL, vm, args->size, ttm_bo_type_device,
1422 			  bo_flags);
1423 	if (vm) {
1424 		xe_vm_unlock(vm, &ww);
1425 		xe_vm_put(vm);
1426 	}
1427 
1428 	if (IS_ERR(bo))
1429 		return PTR_ERR(bo);
1430 
1431 	err = drm_gem_handle_create(file, &bo->ttm.base, &handle);
1432 	xe_bo_put(bo);
1433 	if (err)
1434 		return err;
1435 
1436 	args->handle = handle;
1437 
1438 	return 0;
1439 }
1440 
1441 int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
1442 			     struct drm_file *file)
1443 {
1444 	struct xe_device *xe = to_xe_device(dev);
1445 	struct drm_xe_gem_mmap_offset *args = data;
1446 	struct drm_gem_object *gem_obj;
1447 
1448 	if (XE_IOCTL_ERR(xe, args->extensions))
1449 		return -EINVAL;
1450 
1451 	if (XE_IOCTL_ERR(xe, args->flags))
1452 		return -EINVAL;
1453 
1454 	gem_obj = drm_gem_object_lookup(file, args->handle);
1455 	if (XE_IOCTL_ERR(xe, !gem_obj))
1456 		return -ENOENT;
1457 
1458 	/* The mmap offset was set up at BO allocation time. */
1459 	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
1460 
1461 	xe_bo_put(gem_to_xe_bo(gem_obj));
1462 	return 0;
1463 }
1464 
1465 int xe_bo_lock(struct xe_bo *bo, struct ww_acquire_ctx *ww,
1466 	       int num_resv, bool intr)
1467 {
1468 	struct ttm_validate_buffer tv_bo;
1469 	LIST_HEAD(objs);
1470 	LIST_HEAD(dups);
1471 
1472 	XE_BUG_ON(!ww);
1473 
1474 	tv_bo.num_shared = num_resv;
1475 	tv_bo.bo = &bo->ttm;
1476 	list_add_tail(&tv_bo.head, &objs);
1477 
1478 	return ttm_eu_reserve_buffers(ww, &objs, intr, &dups);
1479 }
1480 
1481 void xe_bo_unlock(struct xe_bo *bo, struct ww_acquire_ctx *ww)
1482 {
1483 	dma_resv_unlock(bo->ttm.base.resv);
1484 	ww_acquire_fini(ww);
1485 }
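/*
 * Illustrative sketch, not from this file, of the xe_bo_lock() /
 * xe_bo_unlock() pair around a validate; bo is assumed to exist in the
 * caller.
 *
 *	struct ww_acquire_ctx ww;
 *	int err;
 *
 *	err = xe_bo_lock(bo, &ww, 0, true);
 *	if (err)
 *		return err;
 *
 *	err = xe_bo_validate(bo, NULL, false);
 *
 *	xe_bo_unlock(bo, &ww);
 *	return err;
 */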
1486 
1487 /**
1488  * xe_bo_can_migrate - Whether a buffer object likely can be migrated
1489  * @bo: The buffer object to migrate
1490  * @mem_type: The TTM memory type intended to migrate to
1491  *
1492  * Check whether the buffer object supports migration to the
1493  * given memory type. Note that pinning may affect the ability to migrate as
1494  * returned by this function.
1495  *
1496  * This function is primarily intended as a helper for checking the
1497  * possibility to migrate buffer objects and can be called without
1498  * the object lock held.
1499  *
1500  * Return: true if migration is possible, false otherwise.
1501  */
1502 bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type)
1503 {
1504 	unsigned int cur_place;
1505 
1506 	if (bo->ttm.type == ttm_bo_type_kernel)
1507 		return true;
1508 
1509 	if (bo->ttm.type == ttm_bo_type_sg)
1510 		return false;
1511 
1512 	for (cur_place = 0; cur_place < bo->placement.num_placement;
1513 	     cur_place++) {
1514 		if (bo->placements[cur_place].mem_type == mem_type)
1515 			return true;
1516 	}
1517 
1518 	return false;
1519 }
1520 
1521 static void xe_place_from_ttm_type(u32 mem_type, struct ttm_place *place)
1522 {
1523 	memset(place, 0, sizeof(*place));
1524 	place->mem_type = mem_type;
1525 }
1526 
1527 /**
1528  * xe_bo_migrate - Migrate an object to the desired region id
1529  * @bo: The buffer object to migrate.
1530  * @mem_type: The TTM region type to migrate to.
1531  *
1532  * Attempt to migrate the buffer object to the desired memory region. The
1533  * buffer object may not be pinned, and must be locked.
1534  * On successful completion, the object memory type will be updated,
1535  * but an async migration task may not have completed yet, and to
1536  * accomplish that, the object's kernel fences must be signaled with
1537  * the object lock held.
1538  *
1539  * Return: 0 on success. Negative error code on failure. In particular may
1540  * return -EINTR or -ERESTARTSYS if signal pending.
1541  */
1542 int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
1543 {
1544 	struct ttm_operation_ctx ctx = {
1545 		.interruptible = true,
1546 		.no_wait_gpu = false,
1547 	};
1548 	struct ttm_placement placement;
1549 	struct ttm_place requested;
1550 
1551 	xe_bo_assert_held(bo);
1552 
1553 	if (bo->ttm.resource->mem_type == mem_type)
1554 		return 0;
1555 
1556 	if (xe_bo_is_pinned(bo))
1557 		return -EBUSY;
1558 
1559 	if (!xe_bo_can_migrate(bo, mem_type))
1560 		return -EINVAL;
1561 
1562 	xe_place_from_ttm_type(mem_type, &requested);
1563 	placement.num_placement = 1;
1564 	placement.num_busy_placement = 1;
1565 	placement.placement = &requested;
1566 	placement.busy_placement = &requested;
1567 
1568 	return ttm_bo_validate(&bo->ttm, &placement, &ctx);
1569 }
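/*
 * Illustrative sketch, not from this file: migrating a locked, unpinned bo
 * to system memory before CPU access, similar to what xe_gem_fault() above
 * does for CPU atomics in fault mode; bo and err are assumed to exist in the
 * caller.
 *
 *	if (xe_bo_can_migrate(bo, XE_PL_TT)) {
 *		err = xe_bo_migrate(bo, XE_PL_TT);
 *		if (err == -EINTR || err == -ERESTARTSYS)
 *			return err;
 *	}
 */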
1570 
1571 /**
1572  * xe_bo_evict - Evict an object to evict placement
1573  * @bo: The buffer object to migrate.
1574  * @force_alloc: Set force_alloc in ttm_operation_ctx
1575  *
1576  * On successful completion, the object memory will be moved to evict
1577  * placement. This function blocks until the object has been fully moved.
1578  *
1579  * Return: 0 on success. Negative error code on failure.
1580  */
1581 int xe_bo_evict(struct xe_bo *bo, bool force_alloc)
1582 {
1583 	struct ttm_operation_ctx ctx = {
1584 		.interruptible = false,
1585 		.no_wait_gpu = false,
1586 		.force_alloc = force_alloc,
1587 	};
1588 	struct ttm_placement placement;
1589 	int ret;
1590 
1591 	xe_evict_flags(&bo->ttm, &placement);
1592 	ret = ttm_bo_validate(&bo->ttm, &placement, &ctx);
1593 	if (ret)
1594 		return ret;
1595 
1596 	dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
1597 			      false, MAX_SCHEDULE_TIMEOUT);
1598 
1599 	return 0;
1600 }
1601 
1602 /**
1603  * xe_bo_needs_ccs_pages - Whether a bo needs to back up CCS pages when
1604  * placed in system memory.
1605  * @bo: The xe_bo
1606  *
1607  * If a bo has an allowable placement in XE_PL_TT memory, it can't use
1608  * flat CCS compression, because the GPU then has no way to access the
1609  * CCS metadata using relevant commands. For the opposite case, we need to
1610  * allocate storage for the CCS metadata when the BO is not resident in
1611  * VRAM memory.
1612  *
1613  * Return: true if extra pages need to be allocated, false otherwise.
1614  */
1615 bool xe_bo_needs_ccs_pages(struct xe_bo *bo)
1616 {
1617 	return bo->ttm.type == ttm_bo_type_device &&
1618 		!(bo->flags & XE_BO_CREATE_SYSTEM_BIT) &&
1619 		(bo->flags & (XE_BO_CREATE_VRAM0_BIT | XE_BO_CREATE_VRAM1_BIT));
1620 }
1621 
1622 /**
1623  * __xe_bo_release_dummy() - Dummy kref release function
1624  * @kref: The embedded struct kref.
1625  *
1626  * Dummy release function for xe_bo_put_deferred(). Keep off.
1627  */
1628 void __xe_bo_release_dummy(struct kref *kref)
1629 {
1630 }
1631 
1632 /**
1633  * xe_bo_put_commit() - Put bos whose put was deferred by xe_bo_put_deferred().
1634  * @deferred: The lockless list used for the call to xe_bo_put_deferred().
1635  *
1636  * Puts all bos whose put was deferred by xe_bo_put_deferred().
1637  * The @deferred list can be either an onstack local list or a global
1638  * shared list used by a workqueue.
1639  */
1640 void xe_bo_put_commit(struct llist_head *deferred)
1641 {
1642 	struct llist_node *freed;
1643 	struct xe_bo *bo, *next;
1644 
1645 	if (!deferred)
1646 		return;
1647 
1648 	freed = llist_del_all(deferred);
1649 	if (!freed)
1650 		return;
1651 
1652 	llist_for_each_entry_safe(bo, next, freed, freed)
1653 		drm_gem_object_free(&bo->ttm.base.refcount);
1654 }
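/*
 * Illustrative sketch, not from this file, of the deferred put pattern:
 * xe_bo_put_deferred() is assumed to be the xe_bo.h helper that pairs with
 * this function, and the list being walked is purely hypothetical.
 *
 *	LLIST_HEAD(deferred);
 *
 *	list_for_each_entry(vma, &vm->some_list, some_link)
 *		xe_bo_put_deferred(vma->bo, &deferred);
 *	xe_bo_put_commit(&deferred);
 *
 * This keeps the final frees out of the (hypothetical) lock protecting the
 * list walk.
 */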
1655 
1656 /**
1657  * xe_bo_dumb_create - Create a dumb bo as backing for a fb
1658  * @file_priv: ...
1659  * @dev: ...
1660  * @args: ...
1661  *
1662  * See dumb_create() hook in include/drm/drm_drv.h
1663  *
1664  * Return: ...
1665  */
1666 int xe_bo_dumb_create(struct drm_file *file_priv,
1667 		      struct drm_device *dev,
1668 		      struct drm_mode_create_dumb *args)
1669 {
1670 	struct xe_device *xe = to_xe_device(dev);
1671 	struct xe_bo *bo;
1672 	uint32_t handle;
1673 	int cpp = DIV_ROUND_UP(args->bpp, 8);
1674 	int err;
1675 	u32 page_size = max_t(u32, PAGE_SIZE,
1676 		xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K);
1677 
1678 	args->pitch = ALIGN(args->width * cpp, 64);
1679 	args->size = ALIGN(mul_u32_u32(args->pitch, args->height),
1680 			   page_size);
1681 
1682 	bo = xe_bo_create(xe, NULL, NULL, args->size, ttm_bo_type_device,
1683 			  XE_BO_CREATE_VRAM_IF_DGFX(to_gt(xe)) |
1684 			  XE_BO_CREATE_USER_BIT | XE_BO_SCANOUT_BIT);
1685 	if (IS_ERR(bo))
1686 		return PTR_ERR(bo);
1687 
1688 	err = drm_gem_handle_create(file_priv, &bo->ttm.base, &handle);
1689 	/* drop reference from allocate - handle holds it now */
1690 	drm_gem_object_put(&bo->ttm.base);
1691 	if (!err)
1692 		args->handle = handle;
1693 	return err;
1694 }
1695 
1696 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1697 #include "tests/xe_bo.c"
1698 #endif
1699