xref: /linux/drivers/gpu/drm/xe/xe_bo.c (revision 83675851547e835c15252c601f41acf269c351d9)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_bo.h"
7 
8 #include <linux/dma-buf.h>
9 #include <linux/nospec.h>
10 
11 #include <drm/drm_drv.h>
12 #include <drm/drm_dumb_buffers.h>
13 #include <drm/drm_gem_ttm_helper.h>
14 #include <drm/drm_managed.h>
15 #include <drm/ttm/ttm_backup.h>
16 #include <drm/ttm/ttm_device.h>
17 #include <drm/ttm/ttm_placement.h>
18 #include <drm/ttm/ttm_tt.h>
19 #include <uapi/drm/xe_drm.h>
20 
21 #include <kunit/static_stub.h>
22 
23 #include <trace/events/gpu_mem.h>
24 
25 #include "xe_device.h"
26 #include "xe_dma_buf.h"
27 #include "xe_drm_client.h"
28 #include "xe_ggtt.h"
29 #include "xe_map.h"
30 #include "xe_migrate.h"
31 #include "xe_pat.h"
32 #include "xe_pm.h"
33 #include "xe_preempt_fence.h"
34 #include "xe_pxp.h"
35 #include "xe_res_cursor.h"
36 #include "xe_shrinker.h"
37 #include "xe_sriov_vf_ccs.h"
38 #include "xe_tile.h"
39 #include "xe_trace_bo.h"
40 #include "xe_ttm_stolen_mgr.h"
41 #include "xe_vm.h"
42 #include "xe_vram_types.h"
43 
44 const char *const xe_mem_type_to_name[TTM_NUM_MEM_TYPES]  = {
45 	[XE_PL_SYSTEM] = "system",
46 	[XE_PL_TT] = "gtt",
47 	[XE_PL_VRAM0] = "vram0",
48 	[XE_PL_VRAM1] = "vram1",
49 	[XE_PL_STOLEN] = "stolen"
50 };
51 
52 static const struct ttm_place sys_placement_flags = {
53 	.fpfn = 0,
54 	.lpfn = 0,
55 	.mem_type = XE_PL_SYSTEM,
56 	.flags = 0,
57 };
58 
59 static struct ttm_placement sys_placement = {
60 	.num_placement = 1,
61 	.placement = &sys_placement_flags,
62 };
63 
64 static struct ttm_placement purge_placement;
65 
66 static const struct ttm_place tt_placement_flags[] = {
67 	{
68 		.fpfn = 0,
69 		.lpfn = 0,
70 		.mem_type = XE_PL_TT,
71 		.flags = TTM_PL_FLAG_DESIRED,
72 	},
73 	{
74 		.fpfn = 0,
75 		.lpfn = 0,
76 		.mem_type = XE_PL_SYSTEM,
77 		.flags = TTM_PL_FLAG_FALLBACK,
78 	}
79 };
80 
81 static struct ttm_placement tt_placement = {
82 	.num_placement = 2,
83 	.placement = tt_placement_flags,
84 };
85 
86 #define for_each_set_bo_vram_flag(bit__, bo_flags__) \
87 	for (unsigned int __bit_tmp = BIT(0); __bit_tmp <= XE_BO_FLAG_VRAM_MASK; __bit_tmp <<= 1) \
88 		for_each_if(((bit__) = __bit_tmp) & (bo_flags__) & XE_BO_FLAG_VRAM_MASK)
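
/*
 * Illustrative usage of the iterator above (a hedged sketch, not called
 * anywhere in the driver): visit only the VRAM placement bits of a flags
 * mask, e.g.
 *
 *	u32 vram_flag;
 *
 *	for_each_set_bo_vram_flag(vram_flag, XE_BO_FLAG_VRAM0 | XE_BO_FLAG_VRAM1)
 *		pr_debug("requested vram flag: %#x\n", vram_flag);
 *
 * visits XE_BO_FLAG_VRAM0 and then XE_BO_FLAG_VRAM1 while skipping any
 * non-VRAM bits in the mask. try_add_vram() below is the real user.
 */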
89 
90 bool mem_type_is_vram(u32 mem_type)
91 {
92 	return mem_type >= XE_PL_VRAM0 && mem_type != XE_PL_STOLEN;
93 }
94 
95 static bool resource_is_stolen_vram(struct xe_device *xe, struct ttm_resource *res)
96 {
97 	return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe);
98 }
99 
100 static bool resource_is_vram(struct ttm_resource *res)
101 {
102 	return mem_type_is_vram(res->mem_type);
103 }
104 
105 bool xe_bo_is_vram(struct xe_bo *bo)
106 {
107 	return resource_is_vram(bo->ttm.resource) ||
108 		resource_is_stolen_vram(xe_bo_device(bo), bo->ttm.resource);
109 }
110 
111 bool xe_bo_is_stolen(struct xe_bo *bo)
112 {
113 	return bo->ttm.resource->mem_type == XE_PL_STOLEN;
114 }
115 
116 /**
117  * xe_bo_has_single_placement - check if BO is placed only in one memory location
118  * @bo: The BO
119  *
120  * This function checks whether a given BO is placed in only one memory location.
121  *
122  * Returns: true if the BO is placed in a single memory location, false otherwise.
123  *
124  */
125 bool xe_bo_has_single_placement(struct xe_bo *bo)
126 {
127 	return bo->placement.num_placement == 1;
128 }
129 
130 /**
131  * xe_bo_is_stolen_devmem - check if BO is of stolen type accessed via PCI BAR
132  * @bo: The BO
133  *
134  * The stolen memory is accessed through the PCI BAR for both DGFX and some
135  * integrated platforms that have a dedicated bit in the PTE for devmem (DM).
136  *
137  * Returns: true if it's stolen memory accessed via PCI BAR, false otherwise.
138  */
139 bool xe_bo_is_stolen_devmem(struct xe_bo *bo)
140 {
141 	return xe_bo_is_stolen(bo) &&
142 		GRAPHICS_VERx100(xe_bo_device(bo)) >= 1270;
143 }
144 
145 /**
146  * xe_bo_is_vm_bound - check if BO has any mappings through VM_BIND
147  * @bo: The BO
148  *
149  * Check if a given bo is bound through VM_BIND. This requires the
150  * reservation lock for the BO to be held.
151  *
152  * Returns: true if the BO has GPU VA mappings created through VM_BIND, false otherwise.
153  */
154 bool xe_bo_is_vm_bound(struct xe_bo *bo)
155 {
156 	xe_bo_assert_held(bo);
157 
158 	return !list_empty(&bo->ttm.base.gpuva.list);
159 }
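
/*
 * Illustrative usage (hedged sketch): the BO's dma_resv must be held while
 * querying bind state, for example:
 *
 *	bool bound;
 *
 *	xe_bo_lock(bo, false);
 *	bound = xe_bo_is_vm_bound(bo);
 *	xe_bo_unlock(bo);
 */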
160 
161 static bool xe_bo_is_user(struct xe_bo *bo)
162 {
163 	return bo->flags & XE_BO_FLAG_USER;
164 }
165 
166 static struct xe_migrate *
167 mem_type_to_migrate(struct xe_device *xe, u32 mem_type)
168 {
169 	struct xe_tile *tile;
170 
171 	xe_assert(xe, mem_type == XE_PL_STOLEN || mem_type_is_vram(mem_type));
172 	tile = &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)];
173 	return tile->migrate;
174 }
175 
176 static struct xe_vram_region *res_to_mem_region(struct ttm_resource *res)
177 {
178 	struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
179 	struct ttm_resource_manager *mgr;
180 	struct xe_ttm_vram_mgr *vram_mgr;
181 
182 	xe_assert(xe, resource_is_vram(res));
183 	mgr = ttm_manager_type(&xe->ttm, res->mem_type);
184 	vram_mgr = to_xe_ttm_vram_mgr(mgr);
185 
186 	return container_of(vram_mgr, struct xe_vram_region, ttm);
187 }
188 
189 static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
190 			   u32 bo_flags, u32 *c)
191 {
192 	if (bo_flags & XE_BO_FLAG_SYSTEM) {
193 		xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
194 
195 		bo->placements[*c] = (struct ttm_place) {
196 			.mem_type = XE_PL_TT,
197 			.flags = (bo_flags & XE_BO_FLAG_VRAM_MASK) ?
198 			TTM_PL_FLAG_FALLBACK : 0,
199 		};
200 		*c += 1;
201 	}
202 }
203 
204 static bool force_contiguous(u32 bo_flags)
205 {
206 	if (bo_flags & XE_BO_FLAG_STOLEN)
207 		return true; /* users expect this */
208 	else if (bo_flags & XE_BO_FLAG_PINNED &&
209 		 !(bo_flags & XE_BO_FLAG_PINNED_LATE_RESTORE))
210 		return true; /* needs vmap */
211 	else if (bo_flags & XE_BO_FLAG_CPU_ADDR_MIRROR)
212 		return true;
213 
214 	/*
215 	 * For eviction / restore on suspend / resume, objects pinned in VRAM
216 	 * must be contiguous. Also, only contiguous BOs support xe_bo_vmap().
217 	 */
218 	return bo_flags & XE_BO_FLAG_NEEDS_CPU_ACCESS &&
219 	       bo_flags & XE_BO_FLAG_PINNED;
220 }
221 
222 static u8 vram_bo_flag_to_tile_id(struct xe_device *xe, u32 vram_bo_flag)
223 {
224 	xe_assert(xe, vram_bo_flag & XE_BO_FLAG_VRAM_MASK);
225 	xe_assert(xe, (vram_bo_flag & (vram_bo_flag - 1)) == 0);
226 
227 	return __ffs(vram_bo_flag >> (__ffs(XE_BO_FLAG_VRAM0) - 1)) - 1;
228 }
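
/*
 * Worked example of the bit math above (assuming the VRAM flags are
 * consecutive bits starting at XE_BO_FLAG_VRAM0, as the mask iteration
 * above implies): XE_BO_FLAG_VRAM0 >> (__ffs(XE_BO_FLAG_VRAM0) - 1) is 0x2,
 * __ffs(0x2) is 1, and subtracting 1 yields tile id 0. The next bit,
 * XE_BO_FLAG_VRAM1, yields 0x4, so __ffs() returns 2 and the tile id is 1.
 */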
229 
230 static u32 bo_vram_flags_to_vram_placement(struct xe_device *xe, u32 bo_flags, u32 vram_flag,
231 					   enum ttm_bo_type type)
232 {
233 	u8 tile_id = vram_bo_flag_to_tile_id(xe, vram_flag);
234 
235 	xe_assert(xe, tile_id < xe->info.tile_count);
236 
237 	if (type == ttm_bo_type_kernel && !(bo_flags & XE_BO_FLAG_FORCE_USER_VRAM))
238 		return xe->tiles[tile_id].mem.kernel_vram->placement;
239 	else
240 		return xe->tiles[tile_id].mem.vram->placement;
241 }
242 
243 static void add_vram(struct xe_device *xe, struct xe_bo *bo,
244 		     struct ttm_place *places, u32 bo_flags, u32 mem_type, u32 *c)
245 {
246 	struct ttm_place place = { .mem_type = mem_type };
247 	struct ttm_resource_manager *mgr = ttm_manager_type(&xe->ttm, mem_type);
248 	struct xe_ttm_vram_mgr *vram_mgr = to_xe_ttm_vram_mgr(mgr);
249 
250 	struct xe_vram_region *vram;
251 	u64 io_size;
252 
253 	xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
254 
255 	vram = container_of(vram_mgr, struct xe_vram_region, ttm);
256 	xe_assert(xe, vram && vram->usable_size);
257 	io_size = vram->io_size;
258 
259 	if (force_contiguous(bo_flags))
260 		place.flags |= TTM_PL_FLAG_CONTIGUOUS;
261 
262 	if (io_size < vram->usable_size) {
263 		if (bo_flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) {
264 			place.fpfn = 0;
265 			place.lpfn = io_size >> PAGE_SHIFT;
266 		} else {
267 			place.flags |= TTM_PL_FLAG_TOPDOWN;
268 		}
269 	}
270 	places[*c] = place;
271 	*c += 1;
272 }
273 
274 static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
275 			 u32 bo_flags, enum ttm_bo_type type, u32 *c)
276 {
277 	u32 vram_flag;
278 
279 	for_each_set_bo_vram_flag(vram_flag, bo_flags) {
280 		u32 pl = bo_vram_flags_to_vram_placement(xe, bo_flags, vram_flag, type);
281 
282 		add_vram(xe, bo, bo->placements, bo_flags, pl, c);
283 	}
284 }
285 
286 static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
287 			   u32 bo_flags, u32 *c)
288 {
289 	if (bo_flags & XE_BO_FLAG_STOLEN) {
290 		xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
291 
292 		bo->placements[*c] = (struct ttm_place) {
293 			.mem_type = XE_PL_STOLEN,
294 			.flags = force_contiguous(bo_flags) ?
295 				TTM_PL_FLAG_CONTIGUOUS : 0,
296 		};
297 		*c += 1;
298 	}
299 }
300 
301 static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
302 				       u32 bo_flags, enum ttm_bo_type type)
303 {
304 	u32 c = 0;
305 
306 	try_add_vram(xe, bo, bo_flags, type, &c);
307 	try_add_system(xe, bo, bo_flags, &c);
308 	try_add_stolen(xe, bo, bo_flags, &c);
309 
310 	if (!c)
311 		return -EINVAL;
312 
313 	bo->placement = (struct ttm_placement) {
314 		.num_placement = c,
315 		.placement = bo->placements,
316 	};
317 
318 	return 0;
319 }
320 
321 int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
322 			      u32 bo_flags, enum ttm_bo_type type)
323 {
324 	xe_bo_assert_held(bo);
325 	return __xe_bo_placement_for_flags(xe, bo, bo_flags, type);
326 }
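
/*
 * Example of the resulting placement list (illustrative): a BO created with
 * XE_BO_FLAG_VRAM0 | XE_BO_FLAG_SYSTEM ends up with two entries, a preferred
 * VRAM placement for tile 0 followed by an XE_PL_TT entry marked
 * TTM_PL_FLAG_FALLBACK, so TTM only falls back to system/gtt when the VRAM
 * allocation fails.
 */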
327 
328 static void xe_evict_flags(struct ttm_buffer_object *tbo,
329 			   struct ttm_placement *placement)
330 {
331 	struct xe_device *xe = container_of(tbo->bdev, typeof(*xe), ttm);
332 	bool device_unplugged = drm_dev_is_unplugged(&xe->drm);
333 	struct xe_bo *bo;
334 
335 	if (!xe_bo_is_xe_bo(tbo)) {
336 		/* Don't handle scatter gather BOs */
337 		if (tbo->type == ttm_bo_type_sg) {
338 			placement->num_placement = 0;
339 			return;
340 		}
341 
342 		*placement = device_unplugged ? purge_placement : sys_placement;
343 		return;
344 	}
345 
346 	bo = ttm_to_xe_bo(tbo);
347 	if (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) {
348 		*placement = sys_placement;
349 		return;
350 	}
351 
352 	if (device_unplugged && !tbo->base.dma_buf) {
353 		*placement = purge_placement;
354 		return;
355 	}
356 
357 	/*
358 	 * For xe, sg bos that are evicted to system just trigger a
359 	 * rebind of the sg list upon subsequent validation to XE_PL_TT.
360 	 */
361 	switch (tbo->resource->mem_type) {
362 	case XE_PL_VRAM0:
363 	case XE_PL_VRAM1:
364 	case XE_PL_STOLEN:
365 		*placement = tt_placement;
366 		break;
367 	case XE_PL_TT:
368 	default:
369 		*placement = sys_placement;
370 		break;
371 	}
372 }
373 
374 /* struct xe_ttm_tt - Subclassed ttm_tt for xe */
375 struct xe_ttm_tt {
376 	struct ttm_tt ttm;
377 	struct sg_table sgt;
378 	struct sg_table *sg;
379 	/** @purgeable: Whether the content of the pages of @ttm is purgeable. */
380 	bool purgeable;
381 };
382 
383 static int xe_tt_map_sg(struct xe_device *xe, struct ttm_tt *tt)
384 {
385 	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
386 	unsigned long num_pages = tt->num_pages;
387 	int ret;
388 
389 	XE_WARN_ON((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
390 		   !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE));
391 
392 	if (xe_tt->sg)
393 		return 0;
394 
395 	ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages,
396 						num_pages, 0,
397 						(u64)num_pages << PAGE_SHIFT,
398 						xe_sg_segment_size(xe->drm.dev),
399 						GFP_KERNEL);
400 	if (ret)
401 		return ret;
402 
403 	xe_tt->sg = &xe_tt->sgt;
404 	ret = dma_map_sgtable(xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL,
405 			      DMA_ATTR_SKIP_CPU_SYNC);
406 	if (ret) {
407 		sg_free_table(xe_tt->sg);
408 		xe_tt->sg = NULL;
409 		return ret;
410 	}
411 
412 	return 0;
413 }
414 
415 static void xe_tt_unmap_sg(struct xe_device *xe, struct ttm_tt *tt)
416 {
417 	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
418 
419 	if (xe_tt->sg) {
420 		dma_unmap_sgtable(xe->drm.dev, xe_tt->sg,
421 				  DMA_BIDIRECTIONAL, 0);
422 		sg_free_table(xe_tt->sg);
423 		xe_tt->sg = NULL;
424 	}
425 }
426 
427 struct sg_table *xe_bo_sg(struct xe_bo *bo)
428 {
429 	struct ttm_tt *tt = bo->ttm.ttm;
430 	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
431 
432 	return xe_tt->sg;
433 }
434 
435 /*
436  * Account ttm pages against the device shrinker's shrinkable and
437  * purgeable counts.
438  */
439 static void xe_ttm_tt_account_add(struct xe_device *xe, struct ttm_tt *tt)
440 {
441 	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
442 
443 	if (xe_tt->purgeable)
444 		xe_shrinker_mod_pages(xe->mem.shrinker, 0, tt->num_pages);
445 	else
446 		xe_shrinker_mod_pages(xe->mem.shrinker, tt->num_pages, 0);
447 }
448 
449 static void xe_ttm_tt_account_subtract(struct xe_device *xe, struct ttm_tt *tt)
450 {
451 	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
452 
453 	if (xe_tt->purgeable)
454 		xe_shrinker_mod_pages(xe->mem.shrinker, 0, -(long)tt->num_pages);
455 	else
456 		xe_shrinker_mod_pages(xe->mem.shrinker, -(long)tt->num_pages, 0);
457 }
458 
459 static void update_global_total_pages(struct ttm_device *ttm_dev,
460 				      long num_pages)
461 {
462 #if IS_ENABLED(CONFIG_TRACE_GPU_MEM)
463 	struct xe_device *xe = ttm_to_xe_device(ttm_dev);
464 	u64 global_total_pages =
465 		atomic64_add_return(num_pages, &xe->global_total_pages);
466 
467 	trace_gpu_mem_total(xe->drm.primary->index, 0,
468 			    global_total_pages << PAGE_SHIFT);
469 #endif
470 }
471 
472 static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
473 				       u32 page_flags)
474 {
475 	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
476 	struct xe_device *xe = xe_bo_device(bo);
477 	struct xe_ttm_tt *xe_tt;
478 	struct ttm_tt *tt;
479 	unsigned long extra_pages;
480 	enum ttm_caching caching = ttm_cached;
481 	int err;
482 
483 	xe_tt = kzalloc(sizeof(*xe_tt), GFP_KERNEL);
484 	if (!xe_tt)
485 		return NULL;
486 
487 	tt = &xe_tt->ttm;
488 
489 	extra_pages = 0;
490 	if (xe_bo_needs_ccs_pages(bo))
491 		extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, xe_bo_size(bo)),
492 					   PAGE_SIZE);
493 
494 	/*
495 	 * DGFX system memory is always WB / ttm_cached, since
496 	 * other caching modes are only supported on x86. DGFX
497 	 * GPU system memory accesses are always coherent with the
498 	 * CPU.
499 	 */
500 	if (!IS_DGFX(xe)) {
501 		switch (bo->cpu_caching) {
502 		case DRM_XE_GEM_CPU_CACHING_WC:
503 			caching = ttm_write_combined;
504 			break;
505 		default:
506 			caching = ttm_cached;
507 			break;
508 		}
509 
510 		WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching);
511 
512 		/*
513 		 * Display scanout is always non-coherent with the CPU cache.
514 		 *
515 		 * For Xe_LPG and beyond, PPGTT PTE lookups are also
516 		 * non-coherent and require a CPU:WC mapping.
517 		 */
518 		if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) ||
519 		     (!xe->info.has_cached_pt && bo->flags & XE_BO_FLAG_PAGETABLE))
520 			caching = ttm_write_combined;
521 	}
522 
523 	if (bo->flags & XE_BO_FLAG_NEEDS_UC) {
524 		/*
525 		 * Valid only for internally-created buffers, for
526 		 * which cpu_caching is never initialized.
527 		 */
528 		xe_assert(xe, bo->cpu_caching == 0);
529 		caching = ttm_uncached;
530 	}
531 
532 	if (ttm_bo->type != ttm_bo_type_sg)
533 		page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;
534 
535 	err = ttm_tt_init(tt, &bo->ttm, page_flags, caching, extra_pages);
536 	if (err) {
537 		kfree(xe_tt);
538 		return NULL;
539 	}
540 
541 	if (ttm_bo->type != ttm_bo_type_sg) {
542 		err = ttm_tt_setup_backup(tt);
543 		if (err) {
544 			ttm_tt_fini(tt);
545 			kfree(xe_tt);
546 			return NULL;
547 		}
548 	}
549 
550 	return tt;
551 }
552 
553 static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt,
554 			      struct ttm_operation_ctx *ctx)
555 {
556 	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
557 	int err;
558 
559 	/*
560 	 * dma-bufs are not populated with pages, and the dma-
561 	 * addresses are set up when moved to XE_PL_TT.
562 	 */
563 	if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
564 	    !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE))
565 		return 0;
566 
567 	if (ttm_tt_is_backed_up(tt) && !xe_tt->purgeable) {
568 		err = ttm_tt_restore(ttm_dev, tt, ctx);
569 	} else {
570 		ttm_tt_clear_backed_up(tt);
571 		err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx);
572 	}
573 	if (err)
574 		return err;
575 
576 	xe_tt->purgeable = false;
577 	xe_ttm_tt_account_add(ttm_to_xe_device(ttm_dev), tt);
578 	update_global_total_pages(ttm_dev, tt->num_pages);
579 
580 	return 0;
581 }
582 
583 static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt)
584 {
585 	struct xe_device *xe = ttm_to_xe_device(ttm_dev);
586 
587 	if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
588 	    !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE))
589 		return;
590 
591 	xe_tt_unmap_sg(xe, tt);
592 
593 	ttm_pool_free(&ttm_dev->pool, tt);
594 	xe_ttm_tt_account_subtract(xe, tt);
595 	update_global_total_pages(ttm_dev, -(long)tt->num_pages);
596 }
597 
598 static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt)
599 {
600 	ttm_tt_fini(tt);
601 	kfree(tt);
602 }
603 
604 static bool xe_ttm_resource_visible(struct ttm_resource *mem)
605 {
606 	struct xe_ttm_vram_mgr_resource *vres =
607 		to_xe_ttm_vram_mgr_resource(mem);
608 
609 	return vres->used_visible_size == mem->size;
610 }
611 
612 /**
613  * xe_bo_is_visible_vram - check if BO is placed entirely in visible VRAM.
614  * @bo: The BO
615  *
616  * This function checks whether a given BO resides entirely in memory visible from the CPU.
617  *
618  * Returns: true if the BO is entirely visible, false otherwise.
619  *
620  */
621 bool xe_bo_is_visible_vram(struct xe_bo *bo)
622 {
623 	if (drm_WARN_ON(bo->ttm.base.dev, !xe_bo_is_vram(bo)))
624 		return false;
625 
626 	return xe_ttm_resource_visible(bo->ttm.resource);
627 }
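
/*
 * Illustrative usage (hedged sketch): callers that want to touch VRAM
 * through the CPU mapping should check visibility first, e.g.
 *
 *	if (xe_bo_is_vram(bo) && xe_bo_is_visible_vram(bo))
 *		... access via vram->mapping ...
 *
 * xe_ttm_access_memory() below follows this pattern, falling back to a GPU
 * copy for non-visible (or large) accesses.
 */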
628 
629 static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
630 				 struct ttm_resource *mem)
631 {
632 	struct xe_device *xe = ttm_to_xe_device(bdev);
633 
634 	switch (mem->mem_type) {
635 	case XE_PL_SYSTEM:
636 	case XE_PL_TT:
637 		return 0;
638 	case XE_PL_VRAM0:
639 	case XE_PL_VRAM1: {
640 		struct xe_vram_region *vram = res_to_mem_region(mem);
641 
642 		if (!xe_ttm_resource_visible(mem))
643 			return -EINVAL;
644 
645 		mem->bus.offset = mem->start << PAGE_SHIFT;
646 
647 		if (vram->mapping &&
648 		    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
649 			mem->bus.addr = (u8 __force *)vram->mapping +
650 				mem->bus.offset;
651 
652 		mem->bus.offset += vram->io_start;
653 		mem->bus.is_iomem = true;
654 
655 #if  !IS_ENABLED(CONFIG_X86)
656 		mem->bus.caching = ttm_write_combined;
657 #endif
658 		return 0;
659 	} case XE_PL_STOLEN:
660 		return xe_ttm_stolen_io_mem_reserve(xe, mem);
661 	default:
662 		return -EINVAL;
663 	}
664 }
665 
666 static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
667 				const struct ttm_operation_ctx *ctx)
668 {
669 	struct dma_resv_iter cursor;
670 	struct dma_fence *fence;
671 	struct drm_gem_object *obj = &bo->ttm.base;
672 	struct drm_gpuvm_bo *vm_bo;
673 	bool idle = false;
674 	int ret = 0;
675 
676 	dma_resv_assert_held(bo->ttm.base.resv);
677 
678 	if (!list_empty(&bo->ttm.base.gpuva.list)) {
679 		dma_resv_iter_begin(&cursor, bo->ttm.base.resv,
680 				    DMA_RESV_USAGE_BOOKKEEP);
681 		dma_resv_for_each_fence_unlocked(&cursor, fence)
682 			dma_fence_enable_sw_signaling(fence);
683 		dma_resv_iter_end(&cursor);
684 	}
685 
686 	drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
687 		struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
688 		struct drm_gpuva *gpuva;
689 
690 		if (!xe_vm_in_fault_mode(vm)) {
691 			drm_gpuvm_bo_evict(vm_bo, true);
692 			continue;
693 		}
694 
695 		if (!idle) {
696 			long timeout;
697 
698 			if (ctx->no_wait_gpu &&
699 			    !dma_resv_test_signaled(bo->ttm.base.resv,
700 						    DMA_RESV_USAGE_BOOKKEEP))
701 				return -EBUSY;
702 
703 			timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
704 							DMA_RESV_USAGE_BOOKKEEP,
705 							ctx->interruptible,
706 							MAX_SCHEDULE_TIMEOUT);
707 			if (!timeout)
708 				return -ETIME;
709 			if (timeout < 0)
710 				return timeout;
711 
712 			idle = true;
713 		}
714 
715 		drm_gpuvm_bo_for_each_va(gpuva, vm_bo) {
716 			struct xe_vma *vma = gpuva_to_vma(gpuva);
717 
718 			trace_xe_vma_evict(vma);
719 			ret = xe_vm_invalidate_vma(vma);
720 			if (XE_WARN_ON(ret))
721 				return ret;
722 		}
723 	}
724 
725 	return ret;
726 }
727 
728 /*
729  * The dma-buf map_attachment() / unmap_attachment() calls are hooked up here.
730  * Note that unmapping the attachment is deferred to the next
731  * map_attachment time, or to bo destroy (after idling), whichever comes first.
732  * This is to avoid syncing before unmap_attachment(), assuming that the
733  * caller relies on idling the reservation object before moving the
734  * backing store out. Should that assumption not hold, then we will be able
735  * to unconditionally call unmap_attachment() when moving out to system.
736  */
737 static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo,
738 			     struct ttm_resource *new_res)
739 {
740 	struct dma_buf_attachment *attach = ttm_bo->base.import_attach;
741 	struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt,
742 					       ttm);
743 	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
744 	bool device_unplugged = drm_dev_is_unplugged(&xe->drm);
745 	struct sg_table *sg;
746 
747 	xe_assert(xe, attach);
748 	xe_assert(xe, ttm_bo->ttm);
749 
750 	if (device_unplugged && new_res->mem_type == XE_PL_SYSTEM &&
751 	    ttm_bo->sg) {
752 		dma_resv_wait_timeout(ttm_bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
753 				      false, MAX_SCHEDULE_TIMEOUT);
754 		dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL);
755 		ttm_bo->sg = NULL;
756 	}
757 
758 	if (new_res->mem_type == XE_PL_SYSTEM)
759 		goto out;
760 
761 	if (ttm_bo->sg) {
762 		dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL);
763 		ttm_bo->sg = NULL;
764 	}
765 
766 	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
767 	if (IS_ERR(sg))
768 		return PTR_ERR(sg);
769 
770 	ttm_bo->sg = sg;
771 	xe_tt->sg = sg;
772 
773 out:
774 	ttm_bo_move_null(ttm_bo, new_res);
775 
776 	return 0;
777 }
778 
779 /**
780  * xe_bo_move_notify - Notify subsystems of a pending move
781  * @bo: The buffer object
782  * @ctx: The struct ttm_operation_ctx controlling locking and waits.
783  *
784  * This function notifies subsystems of an upcoming buffer move.
785  * Upon receiving such a notification, subsystems should schedule
786  * halting access to the underlying pages and optionally add a fence
787  * to the buffer object's dma_resv object, that signals when access is
788  * stopped. The caller will wait on all dma_resv fences before
789  * starting the move.
790  *
791  * A subsystem may commence access to the object after obtaining
792  * bindings to the new backing memory under the object lock.
793  *
794  * Return: 0 on success, -EINTR or -ERESTARTSYS if interrupted in fault mode,
795  * negative error code on error.
796  */
797 static int xe_bo_move_notify(struct xe_bo *bo,
798 			     const struct ttm_operation_ctx *ctx)
799 {
800 	struct ttm_buffer_object *ttm_bo = &bo->ttm;
801 	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
802 	struct ttm_resource *old_mem = ttm_bo->resource;
803 	u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM;
804 	int ret;
805 
806 	/*
807 	 * If this starts to call into many components, consider
808 	 * using a notification chain here.
809 	 */
810 
811 	if (xe_bo_is_pinned(bo))
812 		return -EINVAL;
813 
814 	xe_bo_vunmap(bo);
815 	ret = xe_bo_trigger_rebind(xe, bo, ctx);
816 	if (ret)
817 		return ret;
818 
819 	/* Don't call move_notify() for imported dma-bufs. */
820 	if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach)
821 		dma_buf_move_notify(ttm_bo->base.dma_buf);
822 
823 	/*
824 	 * TTM has already nuked the mmap for us (see ttm_bo_unmap_virtual),
825 	 * so if we moved from VRAM make sure to unlink this from the userfault
826 	 * tracking.
827 	 */
828 	if (mem_type_is_vram(old_mem_type)) {
829 		mutex_lock(&xe->mem_access.vram_userfault.lock);
830 		if (!list_empty(&bo->vram_userfault_link))
831 			list_del_init(&bo->vram_userfault_link);
832 		mutex_unlock(&xe->mem_access.vram_userfault.lock);
833 	}
834 
835 	return 0;
836 }
837 
838 static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
839 		      struct ttm_operation_ctx *ctx,
840 		      struct ttm_resource *new_mem,
841 		      struct ttm_place *hop)
842 {
843 	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
844 	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
845 	struct ttm_resource *old_mem = ttm_bo->resource;
846 	u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM;
847 	struct ttm_tt *ttm = ttm_bo->ttm;
848 	struct xe_migrate *migrate = NULL;
849 	struct dma_fence *fence;
850 	bool move_lacks_source;
851 	bool tt_has_data;
852 	bool needs_clear;
853 	bool handle_system_ccs = (!IS_DGFX(xe) && xe_bo_needs_ccs_pages(bo) &&
854 				  ttm && ttm_tt_is_populated(ttm)) ? true : false;
855 	int ret = 0;
856 
857 	/* Bo creation path, moving to system or TT. */
858 	if ((!old_mem && ttm) && !handle_system_ccs) {
859 		if (new_mem->mem_type == XE_PL_TT)
860 			ret = xe_tt_map_sg(xe, ttm);
861 		if (!ret)
862 			ttm_bo_move_null(ttm_bo, new_mem);
863 		goto out;
864 	}
865 
866 	if (ttm_bo->type == ttm_bo_type_sg) {
867 		if (new_mem->mem_type == XE_PL_SYSTEM)
868 			ret = xe_bo_move_notify(bo, ctx);
869 		if (!ret)
870 			ret = xe_bo_move_dmabuf(ttm_bo, new_mem);
871 		return ret;
872 	}
873 
874 	tt_has_data = ttm && (ttm_tt_is_populated(ttm) || ttm_tt_is_swapped(ttm));
875 
876 	move_lacks_source = !old_mem || (handle_system_ccs ? (!bo->ccs_cleared) :
877 					 (!mem_type_is_vram(old_mem_type) && !tt_has_data));
878 
879 	needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) ||
880 		(!ttm && ttm_bo->type == ttm_bo_type_device);
881 
882 	if (new_mem->mem_type == XE_PL_TT) {
883 		ret = xe_tt_map_sg(xe, ttm);
884 		if (ret)
885 			goto out;
886 	}
887 
888 	if ((move_lacks_source && !needs_clear)) {
889 		ttm_bo_move_null(ttm_bo, new_mem);
890 		goto out;
891 	}
892 
893 	if (!move_lacks_source && (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) &&
894 	    new_mem->mem_type == XE_PL_SYSTEM) {
895 		ret = xe_svm_bo_evict(bo);
896 		if (!ret) {
897 			drm_dbg(&xe->drm, "Evict system allocator BO success\n");
898 			ttm_bo_move_null(ttm_bo, new_mem);
899 		} else {
900 			drm_dbg(&xe->drm, "Evict system allocator BO failed=%pe\n",
901 				ERR_PTR(ret));
902 		}
903 
904 		goto out;
905 	}
906 
907 	if (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT && !handle_system_ccs) {
908 		ttm_bo_move_null(ttm_bo, new_mem);
909 		goto out;
910 	}
911 
912 	/*
913 	 * Failed multi-hop where the old_mem is still marked as
914 	 * TTM_PL_FLAG_TEMPORARY; this should just be a dummy move.
915 	 */
916 	if (old_mem_type == XE_PL_TT &&
917 	    new_mem->mem_type == XE_PL_TT) {
918 		ttm_bo_move_null(ttm_bo, new_mem);
919 		goto out;
920 	}
921 
922 	if (!move_lacks_source && !xe_bo_is_pinned(bo)) {
923 		ret = xe_bo_move_notify(bo, ctx);
924 		if (ret)
925 			goto out;
926 	}
927 
928 	if (old_mem_type == XE_PL_TT &&
929 	    new_mem->mem_type == XE_PL_SYSTEM) {
930 		long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
931 						     DMA_RESV_USAGE_BOOKKEEP,
932 						     false,
933 						     MAX_SCHEDULE_TIMEOUT);
934 		if (timeout < 0) {
935 			ret = timeout;
936 			goto out;
937 		}
938 
939 		if (!handle_system_ccs) {
940 			ttm_bo_move_null(ttm_bo, new_mem);
941 			goto out;
942 		}
943 	}
944 
945 	if (!move_lacks_source &&
946 	    ((old_mem_type == XE_PL_SYSTEM && resource_is_vram(new_mem)) ||
947 	     (mem_type_is_vram(old_mem_type) &&
948 	      new_mem->mem_type == XE_PL_SYSTEM))) {
949 		hop->fpfn = 0;
950 		hop->lpfn = 0;
951 		hop->mem_type = XE_PL_TT;
952 		hop->flags = TTM_PL_FLAG_TEMPORARY;
953 		ret = -EMULTIHOP;
954 		goto out;
955 	}
956 
957 	if (bo->tile)
958 		migrate = bo->tile->migrate;
959 	else if (resource_is_vram(new_mem))
960 		migrate = mem_type_to_migrate(xe, new_mem->mem_type);
961 	else if (mem_type_is_vram(old_mem_type))
962 		migrate = mem_type_to_migrate(xe, old_mem_type);
963 	else
964 		migrate = xe->tiles[0].migrate;
965 
966 	xe_assert(xe, migrate);
967 	trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
968 	if (xe_rpm_reclaim_safe(xe)) {
969 		/*
970 		 * We might be called through swapout in the validation path of
971 		 * another TTM device, so acquire rpm here.
972 		 */
973 		xe_pm_runtime_get(xe);
974 	} else {
975 		drm_WARN_ON(&xe->drm, handle_system_ccs);
976 		xe_pm_runtime_get_noresume(xe);
977 	}
978 
979 	if (move_lacks_source) {
980 		u32 flags = 0;
981 
982 		if (mem_type_is_vram(new_mem->mem_type))
983 			flags |= XE_MIGRATE_CLEAR_FLAG_FULL;
984 		else if (handle_system_ccs)
985 			flags |= XE_MIGRATE_CLEAR_FLAG_CCS_DATA;
986 
987 		fence = xe_migrate_clear(migrate, bo, new_mem, flags);
988 	} else {
989 		fence = xe_migrate_copy(migrate, bo, bo, old_mem, new_mem,
990 					handle_system_ccs);
991 	}
992 	if (IS_ERR(fence)) {
993 		ret = PTR_ERR(fence);
994 		xe_pm_runtime_put(xe);
995 		goto out;
996 	}
997 	if (!move_lacks_source) {
998 		ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict, true,
999 						new_mem);
1000 		if (ret) {
1001 			dma_fence_wait(fence, false);
1002 			ttm_bo_move_null(ttm_bo, new_mem);
1003 			ret = 0;
1004 		}
1005 	} else {
1006 		/*
1007 		 * ttm_bo_move_accel_cleanup() may blow up if
1008 		 * bo->resource == NULL, so just attach the
1009 		 * fence and set the new resource.
1010 		 */
1011 		dma_resv_add_fence(ttm_bo->base.resv, fence,
1012 				   DMA_RESV_USAGE_KERNEL);
1013 		ttm_bo_move_null(ttm_bo, new_mem);
1014 	}
1015 
1016 	dma_fence_put(fence);
1017 	xe_pm_runtime_put(xe);
1018 
1019 	/*
1020 	 * CCS metadata is migrated from TT -> SMEM. So, let us detach the
1021 	 * BBs from the BO as they are no longer needed.
1022 	 */
1023 	if (IS_VF_CCS_READY(xe) && old_mem_type == XE_PL_TT &&
1024 	    new_mem->mem_type == XE_PL_SYSTEM)
1025 		xe_sriov_vf_ccs_detach_bo(bo);
1026 
1027 	if (IS_VF_CCS_READY(xe) &&
1028 	    ((move_lacks_source && new_mem->mem_type == XE_PL_TT) ||
1029 	     (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT)) &&
1030 	    handle_system_ccs)
1031 		ret = xe_sriov_vf_ccs_attach_bo(bo);
1032 
1033 out:
1034 	if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) &&
1035 	    ttm_bo->ttm) {
1036 		long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
1037 						     DMA_RESV_USAGE_KERNEL,
1038 						     false,
1039 						     MAX_SCHEDULE_TIMEOUT);
1040 		if (timeout < 0)
1041 			ret = timeout;
1042 
1043 		if (IS_VF_CCS_READY(xe))
1044 			xe_sriov_vf_ccs_detach_bo(bo);
1045 
1046 		xe_tt_unmap_sg(xe, ttm_bo->ttm);
1047 	}
1048 
1049 	return ret;
1050 }
1051 
1052 static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx,
1053 			       struct ttm_buffer_object *bo,
1054 			       unsigned long *scanned)
1055 {
1056 	struct xe_device *xe = ttm_to_xe_device(bo->bdev);
1057 	struct ttm_tt *tt = bo->ttm;
1058 	long lret;
1059 
1060 	/* Fake move to system, without copying data. */
1061 	if (bo->resource->mem_type != XE_PL_SYSTEM) {
1062 		struct ttm_resource *new_resource;
1063 
1064 		lret = ttm_bo_wait_ctx(bo, ctx);
1065 		if (lret)
1066 			return lret;
1067 
1068 		lret = ttm_bo_mem_space(bo, &sys_placement, &new_resource, ctx);
1069 		if (lret)
1070 			return lret;
1071 
1072 		xe_tt_unmap_sg(xe, bo->ttm);
1073 		ttm_bo_move_null(bo, new_resource);
1074 	}
1075 
1076 	*scanned += bo->ttm->num_pages;
1077 	lret = ttm_bo_shrink(ctx, bo, (struct ttm_bo_shrink_flags)
1078 			     {.purge = true,
1079 			      .writeback = false,
1080 			      .allow_move = false});
1081 
1082 	if (lret > 0) {
1083 		xe_ttm_tt_account_subtract(xe, bo->ttm);
1084 		update_global_total_pages(bo->bdev, -(long)tt->num_pages);
1085 	}
1086 
1087 	return lret;
1088 }
1089 
1090 static bool
1091 xe_bo_eviction_valuable(struct ttm_buffer_object *bo, const struct ttm_place *place)
1092 {
1093 	struct drm_gpuvm_bo *vm_bo;
1094 
1095 	if (!ttm_bo_eviction_valuable(bo, place))
1096 		return false;
1097 
1098 	if (!xe_bo_is_xe_bo(bo))
1099 		return true;
1100 
1101 	drm_gem_for_each_gpuvm_bo(vm_bo, &bo->base) {
1102 		if (xe_vm_is_validating(gpuvm_to_vm(vm_bo->vm)))
1103 			return false;
1104 	}
1105 
1106 	return true;
1107 }
1108 
1109 /**
1110  * xe_bo_shrink() - Try to shrink an xe bo.
1111  * @ctx: The struct ttm_operation_ctx used for shrinking.
1112  * @bo: The TTM buffer object whose pages to shrink.
1113  * @flags: Flags governing the shrink behaviour.
1114  * @scanned: Pointer to a counter of the number of pages
1115  * attempted to shrink.
1116  *
1117  * Try to shrink or purge a bo, and if it succeeds, unmap dma.
1118  * Note that we also need to be able to handle non-xe bos
1119  * (ghost bos), but only if the struct ttm_tt is embedded in
1120  * a struct xe_ttm_tt. When the function attempts to shrink
1121  * the pages of a buffer object, the value pointed to by @scanned
1122  * is updated.
1123  *
1124  * Return: The number of pages shrunken or purged, or negative error
1125  * code on failure.
1126  */
1127 long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
1128 		  const struct xe_bo_shrink_flags flags,
1129 		  unsigned long *scanned)
1130 {
1131 	struct ttm_tt *tt = bo->ttm;
1132 	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
1133 	struct ttm_place place = {.mem_type = bo->resource->mem_type};
1134 	struct xe_bo *xe_bo = ttm_to_xe_bo(bo);
1135 	struct xe_device *xe = ttm_to_xe_device(bo->bdev);
1136 	bool needs_rpm;
1137 	long lret = 0L;
1138 
1139 	if (!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE) ||
1140 	    (flags.purge && !xe_tt->purgeable))
1141 		return -EBUSY;
1142 
1143 	if (!xe_bo_eviction_valuable(bo, &place))
1144 		return -EBUSY;
1145 
1146 	if (!xe_bo_is_xe_bo(bo) || !xe_bo_get_unless_zero(xe_bo))
1147 		return xe_bo_shrink_purge(ctx, bo, scanned);
1148 
1149 	if (xe_tt->purgeable) {
1150 		if (bo->resource->mem_type != XE_PL_SYSTEM)
1151 			lret = xe_bo_move_notify(xe_bo, ctx);
1152 		if (!lret)
1153 			lret = xe_bo_shrink_purge(ctx, bo, scanned);
1154 		goto out_unref;
1155 	}
1156 
1157 	/* System CCS needs gpu copy when moving PL_TT -> PL_SYSTEM */
1158 	needs_rpm = (!IS_DGFX(xe) && bo->resource->mem_type != XE_PL_SYSTEM &&
1159 		     xe_bo_needs_ccs_pages(xe_bo));
1160 	if (needs_rpm && !xe_pm_runtime_get_if_active(xe))
1161 		goto out_unref;
1162 
1163 	*scanned += tt->num_pages;
1164 	lret = ttm_bo_shrink(ctx, bo, (struct ttm_bo_shrink_flags)
1165 			     {.purge = false,
1166 			      .writeback = flags.writeback,
1167 			      .allow_move = true});
1168 	if (needs_rpm)
1169 		xe_pm_runtime_put(xe);
1170 
1171 	if (lret > 0) {
1172 		xe_ttm_tt_account_subtract(xe, tt);
1173 		update_global_total_pages(bo->bdev, -(long)tt->num_pages);
1174 	}
1175 
1176 out_unref:
1177 	xe_bo_put(xe_bo);
1178 
1179 	return lret;
1180 }
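
/*
 * Illustrative caller sketch (hedged; the real caller lives in the xe
 * shrinker): a scan loop might invoke this as
 *
 *	struct xe_bo_shrink_flags sflags = { .writeback = true };
 *	unsigned long scanned = 0;
 *	long freed;
 *
 *	freed = xe_bo_shrink(&tctx, &xbo->ttm, sflags, &scanned);
 *
 * where @xbo is a struct xe_bo, @tctx is a hypothetical non-blocking
 * struct ttm_operation_ctx, and a negative @freed (e.g. -EBUSY) means the
 * BO was skipped.
 */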
1181 
1182 /**
1183  * xe_bo_notifier_prepare_pinned() - Prepare a pinned VRAM object to be backed
1184  * up in system memory.
1185  * @bo: The buffer object to prepare.
1186  *
1187  * On successful completion, the object backup pages are allocated. Expectation
1188  * is that this is called from the PM notifier, prior to suspend/hibernation.
1189  *
1190  * Return: 0 on success. Negative error code on failure.
1191  */
1192 int xe_bo_notifier_prepare_pinned(struct xe_bo *bo)
1193 {
1194 	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
1195 	struct xe_validation_ctx ctx;
1196 	struct drm_exec exec;
1197 	struct xe_bo *backup;
1198 	int ret = 0;
1199 
1200 	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.exclusive = true}, ret) {
1201 		ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
1202 		drm_exec_retry_on_contention(&exec);
1203 		xe_assert(xe, !ret);
1204 		xe_assert(xe, !bo->backup_obj);
1205 
1206 		/*
1207 		 * Since this is called from the PM notifier we might have raced with
1208 		 * someone unpinning this after we dropped the pinned list lock and
1209 		 * before grabbing the above bo lock.
1210 		 */
1211 		if (!xe_bo_is_pinned(bo))
1212 			break;
1213 
1214 		if (!xe_bo_is_vram(bo))
1215 			break;
1216 
1217 		if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
1218 			break;
1219 
1220 		backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo),
1221 					   DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
1222 					   XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
1223 					   XE_BO_FLAG_PINNED, &exec);
1224 		if (IS_ERR(backup)) {
1225 			drm_exec_retry_on_contention(&exec);
1226 			ret = PTR_ERR(backup);
1227 			xe_validation_retry_on_oom(&ctx, &ret);
1228 			break;
1229 		}
1230 
1231 		backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
1232 		ttm_bo_pin(&backup->ttm);
1233 		bo->backup_obj = backup;
1234 	}
1235 
1236 	return ret;
1237 }
1238 
1239 /**
1240  * xe_bo_notifier_unprepare_pinned() - Undo the previous prepare operation.
1241  * @bo: The buffer object to undo the prepare for.
1242  *
1243  * Always returns 0. The backup object is removed, if still present. Expectation
1244  * is that this is called from the PM notifier when undoing the prepare step.
1245  *
1246  * Return: Always returns 0.
1247  */
1248 int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo)
1249 {
1250 	xe_bo_lock(bo, false);
1251 	if (bo->backup_obj) {
1252 		ttm_bo_unpin(&bo->backup_obj->ttm);
1253 		xe_bo_put(bo->backup_obj);
1254 		bo->backup_obj = NULL;
1255 	}
1256 	xe_bo_unlock(bo);
1257 
1258 	return 0;
1259 }
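
/*
 * Illustrative pairing (hedged sketch, not the driver's actual PM notifier):
 *
 *	case PM_HIBERNATION_PREPARE:
 *		err = xe_bo_notifier_prepare_pinned(bo);
 *		break;
 *	case PM_POST_HIBERNATION:
 *		xe_bo_notifier_unprepare_pinned(bo);
 *		break;
 *
 * where a real notifier would walk the device's pinned BO lists rather than
 * a single bo.
 */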
1260 
1261 static int xe_bo_evict_pinned_copy(struct xe_bo *bo, struct xe_bo *backup)
1262 {
1263 	struct xe_device *xe = xe_bo_device(bo);
1264 	bool unmap = false;
1265 	int ret = 0;
1266 
1267 	if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
1268 		struct xe_migrate *migrate;
1269 		struct dma_fence *fence;
1270 
1271 		if (bo->tile)
1272 			migrate = bo->tile->migrate;
1273 		else
1274 			migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);
1275 
1276 		xe_assert(xe, bo->ttm.base.resv == backup->ttm.base.resv);
1277 		ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
1278 		if (ret)
1279 			goto out_backup;
1280 
1281 		fence = xe_migrate_copy(migrate, bo, backup, bo->ttm.resource,
1282 					backup->ttm.resource, false);
1283 		if (IS_ERR(fence)) {
1284 			ret = PTR_ERR(fence);
1285 			goto out_backup;
1286 		}
1287 
1288 		dma_resv_add_fence(bo->ttm.base.resv, fence,
1289 				   DMA_RESV_USAGE_KERNEL);
1290 		dma_fence_put(fence);
1291 	} else {
1292 		ret = xe_bo_vmap(backup);
1293 		if (ret)
1294 			goto out_backup;
1295 
1296 		if (iosys_map_is_null(&bo->vmap)) {
1297 			ret = xe_bo_vmap(bo);
1298 			if (ret)
1299 				goto out_vunmap;
1300 			unmap = true;
1301 		}
1302 
1303 		xe_map_memcpy_from(xe, backup->vmap.vaddr, &bo->vmap, 0,
1304 				   xe_bo_size(bo));
1305 	}
1306 
1307 	if (!bo->backup_obj)
1308 		bo->backup_obj = backup;
1309 out_vunmap:
1310 	xe_bo_vunmap(backup);
1311 out_backup:
1312 	if (unmap)
1313 		xe_bo_vunmap(bo);
1314 
1315 	return ret;
1316 }
1317 
1318 /**
1319  * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
1320  * @bo: The buffer object to move.
1321  *
1322  * On successful completion, the object memory will be moved to system memory.
1323  *
1324  * This is needed for special handling of pinned VRAM objects during
1325  * suspend-resume.
1326  *
1327  * Return: 0 on success. Negative error code on failure.
1328  */
1329 int xe_bo_evict_pinned(struct xe_bo *bo)
1330 {
1331 	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
1332 	struct xe_validation_ctx ctx;
1333 	struct drm_exec exec;
1334 	struct xe_bo *backup = bo->backup_obj;
1335 	bool backup_created = false;
1336 	int ret = 0;
1337 
1338 	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.exclusive = true}, ret) {
1339 		ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
1340 		drm_exec_retry_on_contention(&exec);
1341 		xe_assert(xe, !ret);
1342 
1343 		if (WARN_ON(!bo->ttm.resource)) {
1344 			ret = -EINVAL;
1345 			break;
1346 		}
1347 
1348 		if (WARN_ON(!xe_bo_is_pinned(bo))) {
1349 			ret = -EINVAL;
1350 			break;
1351 		}
1352 
1353 		if (!xe_bo_is_vram(bo))
1354 			break;
1355 
1356 		if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
1357 			break;
1358 
1359 		if (!backup) {
1360 			backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL,
1361 						   xe_bo_size(bo),
1362 						   DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
1363 						   XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
1364 						   XE_BO_FLAG_PINNED, &exec);
1365 			if (IS_ERR(backup)) {
1366 				drm_exec_retry_on_contention(&exec);
1367 				ret = PTR_ERR(backup);
1368 				xe_validation_retry_on_oom(&ctx, &ret);
1369 				break;
1370 			}
1371 			backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
1372 			backup_created = true;
1373 		}
1374 
1375 		ret = xe_bo_evict_pinned_copy(bo, backup);
1376 	}
1377 
1378 	if (ret && backup_created)
1379 		xe_bo_put(backup);
1380 
1381 	return ret;
1382 }
1383 
1384 /**
1385  * xe_bo_restore_pinned() - Restore a pinned VRAM object
1386  * @bo: The buffer object to move.
1387  *
1388  * On successful completion, the object memory will be moved back to VRAM.
1389  *
1390  * This is needed for special handling of pinned VRAM objects during
1391  * suspend-resume.
1392  *
1393  * Return: 0 on success. Negative error code on failure.
1394  */
1395 int xe_bo_restore_pinned(struct xe_bo *bo)
1396 {
1397 	struct ttm_operation_ctx ctx = {
1398 		.interruptible = false,
1399 		.gfp_retry_mayfail = false,
1400 	};
1401 	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
1402 	struct xe_bo *backup = bo->backup_obj;
1403 	bool unmap = false;
1404 	int ret;
1405 
1406 	if (!backup)
1407 		return 0;
1408 
1409 	xe_bo_lock(bo, false);
1410 
1411 	if (!xe_bo_is_pinned(backup)) {
1412 		ret = ttm_bo_validate(&backup->ttm, &backup->placement, &ctx);
1413 		if (ret)
1414 			goto out_unlock_bo;
1415 	}
1416 
1417 	if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
1418 		struct xe_migrate *migrate;
1419 		struct dma_fence *fence;
1420 
1421 		if (bo->tile)
1422 			migrate = bo->tile->migrate;
1423 		else
1424 			migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);
1425 
1426 		ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
1427 		if (ret)
1428 			goto out_unlock_bo;
1429 
1430 		fence = xe_migrate_copy(migrate, backup, bo,
1431 					backup->ttm.resource, bo->ttm.resource,
1432 					false);
1433 		if (IS_ERR(fence)) {
1434 			ret = PTR_ERR(fence);
1435 			goto out_unlock_bo;
1436 		}
1437 
1438 		dma_resv_add_fence(bo->ttm.base.resv, fence,
1439 				   DMA_RESV_USAGE_KERNEL);
1440 		dma_fence_put(fence);
1441 	} else {
1442 		ret = xe_bo_vmap(backup);
1443 		if (ret)
1444 			goto out_unlock_bo;
1445 
1446 		if (iosys_map_is_null(&bo->vmap)) {
1447 			ret = xe_bo_vmap(bo);
1448 			if (ret)
1449 				goto out_backup;
1450 			unmap = true;
1451 		}
1452 
1453 		xe_map_memcpy_to(xe, &bo->vmap, 0, backup->vmap.vaddr,
1454 				 xe_bo_size(bo));
1455 	}
1456 
1457 	bo->backup_obj = NULL;
1458 
1459 out_backup:
1460 	xe_bo_vunmap(backup);
1461 	if (!bo->backup_obj) {
1462 		if (xe_bo_is_pinned(backup))
1463 			ttm_bo_unpin(&backup->ttm);
1464 		xe_bo_put(backup);
1465 	}
1466 out_unlock_bo:
1467 	if (unmap)
1468 		xe_bo_vunmap(bo);
1469 	xe_bo_unlock(bo);
1470 	return ret;
1471 }
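
/*
 * Illustrative suspend/resume flow (hedged sketch; the real callers are the
 * xe PM paths that walk the pinned BO lists):
 *
 *	ret = xe_bo_evict_pinned(bo);	/- suspend: copy VRAM -> system backup
 *	...
 *	ret = xe_bo_restore_pinned(bo);	/- resume: copy backup -> VRAM
 *
 * xe_bo_restore_pinned() is a no-op when no backup object exists, e.g. for
 * XE_BO_FLAG_PINNED_NORESTORE objects that were never backed up.
 */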
1472 
1473 int xe_bo_dma_unmap_pinned(struct xe_bo *bo)
1474 {
1475 	struct ttm_buffer_object *ttm_bo = &bo->ttm;
1476 	struct ttm_tt *tt = ttm_bo->ttm;
1477 
1478 	if (tt) {
1479 		struct xe_ttm_tt *xe_tt = container_of(tt, typeof(*xe_tt), ttm);
1480 
1481 		if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) {
1482 			dma_buf_unmap_attachment(ttm_bo->base.import_attach,
1483 						 ttm_bo->sg,
1484 						 DMA_BIDIRECTIONAL);
1485 			ttm_bo->sg = NULL;
1486 			xe_tt->sg = NULL;
1487 		} else if (xe_tt->sg) {
1488 			dma_unmap_sgtable(ttm_to_xe_device(ttm_bo->bdev)->drm.dev,
1489 					  xe_tt->sg,
1490 					  DMA_BIDIRECTIONAL, 0);
1491 			sg_free_table(xe_tt->sg);
1492 			xe_tt->sg = NULL;
1493 		}
1494 	}
1495 
1496 	return 0;
1497 }
1498 
1499 static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo,
1500 				       unsigned long page_offset)
1501 {
1502 	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
1503 	struct xe_res_cursor cursor;
1504 	struct xe_vram_region *vram;
1505 
1506 	if (ttm_bo->resource->mem_type == XE_PL_STOLEN)
1507 		return xe_ttm_stolen_io_offset(bo, page_offset << PAGE_SHIFT) >> PAGE_SHIFT;
1508 
1509 	vram = res_to_mem_region(ttm_bo->resource);
1510 	xe_res_first(ttm_bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
1511 	return (vram->io_start + cursor.start) >> PAGE_SHIFT;
1512 }
1513 
1514 static void __xe_bo_vunmap(struct xe_bo *bo);
1515 
1516 /*
1517  * TODO: Move this function to TTM so we don't rely on how TTM does its
1518  * locking, thereby abusing TTM internals.
1519  */
1520 static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo)
1521 {
1522 	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
1523 	bool locked;
1524 
1525 	xe_assert(xe, !kref_read(&ttm_bo->kref));
1526 
1527 	/*
1528 	 * We can typically only race with TTM trylocking under the
1529 	 * lru_lock, which will immediately be unlocked again since
1530 	 * the ttm_bo refcount is zero at this point. So trylocking *should*
1531 	 * always succeed here, as long as we hold the lru lock.
1532 	 */
1533 	spin_lock(&ttm_bo->bdev->lru_lock);
1534 	locked = dma_resv_trylock(&ttm_bo->base._resv);
1535 	spin_unlock(&ttm_bo->bdev->lru_lock);
1536 	xe_assert(xe, locked);
1537 
1538 	return locked;
1539 }
1540 
1541 static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
1542 {
1543 	struct dma_resv_iter cursor;
1544 	struct dma_fence *fence;
1545 	struct dma_fence *replacement = NULL;
1546 	struct xe_bo *bo;
1547 
1548 	if (!xe_bo_is_xe_bo(ttm_bo))
1549 		return;
1550 
1551 	bo = ttm_to_xe_bo(ttm_bo);
1552 	xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount)));
1553 
1554 	if (!xe_ttm_bo_lock_in_destructor(ttm_bo))
1555 		return;
1556 
1557 	/*
1558 	 * Scrub the preempt fences if any. The unbind fence is already
1559 	 * attached to the resv.
1560 	 * TODO: Don't do this for external bos once we scrub them after
1561 	 * unbind.
1562 	 */
1563 	dma_resv_for_each_fence(&cursor, &ttm_bo->base._resv,
1564 				DMA_RESV_USAGE_BOOKKEEP, fence) {
1565 		if (xe_fence_is_xe_preempt(fence) &&
1566 		    !dma_fence_is_signaled(fence)) {
1567 			if (!replacement)
1568 				replacement = dma_fence_get_stub();
1569 
1570 			dma_resv_replace_fences(&ttm_bo->base._resv,
1571 						fence->context,
1572 						replacement,
1573 						DMA_RESV_USAGE_BOOKKEEP);
1574 		}
1575 	}
1576 	dma_fence_put(replacement);
1577 
1578 	dma_resv_unlock(&ttm_bo->base._resv);
1579 }
1580 
1581 static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
1582 {
1583 	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
1584 
1585 	if (!xe_bo_is_xe_bo(ttm_bo))
1586 		return;
1587 
1588 	if (IS_VF_CCS_READY(ttm_to_xe_device(ttm_bo->bdev)))
1589 		xe_sriov_vf_ccs_detach_bo(bo);
1590 
1591 	/*
1592 	 * Object is idle and about to be destroyed. Release the
1593 	 * dma-buf attachment.
1594 	 */
1595 	if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) {
1596 		struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm,
1597 						       struct xe_ttm_tt, ttm);
1598 
1599 		dma_buf_unmap_attachment(ttm_bo->base.import_attach, ttm_bo->sg,
1600 					 DMA_BIDIRECTIONAL);
1601 		ttm_bo->sg = NULL;
1602 		xe_tt->sg = NULL;
1603 	}
1604 }
1605 
1606 static void xe_ttm_bo_purge(struct ttm_buffer_object *ttm_bo, struct ttm_operation_ctx *ctx)
1607 {
1608 	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
1609 
1610 	if (ttm_bo->ttm) {
1611 		struct ttm_placement place = {};
1612 		int ret = ttm_bo_validate(ttm_bo, &place, ctx);
1613 
1614 		drm_WARN_ON(&xe->drm, ret);
1615 	}
1616 }
1617 
1618 static void xe_ttm_bo_swap_notify(struct ttm_buffer_object *ttm_bo)
1619 {
1620 	struct ttm_operation_ctx ctx = {
1621 		.interruptible = false,
1622 		.gfp_retry_mayfail = false,
1623 	};
1624 
1625 	if (ttm_bo->ttm) {
1626 		struct xe_ttm_tt *xe_tt =
1627 			container_of(ttm_bo->ttm, struct xe_ttm_tt, ttm);
1628 
1629 		if (xe_tt->purgeable)
1630 			xe_ttm_bo_purge(ttm_bo, &ctx);
1631 	}
1632 }
1633 
1634 static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
1635 				unsigned long offset, void *buf, int len,
1636 				int write)
1637 {
1638 	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
1639 	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
1640 	struct iosys_map vmap;
1641 	struct xe_res_cursor cursor;
1642 	struct xe_vram_region *vram;
1643 	int bytes_left = len;
1644 	int err = 0;
1645 
1646 	xe_bo_assert_held(bo);
1647 	xe_device_assert_mem_access(xe);
1648 
1649 	if (!mem_type_is_vram(ttm_bo->resource->mem_type))
1650 		return -EIO;
1651 
1652 	if (!xe_bo_is_visible_vram(bo) || len >= SZ_16K) {
1653 		struct xe_migrate *migrate =
1654 			mem_type_to_migrate(xe, ttm_bo->resource->mem_type);
1655 
1656 		err = xe_migrate_access_memory(migrate, bo, offset, buf, len,
1657 					       write);
1658 		goto out;
1659 	}
1660 
1661 	vram = res_to_mem_region(ttm_bo->resource);
1662 	xe_res_first(ttm_bo->resource, offset & PAGE_MASK,
1663 		     xe_bo_size(bo) - (offset & PAGE_MASK), &cursor);
1664 
1665 	do {
1666 		unsigned long page_offset = (offset & ~PAGE_MASK);
1667 		int byte_count = min((int)(PAGE_SIZE - page_offset), bytes_left);
1668 
1669 		iosys_map_set_vaddr_iomem(&vmap, (u8 __iomem *)vram->mapping +
1670 					  cursor.start);
1671 		if (write)
1672 			xe_map_memcpy_to(xe, &vmap, page_offset, buf, byte_count);
1673 		else
1674 			xe_map_memcpy_from(xe, buf, &vmap, page_offset, byte_count);
1675 
1676 		buf += byte_count;
1677 		offset += byte_count;
1678 		bytes_left -= byte_count;
1679 		if (bytes_left)
1680 			xe_res_next(&cursor, PAGE_SIZE);
1681 	} while (bytes_left);
1682 
1683 out:
1684 	return err ?: len;
1685 }
1686 
1687 const struct ttm_device_funcs xe_ttm_funcs = {
1688 	.ttm_tt_create = xe_ttm_tt_create,
1689 	.ttm_tt_populate = xe_ttm_tt_populate,
1690 	.ttm_tt_unpopulate = xe_ttm_tt_unpopulate,
1691 	.ttm_tt_destroy = xe_ttm_tt_destroy,
1692 	.evict_flags = xe_evict_flags,
1693 	.move = xe_bo_move,
1694 	.io_mem_reserve = xe_ttm_io_mem_reserve,
1695 	.io_mem_pfn = xe_ttm_io_mem_pfn,
1696 	.access_memory = xe_ttm_access_memory,
1697 	.release_notify = xe_ttm_bo_release_notify,
1698 	.eviction_valuable = xe_bo_eviction_valuable,
1699 	.delete_mem_notify = xe_ttm_bo_delete_mem_notify,
1700 	.swap_notify = xe_ttm_bo_swap_notify,
1701 };
1702 
1703 static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
1704 {
1705 	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
1706 	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
1707 	struct xe_tile *tile;
1708 	u8 id;
1709 
1710 	if (bo->ttm.base.import_attach)
1711 		drm_prime_gem_destroy(&bo->ttm.base, NULL);
1712 	drm_gem_object_release(&bo->ttm.base);
1713 
1714 	xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list));
1715 
1716 	for_each_tile(tile, xe, id)
1717 		if (bo->ggtt_node[id])
1718 			xe_ggtt_remove_bo(tile->mem.ggtt, bo);
1719 
1720 #ifdef CONFIG_PROC_FS
1721 	if (bo->client)
1722 		xe_drm_client_remove_bo(bo);
1723 #endif
1724 
1725 	if (bo->vm && xe_bo_is_user(bo))
1726 		xe_vm_put(bo->vm);
1727 
1728 	if (bo->parent_obj)
1729 		xe_bo_put(bo->parent_obj);
1730 
1731 	mutex_lock(&xe->mem_access.vram_userfault.lock);
1732 	if (!list_empty(&bo->vram_userfault_link))
1733 		list_del(&bo->vram_userfault_link);
1734 	mutex_unlock(&xe->mem_access.vram_userfault.lock);
1735 
1736 	kfree(bo);
1737 }
1738 
1739 static void xe_gem_object_free(struct drm_gem_object *obj)
1740 {
1741 	/* Our BO reference counting scheme works as follows:
1742 	 *
1743 	 * The gem object kref is typically used throughout the driver,
1744 	 * and the gem object holds a ttm_buffer_object refcount, so
1745 	 * that when the last gem object reference is put, which is when
1746 	 * we end up in this function, we also put that ttm_buffer_object
1747 	 * refcount. Anything using gem interfaces is then no longer
1748 	 * allowed to access the object in a way that requires a gem
1749 	 * refcount, including locking the object.
1750 	 *
1751 	 * Driver ttm callbacks are allowed to use the ttm_buffer_object
1752 	 * refcount directly if needed.
1753 	 */
1754 	__xe_bo_vunmap(gem_to_xe_bo(obj));
1755 	ttm_bo_fini(container_of(obj, struct ttm_buffer_object, base));
1756 }
1757 
1758 static void xe_gem_object_close(struct drm_gem_object *obj,
1759 				struct drm_file *file_priv)
1760 {
1761 	struct xe_bo *bo = gem_to_xe_bo(obj);
1762 
1763 	if (bo->vm && !xe_vm_in_fault_mode(bo->vm)) {
1764 		xe_assert(xe_bo_device(bo), xe_bo_is_user(bo));
1765 
1766 		xe_bo_lock(bo, false);
1767 		ttm_bo_set_bulk_move(&bo->ttm, NULL);
1768 		xe_bo_unlock(bo);
1769 	}
1770 }
1771 
1772 static bool should_migrate_to_smem(struct xe_bo *bo)
1773 {
1774 	/*
1775 	 * NOTE: The following atomic checks are platform-specific. For example,
1776 	 * if a device supports CXL atomics, these may not be necessary or
1777 	 * may behave differently.
1778 	 */
1779 
1780 	return bo->attr.atomic_access == DRM_XE_ATOMIC_GLOBAL ||
1781 	       bo->attr.atomic_access == DRM_XE_ATOMIC_CPU;
1782 }
1783 
1784 static int xe_bo_wait_usage_kernel(struct xe_bo *bo, struct ttm_operation_ctx *ctx)
1785 {
1786 	long lerr;
1787 
1788 	if (ctx->no_wait_gpu)
1789 		return dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL) ?
1790 			0 : -EBUSY;
1791 
1792 	lerr = dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
1793 				     ctx->interruptible, MAX_SCHEDULE_TIMEOUT);
1794 	if (lerr < 0)
1795 		return lerr;
1796 	if (lerr == 0)
1797 		return -EBUSY;
1798 
1799 	return 0;
1800 }
1801 
1802 /* Populate the bo if swapped out, or migrate if the access mode requires that. */
1803 static int xe_bo_fault_migrate(struct xe_bo *bo, struct ttm_operation_ctx *ctx,
1804 			       struct drm_exec *exec)
1805 {
1806 	struct ttm_buffer_object *tbo = &bo->ttm;
1807 	int err = 0;
1808 
1809 	if (ttm_manager_type(tbo->bdev, tbo->resource->mem_type)->use_tt) {
1810 		err = xe_bo_wait_usage_kernel(bo, ctx);
1811 		if (!err)
1812 			err = ttm_bo_populate(&bo->ttm, ctx);
1813 	} else if (should_migrate_to_smem(bo)) {
1814 		xe_assert(xe_bo_device(bo), bo->flags & XE_BO_FLAG_SYSTEM);
1815 		err = xe_bo_migrate(bo, XE_PL_TT, ctx, exec);
1816 	}
1817 
1818 	return err;
1819 }
1820 
1821 /* Call into TTM to populate PTEs, and register bo for PTE removal on runtime suspend. */
1822 static vm_fault_t __xe_bo_cpu_fault(struct vm_fault *vmf, struct xe_device *xe, struct xe_bo *bo)
1823 {
1824 	vm_fault_t ret;
1825 
1826 	trace_xe_bo_cpu_fault(bo);
1827 
1828 	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
1829 				       TTM_BO_VM_NUM_PREFAULT);
1830 	/*
1831 	 * By the time TTM is actually called to insert PTEs, no blocking conditions
1832 	 * should remain; if they did, TTM could drop locks and return VM_FAULT_RETRY.
1833 	 */
1834 	xe_assert(xe, ret != VM_FAULT_RETRY);
1835 
1836 	if (ret == VM_FAULT_NOPAGE &&
1837 	    mem_type_is_vram(bo->ttm.resource->mem_type)) {
1838 		mutex_lock(&xe->mem_access.vram_userfault.lock);
1839 		if (list_empty(&bo->vram_userfault_link))
1840 			list_add(&bo->vram_userfault_link,
1841 				 &xe->mem_access.vram_userfault.list);
1842 		mutex_unlock(&xe->mem_access.vram_userfault.lock);
1843 	}
1844 
1845 	return ret;
1846 }
1847 
1848 static vm_fault_t xe_err_to_fault_t(int err)
1849 {
1850 	switch (err) {
1851 	case 0:
1852 	case -EINTR:
1853 	case -ERESTARTSYS:
1854 	case -EAGAIN:
1855 		return VM_FAULT_NOPAGE;
1856 	case -ENOMEM:
1857 	case -ENOSPC:
1858 		return VM_FAULT_OOM;
1859 	default:
1860 		break;
1861 	}
1862 	return VM_FAULT_SIGBUS;
1863 }
1864 
1865 static bool xe_ttm_bo_is_imported(struct ttm_buffer_object *tbo)
1866 {
1867 	dma_resv_assert_held(tbo->base.resv);
1868 
1869 	return tbo->ttm &&
1870 		(tbo->ttm->page_flags & (TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE)) ==
1871 		TTM_TT_FLAG_EXTERNAL;
1872 }
1873 
1874 static vm_fault_t xe_bo_cpu_fault_fastpath(struct vm_fault *vmf, struct xe_device *xe,
1875 					   struct xe_bo *bo, bool needs_rpm)
1876 {
1877 	struct ttm_buffer_object *tbo = &bo->ttm;
1878 	vm_fault_t ret = VM_FAULT_RETRY;
1879 	struct xe_validation_ctx ctx;
1880 	struct ttm_operation_ctx tctx = {
1881 		.interruptible = true,
1882 		.no_wait_gpu = true,
1883 		.gfp_retry_mayfail = true,
1885 	};
1886 	int err;
1887 
1888 	if (needs_rpm && !xe_pm_runtime_get_if_active(xe))
1889 		return VM_FAULT_RETRY;
1890 
1891 	err = xe_validation_ctx_init(&ctx, &xe->val, NULL,
1892 				     (struct xe_val_flags) {
1893 					     .interruptible = true,
1894 					     .no_block = true
1895 				     });
1896 	if (err)
1897 		goto out_pm;
1898 
1899 	if (!dma_resv_trylock(tbo->base.resv))
1900 		goto out_validation;
1901 
1902 	if (xe_ttm_bo_is_imported(tbo)) {
1903 		ret = VM_FAULT_SIGBUS;
1904 		drm_dbg(&xe->drm, "CPU trying to access an imported buffer object.\n");
1905 		goto out_unlock;
1906 	}
1907 
1908 	err = xe_bo_fault_migrate(bo, &tctx, NULL);
1909 	if (err) {
1910 		/* Keep the VM_FAULT_RETRY default for -ENOMEM, -ENOSPC and -EBUSY. */
1911 		if (err != -ENOMEM && err != -ENOSPC && err != -EBUSY)
1912 			ret = xe_err_to_fault_t(err);
1913 		goto out_unlock;
1914 	}
1915 
1916 	if (dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL))
1917 		ret = __xe_bo_cpu_fault(vmf, xe, bo);
1918 
1919 out_unlock:
1920 	dma_resv_unlock(tbo->base.resv);
1921 out_validation:
1922 	xe_validation_ctx_fini(&ctx);
1923 out_pm:
1924 	if (needs_rpm)
1925 		xe_pm_runtime_put(xe);
1926 
1927 	return ret;
1928 }
1929 
1930 static vm_fault_t xe_bo_cpu_fault(struct vm_fault *vmf)
1931 {
1932 	struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
1933 	struct drm_device *ddev = tbo->base.dev;
1934 	struct xe_device *xe = to_xe_device(ddev);
1935 	struct xe_bo *bo = ttm_to_xe_bo(tbo);
1936 	bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK;
1937 	bool retry_after_wait = false;
1938 	struct xe_validation_ctx ctx;
1939 	struct drm_exec exec;
1940 	vm_fault_t ret;
1941 	int err = 0;
1942 	int idx;
1943 
1944 	if (!drm_dev_enter(&xe->drm, &idx))
1945 		return ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
1946 
1947 	ret = xe_bo_cpu_fault_fastpath(vmf, xe, bo, needs_rpm);
1948 	if (ret != VM_FAULT_RETRY)
1949 		goto out;
1950 
1951 	if (fault_flag_allow_retry_first(vmf->flags)) {
1952 		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
1953 			goto out;
1954 		retry_after_wait = true;
1955 		xe_bo_get(bo);
1956 		mmap_read_unlock(vmf->vma->vm_mm);
1957 	} else {
1958 		ret = VM_FAULT_NOPAGE;
1959 	}
1960 
1961 	/*
1962 	 * The fastpath failed and we were not required to return and retry immediately.
1963 	 * We're now running in one of two modes:
1964 	 *
1965 	 * 1) retry_after_wait == true: The mmap_read_lock() has been dropped, so we
1966 	 * can only resolve the blocking waits here, not the fault itself. The aim is
1967 	 * that the fastpath succeeds when the fault is retried, although it may still
1968 	 * fail since the bo lock is dropped in between.
1969 	 *
1970 	 * 2) retry_after_wait == false: The fastpath failed, typically even after
1971 	 * a retry. Do whatever's necessary to resolve the fault.
1972 	 *
1973 	 * This construct is recommended to avoid excessive waits under the mmap_lock.
1974 	 */
1975 
1976 	if (needs_rpm)
1977 		xe_pm_runtime_get(xe);
1978 
1979 	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
1980 			    err) {
1981 		struct ttm_operation_ctx tctx = {
1982 			.interruptible = true,
1983 			.no_wait_gpu = false,
1984 			.gfp_retry_mayfail = retry_after_wait,
1985 		};
1986 
1987 		err = drm_exec_lock_obj(&exec, &tbo->base);
1988 		drm_exec_retry_on_contention(&exec);
1989 		if (err)
1990 			break;
1991 
1992 		if (xe_ttm_bo_is_imported(tbo)) {
1993 			err = -EFAULT;
1994 			drm_dbg(&xe->drm, "CPU trying to access an imported buffer object.\n");
1995 			break;
1996 		}
1997 
1998 		err = xe_bo_fault_migrate(bo, &tctx, &exec);
1999 		if (err) {
2000 			drm_exec_retry_on_contention(&exec);
2001 			xe_validation_retry_on_oom(&ctx, &err);
2002 			break;
2003 		}
2004 
2005 		err = xe_bo_wait_usage_kernel(bo, &tctx);
2006 		if (err)
2007 			break;
2008 
2009 		if (!retry_after_wait)
2010 			ret = __xe_bo_cpu_fault(vmf, xe, bo);
2011 	}
2012 	/* if retry_after_wait == true, we *must* return VM_FAULT_RETRY. */
2013 	if (err && !retry_after_wait)
2014 		ret = xe_err_to_fault_t(err);
2015 
2016 	if (needs_rpm)
2017 		xe_pm_runtime_put(xe);
2018 
2019 	if (retry_after_wait)
2020 		xe_bo_put(bo);
2021 out:
2022 	drm_dev_exit(idx);
2023 
2024 	return ret;
2025 }
2026 
2027 static int xe_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
2028 			   void *buf, int len, int write)
2029 {
2030 	struct ttm_buffer_object *ttm_bo = vma->vm_private_data;
2031 	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
2032 	struct xe_device *xe = xe_bo_device(bo);
2033 
2034 	guard(xe_pm_runtime)(xe);
2035 	return ttm_bo_vm_access(vma, addr, buf, len, write);
2036 }
2037 
2038 /**
2039  * xe_bo_read() - Read from an xe_bo
2040  * @bo: The buffer object to read from.
2041  * @offset: The byte offset to start reading from.
2042  * @dst: Location to store the read.
2043  * @size: Size in bytes for the read.
2044  *
2045  * Read @size bytes from the @bo, starting from @offset, storing into @dst.
2046  *
2047  * Return: Zero on success, or negative error.
2048  */
2049 int xe_bo_read(struct xe_bo *bo, u64 offset, void *dst, int size)
2050 {
2051 	int ret;
2052 
2053 	ret = ttm_bo_access(&bo->ttm, offset, dst, size, 0);
2054 	if (ret >= 0 && ret != size)
2055 		ret = -EIO;
2056 	else if (ret == size)
2057 		ret = 0;
2058 
2059 	return ret;
2060 }
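/*
 * Usage sketch for xe_bo_read() (illustrative only; the calling context and
 * the local names below are hypothetical). Zero is returned only when the
 * full read completed, so @dst can be consumed directly on success:
 *
 *	u32 magic;
 *	int err;
 *
 *	err = xe_bo_read(bo, 0, &magic, sizeof(magic));
 *	if (err)
 *		return err;
 */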
2061 
2062 static const struct vm_operations_struct xe_gem_vm_ops = {
2063 	.fault = xe_bo_cpu_fault,
2064 	.open = ttm_bo_vm_open,
2065 	.close = ttm_bo_vm_close,
2066 	.access = xe_bo_vm_access,
2067 };
2068 
2069 static const struct drm_gem_object_funcs xe_gem_object_funcs = {
2070 	.free = xe_gem_object_free,
2071 	.close = xe_gem_object_close,
2072 	.mmap = drm_gem_ttm_mmap,
2073 	.export = xe_gem_prime_export,
2074 	.vm_ops = &xe_gem_vm_ops,
2075 };
2076 
2077 /**
2078  * xe_bo_alloc - Allocate storage for a struct xe_bo
2079  *
2080  * This function is intended to allocate storage to be used for input
2081  * to __xe_bo_create_locked(), in the case a pointer to the bo to be
2082  * created is needed before the call to __xe_bo_create_locked().
2083  * If __xe_bo_create_locked() ends up never being called, then the
2084  * storage allocated with this function needs to be freed using
2085  * xe_bo_free().
2086  *
2087  * Return: A pointer to an uninitialized struct xe_bo on success,
2088  * ERR_PTR(-ENOMEM) on error.
2089  */
2090 struct xe_bo *xe_bo_alloc(void)
2091 {
2092 	struct xe_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
2093 
2094 	if (!bo)
2095 		return ERR_PTR(-ENOMEM);
2096 
2097 	return bo;
2098 }
2099 
2100 /**
2101  * xe_bo_free - Free storage allocated using xe_bo_alloc()
2102  * @bo: The buffer object storage.
2103  *
2104  * Refer to xe_bo_alloc() documentation for valid use-cases.
2105  */
2106 void xe_bo_free(struct xe_bo *bo)
2107 {
2108 	kfree(bo);
2109 }
2110 
2111 /**
2112  * xe_bo_init_locked() - Initialize or create an xe_bo.
2113  * @xe: The xe device.
2114  * @bo: An already allocated buffer object or NULL
2115  * if the function should allocate a new one.
2116  * @tile: The tile to select for migration of this bo, and the tile used for
2117  * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2118  * @resv: Pointer to a locked shared reservation object to use for this bo,
2119  * or NULL for the xe_bo to use its own.
2120  * @bulk: The bulk move to use for LRU bumping, or NULL for external bos.
2121  * @size: The storage size to use for the bo.
2122  * @cpu_caching: The cpu caching used for system memory backing store.
2123  * @type: The TTM buffer object type.
2124  * @flags: XE_BO_FLAG_ flags.
2125  * @exec: The drm_exec transaction to use for exhaustive eviction.
2126  *
2127  * Initialize or create an xe buffer object. On failure, any allocated buffer
2128  * object passed in @bo will have been unreferenced.
2129  *
2130  * Return: The buffer object on success. Negative error pointer on failure.
2131  */
2132 struct xe_bo *xe_bo_init_locked(struct xe_device *xe, struct xe_bo *bo,
2133 				struct xe_tile *tile, struct dma_resv *resv,
2134 				struct ttm_lru_bulk_move *bulk, size_t size,
2135 				u16 cpu_caching, enum ttm_bo_type type,
2136 				u32 flags, struct drm_exec *exec)
2137 {
2138 	struct ttm_operation_ctx ctx = {
2139 		.interruptible = true,
2140 		.no_wait_gpu = false,
2141 		.gfp_retry_mayfail = true,
2142 	};
2143 	struct ttm_placement *placement;
2144 	uint32_t alignment;
2145 	size_t aligned_size;
2146 	int err;
2147 
2148 	/* Only kernel objects should set GT */
2149 	xe_assert(xe, !tile || type == ttm_bo_type_kernel);
2150 
2151 	if (XE_WARN_ON(!size)) {
2152 		xe_bo_free(bo);
2153 		return ERR_PTR(-EINVAL);
2154 	}
2155 
2156 	/* XE_BO_FLAG_GGTTx requires XE_BO_FLAG_GGTT also be set */
2157 	if ((flags & XE_BO_FLAG_GGTT_ALL) && !(flags & XE_BO_FLAG_GGTT))
2158 		return ERR_PTR(-EINVAL);
2159 
2160 	if (flags & (XE_BO_FLAG_VRAM_MASK | XE_BO_FLAG_STOLEN) &&
2161 	    !(flags & XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE) &&
2162 	    ((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) ||
2163 	     (flags & (XE_BO_FLAG_NEEDS_64K | XE_BO_FLAG_NEEDS_2M)))) {
2164 		size_t align = flags & XE_BO_FLAG_NEEDS_2M ? SZ_2M : SZ_64K;
2165 
2166 		aligned_size = ALIGN(size, align);
2167 		if (type != ttm_bo_type_device)
2168 			size = ALIGN(size, align);
2169 		flags |= XE_BO_FLAG_INTERNAL_64K;
2170 		alignment = align >> PAGE_SHIFT;
2171 	} else {
2172 		aligned_size = ALIGN(size, SZ_4K);
2173 		flags &= ~XE_BO_FLAG_INTERNAL_64K;
2174 		alignment = SZ_4K >> PAGE_SHIFT;
2175 	}
2176 
2177 	if (type == ttm_bo_type_device && aligned_size != size)
2178 		return ERR_PTR(-EINVAL);
2179 
2180 	if (!bo) {
2181 		bo = xe_bo_alloc();
2182 		if (IS_ERR(bo))
2183 			return bo;
2184 	}
2185 
2186 	bo->ccs_cleared = false;
2187 	bo->tile = tile;
2188 	bo->flags = flags;
2189 	bo->cpu_caching = cpu_caching;
2190 	bo->ttm.base.funcs = &xe_gem_object_funcs;
2191 	bo->ttm.priority = XE_BO_PRIORITY_NORMAL;
2192 	INIT_LIST_HEAD(&bo->pinned_link);
2193 #ifdef CONFIG_PROC_FS
2194 	INIT_LIST_HEAD(&bo->client_link);
2195 #endif
2196 	INIT_LIST_HEAD(&bo->vram_userfault_link);
2197 
2198 	drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size);
2199 
2200 	if (resv) {
2201 		ctx.allow_res_evict = !(flags & XE_BO_FLAG_NO_RESV_EVICT);
2202 		ctx.resv = resv;
2203 	}
2204 
2205 	xe_validation_assert_exec(xe, exec, &bo->ttm.base);
2206 	if (!(flags & XE_BO_FLAG_FIXED_PLACEMENT)) {
2207 		err = __xe_bo_placement_for_flags(xe, bo, bo->flags, type);
2208 		if (WARN_ON(err)) {
2209 			xe_ttm_bo_destroy(&bo->ttm);
2210 			return ERR_PTR(err);
2211 		}
2212 	}
2213 
2214 	/* Defer populating type_sg bos */
2215 	placement = (type == ttm_bo_type_sg ||
2216 		     bo->flags & XE_BO_FLAG_DEFER_BACKING) ? &sys_placement :
2217 		&bo->placement;
2218 	err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type,
2219 				   placement, alignment,
2220 				   &ctx, NULL, resv, xe_ttm_bo_destroy);
2221 	if (err)
2222 		return ERR_PTR(err);
2223 
2224 	/*
2225 	 * The VRAM pages underneath are potentially still being accessed by the
2226 	 * GPU, as per async GPU clearing and async evictions. However TTM makes
2227 	 * sure to add any corresponding move/clear fences into the objects
2228 	 * sure to add any corresponding move/clear fences into the object's
2229 	 *
2230 	 * For KMD internal buffers we don't care about GPU clearing, however we
2231 	 * still need to handle async evictions, where the VRAM is still being
2232 	 * accessed by the GPU. Most internal callers are not expecting this,
2233 	 * since they are missing the required synchronisation before accessing
2234 	 * the memory. To keep things simple just sync wait any kernel fences
2235 	 * here, if the buffer is designated KMD internal.
2236 	 *
2237 	 * For normal userspace objects we should already have the required
2238 	 * pipelining or sync waiting elsewhere, since we already have to deal
2239 	 * with things like async GPU clearing.
2240 	 */
2241 	if (type == ttm_bo_type_kernel) {
2242 		long timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
2243 						     DMA_RESV_USAGE_KERNEL,
2244 						     ctx.interruptible,
2245 						     MAX_SCHEDULE_TIMEOUT);
2246 
2247 		if (timeout < 0) {
2248 			if (!resv)
2249 				dma_resv_unlock(bo->ttm.base.resv);
2250 			xe_bo_put(bo);
2251 			return ERR_PTR(timeout);
2252 		}
2253 	}
2254 
2255 	bo->created = true;
2256 	if (bulk)
2257 		ttm_bo_set_bulk_move(&bo->ttm, bulk);
2258 	else
2259 		ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
2260 
2261 	return bo;
2262 }
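/*
 * Usage sketch for the xe_bo_alloc() / xe_bo_init_locked() pairing
 * (illustrative only; the calling context, the @exec transaction and the flag
 * choice are hypothetical). Storage is pre-allocated so the bo pointer is
 * known up front; per the xe_bo_alloc() documentation above, xe_bo_free() is
 * only needed if initialization is never attempted:
 *
 *	struct xe_bo *bo = xe_bo_alloc();
 *
 *	if (IS_ERR(bo))
 *		return bo;
 *
 *	bo = xe_bo_init_locked(xe, bo, tile, NULL, NULL, SZ_4K, 0,
 *			       ttm_bo_type_kernel, XE_BO_FLAG_SYSTEM, exec);
 *	if (IS_ERR(bo))
 *		return bo;
 *
 *	xe_bo_unlock(bo);
 */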
2263 
2264 static int __xe_bo_fixed_placement(struct xe_device *xe,
2265 				   struct xe_bo *bo, enum ttm_bo_type type,
2266 				   u32 flags,
2267 				   u64 start, u64 end, u64 size)
2268 {
2269 	struct ttm_place *place = bo->placements;
2270 	u32 vram_flag, vram_stolen_flags;
2271 
2272 	/*
2273 	 * to allow fixed placement in GGTT of a VF, post-migration fixups would have to
2274 	 * include selecting a new fixed offset and shifting the page ranges for it
2275 	 */
2276 	xe_assert(xe, !IS_SRIOV_VF(xe) || !(bo->flags & XE_BO_FLAG_GGTT));
2277 
2278 	if (flags & (XE_BO_FLAG_USER | XE_BO_FLAG_SYSTEM))
2279 		return -EINVAL;
2280 
2281 	vram_flag = flags & XE_BO_FLAG_VRAM_MASK;
2282 	vram_stolen_flags = (flags & (XE_BO_FLAG_STOLEN)) | vram_flag;
2283 
2284 	/* check if more than one VRAM/STOLEN flag is set */
2285 	if (hweight32(vram_stolen_flags) > 1)
2286 		return -EINVAL;
2287 
2288 	place->flags = TTM_PL_FLAG_CONTIGUOUS;
2289 	place->fpfn = start >> PAGE_SHIFT;
2290 	place->lpfn = end >> PAGE_SHIFT;
2291 
2292 	if (flags & XE_BO_FLAG_STOLEN)
2293 		place->mem_type = XE_PL_STOLEN;
2294 	else
2295 		place->mem_type = bo_vram_flags_to_vram_placement(xe, flags, vram_flag, type);
2296 
2297 	bo->placement = (struct ttm_placement) {
2298 		.num_placement = 1,
2299 		.placement = place,
2300 	};
2301 
2302 	return 0;
2303 }
2304 
2305 static struct xe_bo *
2306 __xe_bo_create_locked(struct xe_device *xe,
2307 		      struct xe_tile *tile, struct xe_vm *vm,
2308 		      size_t size, u64 start, u64 end,
2309 		      u16 cpu_caching, enum ttm_bo_type type, u32 flags,
2310 		      u64 alignment, struct drm_exec *exec)
2311 {
2312 	struct xe_bo *bo = NULL;
2313 	int err;
2314 
2315 	if (vm)
2316 		xe_vm_assert_held(vm);
2317 
2318 	if (start || end != ~0ULL) {
2319 		bo = xe_bo_alloc();
2320 		if (IS_ERR(bo))
2321 			return bo;
2322 
2323 		flags |= XE_BO_FLAG_FIXED_PLACEMENT;
2324 		err = __xe_bo_fixed_placement(xe, bo, type, flags, start, end, size);
2325 		if (err) {
2326 			xe_bo_free(bo);
2327 			return ERR_PTR(err);
2328 		}
2329 	}
2330 
2331 	bo = xe_bo_init_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
2332 			       vm && !xe_vm_in_fault_mode(vm) &&
2333 			       flags & XE_BO_FLAG_USER ?
2334 			       &vm->lru_bulk_move : NULL, size,
2335 			       cpu_caching, type, flags, exec);
2336 	if (IS_ERR(bo))
2337 		return bo;
2338 
2339 	bo->min_align = alignment;
2340 
2341 	/*
2342 	 * Note that instead of taking a reference on the drm_gpuvm_resv_bo(),
2343 	 * to ensure the shared resv doesn't disappear under the bo, the bo
2344 	 * will keep a reference to the vm, and circular references are avoided
2345 	 * by having all the vm's bo references released at vm close
2346 	 * time.
2347 	 */
2348 	if (vm && xe_bo_is_user(bo))
2349 		xe_vm_get(vm);
2350 	bo->vm = vm;
2351 
2352 	if (bo->flags & XE_BO_FLAG_GGTT) {
2353 		struct xe_tile *t;
2354 		u8 id;
2355 
2356 		if (!(bo->flags & XE_BO_FLAG_GGTT_ALL)) {
2357 			if (!tile && flags & XE_BO_FLAG_STOLEN)
2358 				tile = xe_device_get_root_tile(xe);
2359 
2360 			xe_assert(xe, tile);
2361 		}
2362 
2363 		for_each_tile(t, xe, id) {
2364 			if (t != tile && !(bo->flags & XE_BO_FLAG_GGTTx(t)))
2365 				continue;
2366 
2367 			if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
2368 				err = xe_ggtt_insert_bo_at(t->mem.ggtt, bo,
2369 							   start + xe_bo_size(bo), U64_MAX,
2370 							   exec);
2371 			} else {
2372 				err = xe_ggtt_insert_bo(t->mem.ggtt, bo, exec);
2373 			}
2374 			if (err)
2375 				goto err_unlock_put_bo;
2376 		}
2377 	}
2378 
2379 	trace_xe_bo_create(bo);
2380 	return bo;
2381 
2382 err_unlock_put_bo:
2383 	__xe_bo_unset_bulk_move(bo);
2384 	xe_bo_unlock_vm_held(bo);
2385 	xe_bo_put(bo);
2386 	return ERR_PTR(err);
2387 }
2388 
2389 /**
2390  * xe_bo_create_locked() - Create a BO
2391  * @xe: The xe device.
2392  * @tile: The tile to select for migration of this bo, and the tile used for
2393  * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2394  * @vm: The local vm or NULL for external objects.
2395  * @size: The storage size to use for the bo.
2396  * @type: The TTM buffer object type.
2397  * @flags: XE_BO_FLAG_ flags.
2398  * @exec: The drm_exec transaction to use for exhaustive eviction.
2399  *
2400  * Create a locked xe BO with no range- nor alignment restrictions.
2401  *
2402  * Return: The buffer object on success. Negative error pointer on failure.
2403  */
2404 struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
2405 				  struct xe_vm *vm, size_t size,
2406 				  enum ttm_bo_type type, u32 flags,
2407 				  struct drm_exec *exec)
2408 {
2409 	return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type,
2410 				     flags, 0, exec);
2411 }
2412 
2413 static struct xe_bo *xe_bo_create_novm(struct xe_device *xe, struct xe_tile *tile,
2414 				       size_t size, u16 cpu_caching,
2415 				       enum ttm_bo_type type, u32 flags,
2416 				       u64 alignment, bool intr)
2417 {
2418 	struct xe_validation_ctx ctx;
2419 	struct drm_exec exec;
2420 	struct xe_bo *bo;
2421 	int ret = 0;
2422 
2423 	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = intr},
2424 			    ret) {
2425 		bo = __xe_bo_create_locked(xe, tile, NULL, size, 0, ~0ULL,
2426 					   cpu_caching, type, flags, alignment, &exec);
2427 		drm_exec_retry_on_contention(&exec);
2428 		if (IS_ERR(bo)) {
2429 			ret = PTR_ERR(bo);
2430 			xe_validation_retry_on_oom(&ctx, &ret);
2431 		} else {
2432 			xe_bo_unlock(bo);
2433 		}
2434 	}
2435 
2436 	return ret ? ERR_PTR(ret) : bo;
2437 }
2438 
2439 /**
2440  * xe_bo_create_user() - Create a user BO
2441  * @xe: The xe device.
2442  * @vm: The local vm or NULL for external objects.
2443  * @size: The storage size to use for the bo.
2444  * @cpu_caching: The caching mode to be used for system backing store.
2445  * @flags: XE_BO_FLAG_ flags.
2446  * @exec: The drm_exec transaction to use for exhaustive eviction, or NULL
2447  * if such a transaction should be initiated by the call.
2448  *
2449  * Create a bo on behalf of user-space.
2450  *
2451  * Return: The buffer object on success. Negative error pointer on failure.
2452  */
2453 struct xe_bo *xe_bo_create_user(struct xe_device *xe,
2454 				struct xe_vm *vm, size_t size,
2455 				u16 cpu_caching,
2456 				u32 flags, struct drm_exec *exec)
2457 {
2458 	struct xe_bo *bo;
2459 
2460 	flags |= XE_BO_FLAG_USER;
2461 
2462 	if (vm || exec) {
2463 		xe_assert(xe, exec);
2464 		bo = __xe_bo_create_locked(xe, NULL, vm, size, 0, ~0ULL,
2465 					   cpu_caching, ttm_bo_type_device,
2466 					   flags, 0, exec);
2467 		if (!IS_ERR(bo))
2468 			xe_bo_unlock_vm_held(bo);
2469 	} else {
2470 		bo = xe_bo_create_novm(xe, NULL, size, cpu_caching,
2471 				       ttm_bo_type_device, flags, 0, true);
2472 	}
2473 
2474 	return bo;
2475 }
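/*
 * Usage sketch for xe_bo_create_user() without a VM (illustrative only; the
 * caller, the size and the placement flag are hypothetical). With @vm and
 * @exec both NULL the function starts its own validation transaction:
 *
 *	struct xe_bo *bo;
 *
 *	bo = xe_bo_create_user(xe, NULL, SZ_2M, DRM_XE_GEM_CPU_CACHING_WC,
 *			       XE_BO_FLAG_SYSTEM, NULL);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 */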
2476 
2477 /**
2478  * xe_bo_create_pin_range_novm() - Create and pin a BO with range options.
2479  * @xe: The xe device.
2480  * @tile: The tile to select for migration of this bo, and the tile used for
2481  * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2482  * @size: The storage size to use for the bo.
2483  * @start: Start of fixed VRAM range or 0.
2484  * @end: End of fixed VRAM range or ~0ULL.
2485  * @type: The TTM buffer object type.
2486  * @flags: XE_BO_FLAG_ flags.
2487  *
2488  * Create and pin an xe BO with range options. If @start and @end indicate
2489  * a fixed VRAM range, this must be a ttm_bo_type_kernel bo with VRAM placement
2490  * only.
2491  *
2492  * Return: The buffer object on success. Negative error pointer on failure.
2493  */
2494 struct xe_bo *xe_bo_create_pin_range_novm(struct xe_device *xe, struct xe_tile *tile,
2495 					  size_t size, u64 start, u64 end,
2496 					  enum ttm_bo_type type, u32 flags)
2497 {
2498 	struct xe_validation_ctx ctx;
2499 	struct drm_exec exec;
2500 	struct xe_bo *bo;
2501 	int err = 0;
2502 
2503 	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
2504 		bo = __xe_bo_create_locked(xe, tile, NULL, size, start, end,
2505 					   0, type, flags, 0, &exec);
2506 		if (IS_ERR(bo)) {
2507 			drm_exec_retry_on_contention(&exec);
2508 			err = PTR_ERR(bo);
2509 			xe_validation_retry_on_oom(&ctx, &err);
2510 			break;
2511 		}
2512 
2513 		err = xe_bo_pin(bo, &exec);
2514 		xe_bo_unlock(bo);
2515 		if (err) {
2516 			xe_bo_put(bo);
2517 			drm_exec_retry_on_contention(&exec);
2518 			xe_validation_retry_on_oom(&ctx, &err);
2519 			break;
2520 		}
2521 	}
2522 
2523 	return err ? ERR_PTR(err) : bo;
2524 }
2525 
2526 static struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
2527 						     struct xe_tile *tile,
2528 						     struct xe_vm *vm,
2529 						     size_t size, u64 offset,
2530 						     enum ttm_bo_type type, u32 flags,
2531 						     u64 alignment, struct drm_exec *exec)
2532 {
2533 	struct xe_bo *bo;
2534 	int err;
2535 	u64 start = offset == ~0ull ? 0 : offset;
2536 	u64 end = offset == ~0ull ? ~0ull : start + size;
2537 
2538 	if (flags & XE_BO_FLAG_STOLEN &&
2539 	    xe_ttm_stolen_cpu_access_needs_ggtt(xe))
2540 		flags |= XE_BO_FLAG_GGTT;
2541 
2542 	bo = __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type,
2543 				   flags | XE_BO_FLAG_NEEDS_CPU_ACCESS | XE_BO_FLAG_PINNED,
2544 				   alignment, exec);
2545 	if (IS_ERR(bo))
2546 		return bo;
2547 
2548 	err = xe_bo_pin(bo, exec);
2549 	if (err)
2550 		goto err_put;
2551 
2552 	err = xe_bo_vmap(bo);
2553 	if (err)
2554 		goto err_unpin;
2555 
2556 	xe_bo_unlock_vm_held(bo);
2557 
2558 	return bo;
2559 
2560 err_unpin:
2561 	xe_bo_unpin(bo);
2562 err_put:
2563 	xe_bo_unlock_vm_held(bo);
2564 	xe_bo_put(bo);
2565 	return ERR_PTR(err);
2566 }
2567 
2568 /**
2569  * xe_bo_create_pin_map_at_novm() - Create pinned and mapped bo at optional VRAM offset
2570  * @xe: The xe device.
2571  * @tile: The tile to select for migration of this bo, and the tile used for
2572  * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2573  * @size: The storage size to use for the bo.
2574  * @offset: Optional VRAM offset or %~0ull for don't care.
2575  * @type: The TTM buffer object type.
2576  * @flags: XE_BO_FLAG_ flags.
2577  * @alignment: GGTT alignment.
2578  * @intr: Whether to execute any waits for backing store interruptible.
2579  *
2580  * Create a pinned and mapped bo at an optional VRAM offset, with GGTT
2581  * alignment options. The bo will be external and not associated with a VM.
2582  *
2583  * Return: The buffer object on success. Negative error pointer on failure.
2584  * In particular, the function may return ERR_PTR(%-EINTR) if @intr was set
2585  * to true on entry.
2586  */
2587 struct xe_bo *
2588 xe_bo_create_pin_map_at_novm(struct xe_device *xe, struct xe_tile *tile,
2589 			     size_t size, u64 offset, enum ttm_bo_type type, u32 flags,
2590 			     u64 alignment, bool intr)
2591 {
2592 	struct xe_validation_ctx ctx;
2593 	struct drm_exec exec;
2594 	struct xe_bo *bo;
2595 	int ret = 0;
2596 
2597 	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = intr},
2598 			    ret) {
2599 		bo = xe_bo_create_pin_map_at_aligned(xe, tile, NULL, size, offset,
2600 						     type, flags, alignment, &exec);
2601 		if (IS_ERR(bo)) {
2602 			drm_exec_retry_on_contention(&exec);
2603 			ret = PTR_ERR(bo);
2604 			xe_validation_retry_on_oom(&ctx, &ret);
2605 		}
2606 	}
2607 
2608 	return ret ? ERR_PTR(ret) : bo;
2609 }
2610 
2611 /**
2612  * xe_bo_create_pin_map() - Create pinned and mapped bo
2613  * @xe: The xe device.
2614  * @tile: The tile to select for migration of this bo, and the tile used for
2615  * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2616  * @vm: The vm to associate the buffer object with. The vm's resv must be locked
2617  * with the transaction represented by @exec.
2618  * @size: The storage size to use for the bo.
2619  * @type: The TTM buffer object type.
2620  * @flags: XE_BO_FLAG_ flags.
2621  * @exec: The drm_exec transaction to use for exhaustive eviction, and
2622  * previously used for locking @vm's resv.
2623  *
2624  * Create a pinned and mapped bo. If @vm is non-NULL, the bo will be
2625  * associated with that vm; otherwise it will be external.
2626  *
2627  * Return: The buffer object on success. Negative error pointer on failure.
2628  * In particular, the function may return ERR_PTR(%-EINTR) if @exec was
2629  * configured for interruptible locking.
2630  */
2631 struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
2632 				   struct xe_vm *vm, size_t size,
2633 				   enum ttm_bo_type type, u32 flags,
2634 				   struct drm_exec *exec)
2635 {
2636 	return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, ~0ull, type, flags,
2637 					       0, exec);
2638 }
2639 
2640 /**
2641  * xe_bo_create_pin_map_novm() - Create pinned and mapped bo
2642  * @xe: The xe device.
2643  * @tile: The tile to select for migration of this bo, and the tile used for
2644  * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2645  * @size: The storage size to use for the bo.
2646  * @type: The TTM buffer object type.
2647  * @flags: XE_BO_FLAG_ flags.
2648  * @intr: Whether to execute any waits for backing store interruptible.
2649  *
2650  * Create a pinned and mapped bo. The bo will be external and not associated
2651  * with a VM.
2652  *
2653  * Return: The buffer object on success. Negative error pointer on failure.
2654  * In particular, the function may return ERR_PTR(%-EINTR) if @intr was set
2655  * to true on entry.
2656  */
2657 struct xe_bo *xe_bo_create_pin_map_novm(struct xe_device *xe, struct xe_tile *tile,
2658 					size_t size, enum ttm_bo_type type, u32 flags,
2659 					bool intr)
2660 {
2661 	return xe_bo_create_pin_map_at_novm(xe, tile, size, ~0ull, type, flags, 0, intr);
2662 }
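/*
 * Usage sketch for xe_bo_create_pin_map_novm() (illustrative only; the
 * caller, the blob being copied and the flag choice are hypothetical). The
 * returned bo is pinned and vmapped, so it can be filled through bo->vmap
 * and later released with xe_bo_unpin_map_no_vm():
 *
 *	struct xe_bo *bo;
 *
 *	bo = xe_bo_create_pin_map_novm(xe, tile, SZ_4K, ttm_bo_type_kernel,
 *				       XE_BO_FLAG_VRAM_IF_DGFX(tile) |
 *				       XE_BO_FLAG_GGTT, true);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 *	xe_map_memcpy_to(xe, &bo->vmap, 0, blob, blob_size);
 */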
2663 
2664 static void __xe_bo_unpin_map_no_vm(void *arg)
2665 {
2666 	xe_bo_unpin_map_no_vm(arg);
2667 }
2668 
2669 struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
2670 					   size_t size, u32 flags)
2671 {
2672 	struct xe_bo *bo;
2673 	int ret;
2674 
2675 	KUNIT_STATIC_STUB_REDIRECT(xe_managed_bo_create_pin_map, xe, tile, size, flags);
2676 	bo = xe_bo_create_pin_map_novm(xe, tile, size, ttm_bo_type_kernel, flags, true);
2677 	if (IS_ERR(bo))
2678 		return bo;
2679 
2680 	ret = devm_add_action_or_reset(xe->drm.dev, __xe_bo_unpin_map_no_vm, bo);
2681 	if (ret)
2682 		return ERR_PTR(ret);
2683 
2684 	return bo;
2685 }
2686 
2687 void xe_managed_bo_unpin_map_no_vm(struct xe_bo *bo)
2688 {
2689 	devm_release_action(xe_bo_device(bo)->drm.dev, __xe_bo_unpin_map_no_vm, bo);
2690 }
2691 
2692 struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
2693 					     const void *data, size_t size, u32 flags)
2694 {
2695 	struct xe_bo *bo = xe_managed_bo_create_pin_map(xe, tile, ALIGN(size, PAGE_SIZE), flags);
2696 
2697 	if (IS_ERR(bo))
2698 		return bo;
2699 
2700 	xe_map_memcpy_to(xe, &bo->vmap, 0, data, size);
2701 
2702 	return bo;
2703 }
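/*
 * Usage sketch for xe_managed_bo_create_from_data() (illustrative only; the
 * blob and the flag choice are hypothetical). The bo is device-managed, so
 * no explicit teardown is needed unless xe_managed_bo_unpin_map_no_vm() is
 * called to release it early:
 *
 *	struct xe_bo *bo;
 *
 *	bo = xe_managed_bo_create_from_data(xe, tile, blob, blob_size,
 *					    XE_BO_FLAG_VRAM_IF_DGFX(tile) |
 *					    XE_BO_FLAG_GGTT);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 */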
2704 
2705 /**
2706  * xe_managed_bo_reinit_in_vram() - Replace a managed system-memory BO with a VRAM copy
2707  * @xe: xe device
2708  * @tile: Tile where the new buffer will be created
2709  * @src: Managed buffer object allocated in system memory
2710  *
2711  * Replace a managed src buffer object allocated in system memory with a new
2712  * one allocated in vram, copying the data between them.
2713  * The buffer object in VRAM is not going to have the same GGTT address; the
2714  * caller is responsible for making sure that any old references to it are updated.
2715  *
2716  * Returns 0 for success, negative error code otherwise.
2717  */
2718 int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src)
2719 {
2720 	struct xe_bo *bo;
2721 	u32 dst_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT;
2722 
2723 	dst_flags |= (*src)->flags & (XE_BO_FLAG_GGTT_INVALIDATE |
2724 				      XE_BO_FLAG_PINNED_NORESTORE);
2725 
2726 	xe_assert(xe, IS_DGFX(xe));
2727 	xe_assert(xe, !(*src)->vmap.is_iomem);
2728 
2729 	bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr,
2730 					    xe_bo_size(*src), dst_flags);
2731 	if (IS_ERR(bo))
2732 		return PTR_ERR(bo);
2733 
2734 	devm_release_action(xe->drm.dev, __xe_bo_unpin_map_no_vm, *src);
2735 	*src = bo;
2736 
2737 	return 0;
2738 }
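/*
 * Usage sketch for xe_managed_bo_reinit_in_vram() (illustrative only; the
 * caller and the structure holding the bo pointer are hypothetical). The
 * pointer is updated in place, so any cached GGTT offsets derived from the
 * old bo must be refreshed by the caller afterwards:
 *
 *	int err = xe_managed_bo_reinit_in_vram(xe, tile, &priv->bo);
 *
 *	if (err)
 *		return err;
 */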
2739 
2740 /*
2741  * XXX: This is in the VM bind data path, likely should calculate this once and
2742  * store, with a recalculation if the BO is moved.
2743  */
2744 uint64_t vram_region_gpu_offset(struct ttm_resource *res)
2745 {
2746 	struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
2747 
2748 	switch (res->mem_type) {
2749 	case XE_PL_STOLEN:
2750 		return xe_ttm_stolen_gpu_offset(xe);
2751 	case XE_PL_TT:
2752 	case XE_PL_SYSTEM:
2753 		return 0;
2754 	default:
2755 		return res_to_mem_region(res)->dpa_base;
2756 	}
2757 	return 0;
2758 }
2759 
2760 /**
2761  * xe_bo_pin_external - pin an external BO
2762  * @bo: buffer object to be pinned
2763  * @in_place: Pin in current placement, don't attempt to migrate.
2764  * @exec: The drm_exec transaction to use for exhaustive eviction.
2765  *
2766  * Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
2767  * BO. Unique call compared to xe_bo_pin as this function has its own set of
2768  * asserts and code to ensure evict / restore on suspend / resume.
2769  *
2770  * Returns 0 for success, negative error code otherwise.
2771  */
2772 int xe_bo_pin_external(struct xe_bo *bo, bool in_place, struct drm_exec *exec)
2773 {
2774 	struct xe_device *xe = xe_bo_device(bo);
2775 	int err;
2776 
2777 	xe_assert(xe, !bo->vm);
2778 	xe_assert(xe, xe_bo_is_user(bo));
2779 
2780 	if (!xe_bo_is_pinned(bo)) {
2781 		if (!in_place) {
2782 			err = xe_bo_validate(bo, NULL, false, exec);
2783 			if (err)
2784 				return err;
2785 		}
2786 
2787 		spin_lock(&xe->pinned.lock);
2788 		list_add_tail(&bo->pinned_link, &xe->pinned.late.external);
2789 		spin_unlock(&xe->pinned.lock);
2790 	}
2791 
2792 	ttm_bo_pin(&bo->ttm);
2793 	if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
2794 		xe_ttm_tt_account_subtract(xe, bo->ttm.ttm);
2795 
2796 	/*
2797 	 * FIXME: If we always use the reserve / unreserve functions for locking
2798 	 * we do not need this.
2799 	 */
2800 	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
2801 
2802 	return 0;
2803 }
2804 
2805 /**
2806  * xe_bo_pin() - Pin a kernel bo after potentially migrating it
2807  * @bo: The kernel bo to pin.
2808  * @exec: The drm_exec transaction to use for exhaustive eviction.
2809  *
2810  * Attempts to migrate a bo to @bo->placement. If that succeeds,
2811  * pins the bo.
2812  *
2813  * Return: %0 on success, negative error code on migration failure.
2814  */
2815 int xe_bo_pin(struct xe_bo *bo, struct drm_exec *exec)
2816 {
2817 	struct ttm_place *place = &bo->placements[0];
2818 	struct xe_device *xe = xe_bo_device(bo);
2819 	int err;
2820 
2821 	/* We currently don't expect user BO to be pinned */
2822 	xe_assert(xe, !xe_bo_is_user(bo));
2823 
2824 	/* Pinned object must be in GGTT or have pinned flag */
2825 	xe_assert(xe, bo->flags & (XE_BO_FLAG_PINNED |
2826 				   XE_BO_FLAG_GGTT));
2827 
2828 	/*
2829 	 * No reason we can't support pinning imported dma-bufs we just don't
2830 	 * expect to pin an imported dma-buf.
2831 	 */
2832 	xe_assert(xe, !bo->ttm.base.import_attach);
2833 
2834 	/* We only expect at most 1 pin */
2835 	xe_assert(xe, !xe_bo_is_pinned(bo));
2836 
2837 	err = xe_bo_validate(bo, NULL, false, exec);
2838 	if (err)
2839 		return err;
2840 
2841 	if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
2842 		spin_lock(&xe->pinned.lock);
2843 		if (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)
2844 			list_add_tail(&bo->pinned_link, &xe->pinned.late.kernel_bo_present);
2845 		else
2846 			list_add_tail(&bo->pinned_link, &xe->pinned.early.kernel_bo_present);
2847 		spin_unlock(&xe->pinned.lock);
2848 	}
2849 
2850 	ttm_bo_pin(&bo->ttm);
2851 	if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
2852 		xe_ttm_tt_account_subtract(xe, bo->ttm.ttm);
2853 
2854 	/*
2855 	 * FIXME: If we always use the reserve / unreserve functions for locking
2856 	 * we do not need this.
2857 	 */
2858 	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
2859 
2860 	return 0;
2861 }
2862 
2863 /**
2864  * xe_bo_unpin_external - unpin an external BO
2865  * @bo: buffer object to be unpinned
2866  *
2867  * Unpin an external (not tied to a VM, can be exported via dma-buf / prime FD)
2868  * BO. Unique call compared to xe_bo_unpin as this function has its own set of
2869  * asserts and code to ensure evict / restore on suspend / resume.
2872  */
2873 void xe_bo_unpin_external(struct xe_bo *bo)
2874 {
2875 	struct xe_device *xe = xe_bo_device(bo);
2876 
2877 	xe_assert(xe, !bo->vm);
2878 	xe_assert(xe, xe_bo_is_pinned(bo));
2879 	xe_assert(xe, xe_bo_is_user(bo));
2880 
2881 	spin_lock(&xe->pinned.lock);
2882 	if (bo->ttm.pin_count == 1 && !list_empty(&bo->pinned_link))
2883 		list_del_init(&bo->pinned_link);
2884 	spin_unlock(&xe->pinned.lock);
2885 
2886 	ttm_bo_unpin(&bo->ttm);
2887 	if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
2888 		xe_ttm_tt_account_add(xe, bo->ttm.ttm);
2889 
2890 	/*
2891 	 * FIXME: If we always use the reserve / unreserve functions for locking
2892 	 * we do not need this.
2893 	 */
2894 	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
2895 }
2896 
2897 void xe_bo_unpin(struct xe_bo *bo)
2898 {
2899 	struct ttm_place *place = &bo->placements[0];
2900 	struct xe_device *xe = xe_bo_device(bo);
2901 
2902 	xe_assert(xe, !bo->ttm.base.import_attach);
2903 	xe_assert(xe, xe_bo_is_pinned(bo));
2904 
2905 	if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
2906 		spin_lock(&xe->pinned.lock);
2907 		xe_assert(xe, !list_empty(&bo->pinned_link));
2908 		list_del_init(&bo->pinned_link);
2909 		spin_unlock(&xe->pinned.lock);
2910 
2911 		if (bo->backup_obj) {
2912 			if (xe_bo_is_pinned(bo->backup_obj))
2913 				ttm_bo_unpin(&bo->backup_obj->ttm);
2914 			xe_bo_put(bo->backup_obj);
2915 			bo->backup_obj = NULL;
2916 		}
2917 	}
2918 	ttm_bo_unpin(&bo->ttm);
2919 	if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
2920 		xe_ttm_tt_account_add(xe, bo->ttm.ttm);
2921 }
2922 
2923 /**
2924  * xe_bo_validate() - Make sure the bo is in an allowed placement
2925  * @bo: The bo.
2926  * @vm: Pointer to the vm the bo shares a locked dma_resv object with, or
2927  *      NULL. Used together with @allow_res_evict.
2928  * @allow_res_evict: Whether it's allowed to evict bos sharing @vm's
2929  *                   reservation object.
2930  * @exec: The drm_exec transaction to use for exhaustive eviction.
2931  *
2932  * Make sure the bo is in an allowed placement, migrating it if necessary. If
2933  * needed, other bos will be evicted. If bos selected for eviction share
2934  * the @vm's reservation object, they can be evicted iff @allow_res_evict is
2935  * set to true, otherwise they will be bypassed.
2936  *
2937  * Return: 0 on success, negative error code on failure. May return
2938  * -EINTR or -ERESTARTSYS if internal waits are interrupted by a signal.
2939  */
2940 int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict,
2941 		   struct drm_exec *exec)
2942 {
2943 	struct ttm_operation_ctx ctx = {
2944 		.interruptible = true,
2945 		.no_wait_gpu = false,
2946 		.gfp_retry_mayfail = true,
2947 	};
2948 	int ret;
2949 
2950 	if (xe_bo_is_pinned(bo))
2951 		return 0;
2952 
2953 	if (vm) {
2954 		lockdep_assert_held(&vm->lock);
2955 		xe_vm_assert_held(vm);
2956 
2957 		ctx.allow_res_evict = allow_res_evict;
2958 		ctx.resv = xe_vm_resv(vm);
2959 	}
2960 
2961 	xe_vm_set_validating(vm, allow_res_evict);
2962 	trace_xe_bo_validate(bo);
2963 	xe_validation_assert_exec(xe_bo_device(bo), exec, &bo->ttm.base);
2964 	ret = ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
2965 	xe_vm_clear_validating(vm, allow_res_evict);
2966 
2967 	return ret;
2968 }
2969 
2970 bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo)
2971 {
2972 	if (bo->destroy == &xe_ttm_bo_destroy)
2973 		return true;
2974 
2975 	return false;
2976 }
2977 
2978 /*
2979  * Resolve a BO address. There is no assert to check if the proper lock is held
2980  * so it should only be used in cases where it is not fatal to get the wrong
2981  * address, such as printing debug information, but not in cases where memory is
2982  * written based on this result.
2983  */
2984 dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
2985 {
2986 	struct xe_device *xe = xe_bo_device(bo);
2987 	struct xe_res_cursor cur;
2988 	u64 page;
2989 
2990 	xe_assert(xe, page_size <= PAGE_SIZE);
2991 	page = offset >> PAGE_SHIFT;
2992 	offset &= (PAGE_SIZE - 1);
2993 
2994 	if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) {
2995 		xe_assert(xe, bo->ttm.ttm);
2996 
2997 		xe_res_first_sg(xe_bo_sg(bo), page << PAGE_SHIFT,
2998 				page_size, &cur);
2999 		return xe_res_dma(&cur) + offset;
3000 	} else {
3003 		xe_res_first(bo->ttm.resource, page << PAGE_SHIFT,
3004 			     page_size, &cur);
3005 		return cur.start + offset + vram_region_gpu_offset(bo->ttm.resource);
3006 	}
3007 }
3008 
3009 dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
3010 {
3011 	if (!READ_ONCE(bo->ttm.pin_count))
3012 		xe_bo_assert_held(bo);
3013 	return __xe_bo_addr(bo, offset, page_size);
3014 }
3015 
3016 int xe_bo_vmap(struct xe_bo *bo)
3017 {
3018 	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
3019 	void *virtual;
3020 	bool is_iomem;
3021 	int ret;
3022 
3023 	xe_bo_assert_held(bo);
3024 
3025 	if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) ||
3026 			!force_contiguous(bo->flags)))
3027 		return -EINVAL;
3028 
3029 	if (!iosys_map_is_null(&bo->vmap))
3030 		return 0;
3031 
3032 	/*
3033 	 * We use this more or less deprecated interface for now since
3034 	 * ttm_bo_vmap() doesn't offer the optimization of kmapping
3035 	 * single page bos, which is done here.
3036 	 * TODO: Fix up ttm_bo_vmap to do that, or fix up ttm_bo_kmap
3037 	 * to use struct iosys_map.
3038 	 */
3039 	ret = ttm_bo_kmap(&bo->ttm, 0, xe_bo_size(bo) >> PAGE_SHIFT, &bo->kmap);
3040 	if (ret)
3041 		return ret;
3042 
3043 	virtual = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
3044 	if (is_iomem)
3045 		iosys_map_set_vaddr_iomem(&bo->vmap, (void __iomem *)virtual);
3046 	else
3047 		iosys_map_set_vaddr(&bo->vmap, virtual);
3048 
3049 	return 0;
3050 }
3051 
3052 static void __xe_bo_vunmap(struct xe_bo *bo)
3053 {
3054 	if (!iosys_map_is_null(&bo->vmap)) {
3055 		iosys_map_clear(&bo->vmap);
3056 		ttm_bo_kunmap(&bo->kmap);
3057 	}
3058 }
3059 
3060 void xe_bo_vunmap(struct xe_bo *bo)
3061 {
3062 	xe_bo_assert_held(bo);
3063 	__xe_bo_vunmap(bo);
3064 }
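/*
 * Usage sketch for xe_bo_vmap() / xe_bo_vunmap() (illustrative only; the
 * caller and the payload are hypothetical). Both calls require the bo lock,
 * and the mapping is accessed through the iosys_map helpers since it may be
 * I/O memory:
 *
 *	int err;
 *
 *	xe_bo_lock(bo, false);
 *	err = xe_bo_vmap(bo);
 *	if (!err) {
 *		xe_map_memcpy_to(xe, &bo->vmap, 0, payload, payload_size);
 *		xe_bo_vunmap(bo);
 *	}
 *	xe_bo_unlock(bo);
 */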
3065 
3066 static int gem_create_set_pxp_type(struct xe_device *xe, struct xe_bo *bo, u64 value)
3067 {
3068 	if (value == DRM_XE_PXP_TYPE_NONE)
3069 		return 0;
3070 
3071 	/* we only support DRM_XE_PXP_TYPE_HWDRM for now */
3072 	if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM))
3073 		return -EINVAL;
3074 
3075 	return xe_pxp_key_assign(xe->pxp, bo);
3076 }
3077 
3078 typedef int (*xe_gem_create_set_property_fn)(struct xe_device *xe,
3079 					     struct xe_bo *bo,
3080 					     u64 value);
3081 
3082 static const xe_gem_create_set_property_fn gem_create_set_property_funcs[] = {
3083 	[DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE] = gem_create_set_pxp_type,
3084 };
3085 
3086 static int gem_create_user_ext_set_property(struct xe_device *xe,
3087 					    struct xe_bo *bo,
3088 					    u64 extension)
3089 {
3090 	u64 __user *address = u64_to_user_ptr(extension);
3091 	struct drm_xe_ext_set_property ext;
3092 	int err;
3093 	u32 idx;
3094 
3095 	err = copy_from_user(&ext, address, sizeof(ext));
3096 	if (XE_IOCTL_DBG(xe, err))
3097 		return -EFAULT;
3098 
3099 	if (XE_IOCTL_DBG(xe, ext.property >=
3100 			 ARRAY_SIZE(gem_create_set_property_funcs)) ||
3101 	    XE_IOCTL_DBG(xe, ext.pad) ||
3102 	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY))
3103 		return -EINVAL;
3104 
3105 	idx = array_index_nospec(ext.property, ARRAY_SIZE(gem_create_set_property_funcs));
3106 	if (!gem_create_set_property_funcs[idx])
3107 		return -EINVAL;
3108 
3109 	return gem_create_set_property_funcs[idx](xe, bo, ext.value);
3110 }
3111 
3112 typedef int (*xe_gem_create_user_extension_fn)(struct xe_device *xe,
3113 					       struct xe_bo *bo,
3114 					       u64 extension);
3115 
3116 static const xe_gem_create_user_extension_fn gem_create_user_extension_funcs[] = {
3117 	[DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY] = gem_create_user_ext_set_property,
3118 };
3119 
3120 #define MAX_USER_EXTENSIONS	16
3121 static int gem_create_user_extensions(struct xe_device *xe, struct xe_bo *bo,
3122 				      u64 extensions, int ext_number)
3123 {
3124 	u64 __user *address = u64_to_user_ptr(extensions);
3125 	struct drm_xe_user_extension ext;
3126 	int err;
3127 	u32 idx;
3128 
3129 	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
3130 		return -E2BIG;
3131 
3132 	err = copy_from_user(&ext, address, sizeof(ext));
3133 	if (XE_IOCTL_DBG(xe, err))
3134 		return -EFAULT;
3135 
3136 	if (XE_IOCTL_DBG(xe, ext.pad) ||
3137 	    XE_IOCTL_DBG(xe, ext.name >= ARRAY_SIZE(gem_create_user_extension_funcs)))
3138 		return -EINVAL;
3139 
3140 	idx = array_index_nospec(ext.name,
3141 				 ARRAY_SIZE(gem_create_user_extension_funcs));
3142 	err = gem_create_user_extension_funcs[idx](xe, bo, extensions);
3143 	if (XE_IOCTL_DBG(xe, err))
3144 		return err;
3145 
3146 	if (ext.next_extension)
3147 		return gem_create_user_extensions(xe, bo, ext.next_extension,
3148 						  ++ext_number);
3149 
3150 	return 0;
3151 }
3152 
3153 int xe_gem_create_ioctl(struct drm_device *dev, void *data,
3154 			struct drm_file *file)
3155 {
3156 	struct xe_device *xe = to_xe_device(dev);
3157 	struct xe_file *xef = to_xe_file(file);
3158 	struct drm_xe_gem_create *args = data;
3159 	struct xe_validation_ctx ctx;
3160 	struct drm_exec exec;
3161 	struct xe_vm *vm = NULL;
3162 	struct xe_bo *bo;
3163 	unsigned int bo_flags;
3164 	u32 handle;
3165 	int err;
3166 
3167 	if (XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
3168 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
3169 		return -EINVAL;
3170 
3171 	/* at least one valid memory placement must be specified */
3172 	if (XE_IOCTL_DBG(xe, (args->placement & ~xe->info.mem_region_mask) ||
3173 			 !args->placement))
3174 		return -EINVAL;
3175 
3176 	if (XE_IOCTL_DBG(xe, args->flags &
3177 			 ~(DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING |
3178 			   DRM_XE_GEM_CREATE_FLAG_SCANOUT |
3179 			   DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM |
3180 			   DRM_XE_GEM_CREATE_FLAG_NO_COMPRESSION)))
3181 		return -EINVAL;
3182 
3183 	if (XE_IOCTL_DBG(xe, args->handle))
3184 		return -EINVAL;
3185 
3186 	if (XE_IOCTL_DBG(xe, !args->size))
3187 		return -EINVAL;
3188 
3189 	if (XE_IOCTL_DBG(xe, args->size > SIZE_MAX))
3190 		return -EINVAL;
3191 
3192 	if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK))
3193 		return -EINVAL;
3194 
3195 	bo_flags = 0;
3196 	if (args->flags & DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING)
3197 		bo_flags |= XE_BO_FLAG_DEFER_BACKING;
3198 
3199 	if (args->flags & DRM_XE_GEM_CREATE_FLAG_SCANOUT)
3200 		bo_flags |= XE_BO_FLAG_SCANOUT;
3201 
3202 	if (args->flags & DRM_XE_GEM_CREATE_FLAG_NO_COMPRESSION) {
3203 		if (XE_IOCTL_DBG(xe, GRAPHICS_VER(xe) < 20))
3204 			return -EOPNOTSUPP;
3205 		bo_flags |= XE_BO_FLAG_NO_COMPRESSION;
3206 	}
3207 
3208 	bo_flags |= args->placement << (ffs(XE_BO_FLAG_SYSTEM) - 1);
3209 
3210 	/* CCS formats need physical placement at a 64K alignment in VRAM. */
3211 	if ((bo_flags & XE_BO_FLAG_VRAM_MASK) &&
3212 	    (bo_flags & XE_BO_FLAG_SCANOUT) &&
3213 	    !(xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) &&
3214 	    IS_ALIGNED(args->size, SZ_64K))
3215 		bo_flags |= XE_BO_FLAG_NEEDS_64K;
3216 
3217 	if (args->flags & DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) {
3218 		if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_FLAG_VRAM_MASK)))
3219 			return -EINVAL;
3220 
3221 		bo_flags |= XE_BO_FLAG_NEEDS_CPU_ACCESS;
3222 	}
3223 
3224 	if (XE_IOCTL_DBG(xe, !args->cpu_caching ||
3225 			 args->cpu_caching > DRM_XE_GEM_CPU_CACHING_WC))
3226 		return -EINVAL;
3227 
3228 	if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_VRAM_MASK &&
3229 			 args->cpu_caching != DRM_XE_GEM_CPU_CACHING_WC))
3230 		return -EINVAL;
3231 
3232 	if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_SCANOUT &&
3233 			 args->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB))
3234 		return -EINVAL;
3235 
3236 	if (args->vm_id) {
3237 		vm = xe_vm_lookup(xef, args->vm_id);
3238 		if (XE_IOCTL_DBG(xe, !vm))
3239 			return -ENOENT;
3240 	}
3241 
3242 	err = 0;
3243 	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
3244 			    err) {
3245 		if (vm) {
3246 			err = xe_vm_drm_exec_lock(vm, &exec);
3247 			drm_exec_retry_on_contention(&exec);
3248 			if (err)
3249 				break;
3250 		}
3251 		bo = xe_bo_create_user(xe, vm, args->size, args->cpu_caching,
3252 				       bo_flags, &exec);
3253 		drm_exec_retry_on_contention(&exec);
3254 		if (IS_ERR(bo)) {
3255 			err = PTR_ERR(bo);
3256 			xe_validation_retry_on_oom(&ctx, &err);
3257 			break;
3258 		}
3259 	}
3260 	if (err)
3261 		goto out_vm;
3262 
3263 	if (args->extensions) {
3264 		err = gem_create_user_extensions(xe, bo, args->extensions, 0);
3265 		if (err)
3266 			goto out_bulk;
3267 	}
3268 
3269 	err = drm_gem_handle_create(file, &bo->ttm.base, &handle);
3270 	if (err)
3271 		goto out_bulk;
3272 
3273 	args->handle = handle;
3274 	goto out_put;
3275 
3276 out_bulk:
3277 	if (vm && !xe_vm_in_fault_mode(vm)) {
3278 		xe_vm_lock(vm, false);
3279 		__xe_bo_unset_bulk_move(bo);
3280 		xe_vm_unlock(vm);
3281 	}
3282 out_put:
3283 	xe_bo_put(bo);
3284 out_vm:
3285 	if (vm)
3286 		xe_vm_put(vm);
3287 
3288 	return err;
3289 }
3290 
3291 int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
3292 			     struct drm_file *file)
3293 {
3294 	struct xe_device *xe = to_xe_device(dev);
3295 	struct drm_xe_gem_mmap_offset *args = data;
3296 	struct drm_gem_object *gem_obj;
3297 
3298 	if (XE_IOCTL_DBG(xe, args->extensions) ||
3299 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
3300 		return -EINVAL;
3301 
3302 	if (XE_IOCTL_DBG(xe, args->flags &
3303 			 ~DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER))
3304 		return -EINVAL;
3305 
3306 	if (args->flags & DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER) {
3307 		if (XE_IOCTL_DBG(xe, !IS_DGFX(xe)))
3308 			return -EINVAL;
3309 
3310 		if (XE_IOCTL_DBG(xe, args->handle))
3311 			return -EINVAL;
3312 
3313 		if (XE_IOCTL_DBG(xe, PAGE_SIZE > SZ_4K))
3314 			return -EINVAL;
3315 
3316 		BUILD_BUG_ON(((XE_PCI_BARRIER_MMAP_OFFSET >> XE_PTE_SHIFT) +
3317 			      SZ_4K) >= DRM_FILE_PAGE_OFFSET_START);
3318 		args->offset = XE_PCI_BARRIER_MMAP_OFFSET;
3319 		return 0;
3320 	}
3321 
3322 	gem_obj = drm_gem_object_lookup(file, args->handle);
3323 	if (XE_IOCTL_DBG(xe, !gem_obj))
3324 		return -ENOENT;
3325 
3326 	/* The mmap offset was set up at BO allocation time. */
3327 	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
3328 
3329 	xe_bo_put(gem_to_xe_bo(gem_obj));
3330 	return 0;
3331 }
3332 
3333 /**
3334  * xe_bo_lock() - Lock the buffer object's dma_resv object
3335  * @bo: The struct xe_bo whose lock is to be taken
3336  * @intr: Whether to perform any wait interruptible
3337  *
3338  * Locks the buffer object's dma_resv object. If the buffer object is
3339  * pointing to a shared dma_resv object, that shared lock is locked.
3340  *
3341  * Return: 0 on success, -EINTR if @intr is true and the wait for a
3342  * contended lock was interrupted. If @intr is set to false, the
3343  * function always returns 0.
3344  */
3345 int xe_bo_lock(struct xe_bo *bo, bool intr)
3346 {
3347 	if (intr)
3348 		return dma_resv_lock_interruptible(bo->ttm.base.resv, NULL);
3349 
3350 	dma_resv_lock(bo->ttm.base.resv, NULL);
3351 
3352 	return 0;
3353 }
3354 
3355 /**
3356  * xe_bo_unlock() - Unlock the buffer object's dma_resv object
3357  * @bo: The struct xe_bo whose lock is to be released.
3358  *
3359  * Unlock a buffer object lock that was locked by xe_bo_lock().
3360  */
3361 void xe_bo_unlock(struct xe_bo *bo)
3362 {
3363 	dma_resv_unlock(bo->ttm.base.resv);
3364 }
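/*
 * Usage sketch for xe_bo_lock() / xe_bo_unlock() (illustrative only; the
 * caller is hypothetical). When @intr is true the -EINTR case must be
 * handled; with @intr false the lock always succeeds:
 *
 *	int err = xe_bo_lock(bo, true);
 *
 *	if (err)
 *		return err;
 *
 *	(operate on the bo while its dma_resv lock is held)
 *
 *	xe_bo_unlock(bo);
 */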
3365 
3366 /**
3367  * xe_bo_can_migrate - Whether a buffer object likely can be migrated
3368  * @bo: The buffer object to migrate
3369  * @mem_type: The TTM memory type intended to migrate to
3370  *
3371  * Check whether the buffer object supports migration to the
3372  * given memory type. Note that pinning may affect the ability to migrate as
3373  * returned by this function.
3374  *
3375  * This function is primarily intended as a helper for checking the
3376  * possibility to migrate buffer objects and can be called without
3377  * the object lock held.
3378  *
3379  * Return: true if migration is possible, false otherwise.
3380  */
3381 bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type)
3382 {
3383 	unsigned int cur_place;
3384 
3385 	if (bo->ttm.type == ttm_bo_type_kernel)
3386 		return true;
3387 
3388 	if (bo->ttm.type == ttm_bo_type_sg)
3389 		return false;
3390 
3391 	for (cur_place = 0; cur_place < bo->placement.num_placement;
3392 	     cur_place++) {
3393 		if (bo->placements[cur_place].mem_type == mem_type)
3394 			return true;
3395 	}
3396 
3397 	return false;
3398 }
3399 
3400 static void xe_place_from_ttm_type(u32 mem_type, struct ttm_place *place)
3401 {
3402 	memset(place, 0, sizeof(*place));
3403 	place->mem_type = mem_type;
3404 }
3405 
3406 /**
3407  * xe_bo_migrate - Migrate an object to the desired region id
3408  * @bo: The buffer object to migrate.
3409  * @mem_type: The TTM region type to migrate to.
3410  * @tctx: A pointer to a struct ttm_operation_ctx or NULL if
3411  * a default interruptible ctx is to be used.
3412  * @exec: The drm_exec transaction to use for exhaustive eviction.
3413  *
3414  * Attempt to migrate the buffer object to the desired memory region. The
3415  * buffer object may not be pinned, and must be locked.
3416  * On successful completion, the object memory type will be updated,
3417  * but an async migration task may not have completed yet. To ensure that
3418  * it has, wait for the object's kernel fences to signal while holding
3419  * the object lock.
3420  *
3421  * Return: 0 on success. Negative error code on failure. In particular may
3422  * return -EINTR or -ERESTARTSYS if signal pending.
3423  */
3424 int xe_bo_migrate(struct xe_bo *bo, u32 mem_type, struct ttm_operation_ctx *tctx,
3425 		  struct drm_exec *exec)
3426 {
3427 	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
3428 	struct ttm_operation_ctx ctx = {
3429 		.interruptible = true,
3430 		.no_wait_gpu = false,
3431 		.gfp_retry_mayfail = true,
3432 	};
3433 	struct ttm_placement placement;
3434 	struct ttm_place requested;
3435 
3436 	xe_bo_assert_held(bo);
3437 	tctx = tctx ? tctx : &ctx;
3438 
3439 	if (bo->ttm.resource->mem_type == mem_type)
3440 		return 0;
3441 
3442 	if (xe_bo_is_pinned(bo))
3443 		return -EBUSY;
3444 
3445 	if (!xe_bo_can_migrate(bo, mem_type))
3446 		return -EINVAL;
3447 
3448 	xe_place_from_ttm_type(mem_type, &requested);
3449 	placement.num_placement = 1;
3450 	placement.placement = &requested;
3451 
3452 	/*
3453 	 * Stolen needs to be handled like the VRAM handling below if we ever
3454 	 * need to support it.
3455 	 */
3456 	drm_WARN_ON(&xe->drm, mem_type == XE_PL_STOLEN);
3457 
3458 	if (mem_type_is_vram(mem_type)) {
3459 		u32 c = 0;
3460 
3461 		add_vram(xe, bo, &requested, bo->flags, mem_type, &c);
3462 	}
3463 
3464 	if (!tctx->no_wait_gpu)
3465 		xe_validation_assert_exec(xe_bo_device(bo), exec, &bo->ttm.base);
3466 	return ttm_bo_validate(&bo->ttm, &placement, tctx);
3467 }
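/*
 * Usage sketch for xe_bo_migrate() (illustrative only; the calling context
 * and the @exec transaction are hypothetical). The bo must be locked, and a
 * NULL @tctx selects the default interruptible context:
 *
 *	int err;
 *
 *	xe_bo_assert_held(bo);
 *	if (!xe_bo_can_migrate(bo, XE_PL_TT))
 *		return -EINVAL;
 *
 *	err = xe_bo_migrate(bo, XE_PL_TT, NULL, exec);
 *	if (err)
 *		return err;
 */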
3468 
3469 /**
3470  * xe_bo_evict - Evict an object to evict placement
3471  * @bo: The buffer object to migrate.
3472  * @exec: The drm_exec transaction to use for exhaustive eviction.
3473  *
3474  * On successful completion, the object memory will be moved to evict
3475  * placement. This function blocks until the object has been fully moved.
3476  *
3477  * Return: 0 on success. Negative error code on failure.
3478  */
3479 int xe_bo_evict(struct xe_bo *bo, struct drm_exec *exec)
3480 {
3481 	struct ttm_operation_ctx ctx = {
3482 		.interruptible = false,
3483 		.no_wait_gpu = false,
3484 		.gfp_retry_mayfail = true,
3485 	};
3486 	struct ttm_placement placement;
3487 	int ret;
3488 
3489 	xe_evict_flags(&bo->ttm, &placement);
3490 	ret = ttm_bo_validate(&bo->ttm, &placement, &ctx);
3491 	if (ret)
3492 		return ret;
3493 
3494 	dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
3495 			      false, MAX_SCHEDULE_TIMEOUT);
3496 
3497 	return 0;
3498 }
3499 
3500 /**
3501  * xe_bo_needs_ccs_pages - Whether a bo needs to back up CCS pages when
3502  * placed in system memory.
3503  * @bo: The xe_bo
3504  *
3505  * Return: true if extra pages need to be allocated, false otherwise.
3506  */
3507 bool xe_bo_needs_ccs_pages(struct xe_bo *bo)
3508 {
3509 	struct xe_device *xe = xe_bo_device(bo);
3510 
3511 	if (GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe))
3512 		return false;
3513 
3514 	if (!xe_device_has_flat_ccs(xe) || bo->ttm.type != ttm_bo_type_device)
3515 		return false;
3516 
3517 	/* On discrete GPUs, if the GPU can access this buffer from
3518 	 * system memory (i.e., it allows XE_PL_TT placement), FlatCCS
3519 	 * can't be used since there's no CCS storage associated with
3520 	 * non-VRAM addresses.
3521 	 */
3522 	if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM))
3523 		return false;
3524 
3525 	/* Check if userspace explicitly requested no compression */
3526 	if (bo->flags & XE_BO_FLAG_NO_COMPRESSION)
3527 		return false;
3528 
3529 	/*
3530 	 * For WB (Write-Back) CPU caching mode, check if the device
3531 	 * supports WB compression with coherency.
3532 	 */
3533 	if (bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB &&
3534 	    xe->pat.idx[XE_CACHE_WB_COMPRESSION] == XE_PAT_INVALID_IDX)
3535 		return false;
3536 
3537 	return true;
3538 }
3539 
3540 /**
3541  * __xe_bo_release_dummy() - Dummy kref release function
3542  * @kref: The embedded struct kref.
3543  *
3544  * Dummy release function for xe_bo_put_deferred(). Keep off.
3545  */
3546 void __xe_bo_release_dummy(struct kref *kref)
3547 {
3548 }
3549 
3550 /**
3551  * xe_bo_put_commit() - Put bos whose put was deferred by xe_bo_put_deferred().
3552  * @deferred: The lockless list used for the call to xe_bo_put_deferred().
3553  *
3554  * Puts all bos whose put was deferred by xe_bo_put_deferred().
3555  * The @deferred list can be either an onstack local list or a global
3556  * shared list used by a workqueue.
3557  */
3558 void xe_bo_put_commit(struct llist_head *deferred)
3559 {
3560 	struct llist_node *freed;
3561 	struct xe_bo *bo, *next;
3562 
3563 	if (!deferred)
3564 		return;
3565 
3566 	freed = llist_del_all(deferred);
3567 	if (!freed)
3568 		return;
3569 
3570 	llist_for_each_entry_safe(bo, next, freed, freed)
3571 		drm_gem_object_free(&bo->ttm.base.refcount);
3572 }
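/*
 * Usage sketch for deferred puts (illustrative only; the caller is
 * hypothetical, and xe_bo_put_deferred() is the helper referenced in the
 * kernel-doc above, declared in xe_bo.h). Puts are queued locklessly and
 * then committed once it is safe to take the locks xe_bo_put() may need:
 *
 *	LLIST_HEAD(deferred);
 *
 *	xe_bo_put_deferred(bo, &deferred);
 *	xe_bo_put_commit(&deferred);
 */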
3573 
3574 static void xe_bo_dev_work_func(struct work_struct *work)
3575 {
3576 	struct xe_bo_dev *bo_dev = container_of(work, typeof(*bo_dev), async_free);
3577 
3578 	xe_bo_put_commit(&bo_dev->async_list);
3579 }
3580 
3581 /**
3582  * xe_bo_dev_init() - Initialize BO dev to manage async BO freeing
3583  * @bo_dev: The BO dev structure
3584  */
3585 void xe_bo_dev_init(struct xe_bo_dev *bo_dev)
3586 {
3587 	INIT_WORK(&bo_dev->async_free, xe_bo_dev_work_func);
3588 }
3589 
3590 /**
3591  * xe_bo_dev_fini() - Finalize BO dev managing async BO freeing
3592  * @bo_dev: The BO dev structure
3593  */
3594 void xe_bo_dev_fini(struct xe_bo_dev *bo_dev)
3595 {
3596 	flush_work(&bo_dev->async_free);
3597 }
3598 
3599 void xe_bo_put(struct xe_bo *bo)
3600 {
3601 	struct xe_tile *tile;
3602 	u8 id;
3603 
3604 	might_sleep();
3605 	if (bo) {
3606 #ifdef CONFIG_PROC_FS
3607 		if (bo->client)
3608 			might_lock(&bo->client->bos_lock);
3609 #endif
3610 		for_each_tile(tile, xe_bo_device(bo), id)
3611 			if (bo->ggtt_node[id])
3612 				xe_ggtt_might_lock(tile->mem.ggtt);
3613 		drm_gem_object_put(&bo->ttm.base);
3614 	}
3615 }
3616 
3617 /**
3618  * xe_bo_dumb_create - Create a dumb bo as backing for a fb
3619  * @file_priv: ...
3620  * @dev: ...
3621  * @args: ...
3622  *
3623  * See dumb_create() hook in include/drm/drm_drv.h
3624  *
3625  * Return: ...
3626  */
3627 int xe_bo_dumb_create(struct drm_file *file_priv,
3628 		      struct drm_device *dev,
3629 		      struct drm_mode_create_dumb *args)
3630 {
3631 	struct xe_device *xe = to_xe_device(dev);
3632 	struct xe_bo *bo;
3633 	uint32_t handle;
3634 	int err;
3635 	u32 page_size = max_t(u32, PAGE_SIZE,
3636 		xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K);
3637 
3638 	err = drm_mode_size_dumb(dev, args, SZ_64, page_size);
3639 	if (err)
3640 		return err;
3641 
3642 	bo = xe_bo_create_user(xe, NULL, args->size,
3643 			       DRM_XE_GEM_CPU_CACHING_WC,
3644 			       XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
3645 			       XE_BO_FLAG_SCANOUT |
3646 			       XE_BO_FLAG_NEEDS_CPU_ACCESS, NULL);
3647 	if (IS_ERR(bo))
3648 		return PTR_ERR(bo);
3649 
3650 	err = drm_gem_handle_create(file_priv, &bo->ttm.base, &handle);
3651 	/* drop reference from allocate - handle holds it now */
3652 	drm_gem_object_put(&bo->ttm.base);
3653 	if (!err)
3654 		args->handle = handle;
3655 	return err;
3656 }
3657 
3658 void xe_bo_runtime_pm_release_mmap_offset(struct xe_bo *bo)
3659 {
3660 	struct ttm_buffer_object *tbo = &bo->ttm;
3661 	struct ttm_device *bdev = tbo->bdev;
3662 
3663 	drm_vma_node_unmap(&tbo->base.vma_node, bdev->dev_mapping);
3664 
3665 	list_del_init(&bo->vram_userfault_link);
3666 }
3667 
3668 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
3669 #include "tests/xe_bo.c"
3670 #endif
3671