xref: /linux/drivers/gpu/drm/xe/xe_bo.c (revision 47c3ea3359d14ffa4ff94511ae905978d86bb5dd)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_bo.h"
7 
8 #include <linux/dma-buf.h>
9 #include <linux/nospec.h>
10 
11 #include <drm/drm_drv.h>
12 #include <drm/drm_gem_ttm_helper.h>
13 #include <drm/drm_managed.h>
14 #include <drm/ttm/ttm_backup.h>
15 #include <drm/ttm/ttm_device.h>
16 #include <drm/ttm/ttm_placement.h>
17 #include <drm/ttm/ttm_tt.h>
18 #include <uapi/drm/xe_drm.h>
19 
20 #include <kunit/static_stub.h>
21 
22 #include <trace/events/gpu_mem.h>
23 
24 #include "xe_device.h"
25 #include "xe_dma_buf.h"
26 #include "xe_drm_client.h"
27 #include "xe_ggtt.h"
28 #include "xe_gt.h"
29 #include "xe_map.h"
30 #include "xe_migrate.h"
31 #include "xe_pm.h"
32 #include "xe_preempt_fence.h"
33 #include "xe_pxp.h"
34 #include "xe_res_cursor.h"
35 #include "xe_shrinker.h"
36 #include "xe_sriov_vf_ccs.h"
37 #include "xe_tile.h"
38 #include "xe_trace_bo.h"
39 #include "xe_ttm_stolen_mgr.h"
40 #include "xe_vm.h"
41 #include "xe_vram_types.h"
42 
43 const char *const xe_mem_type_to_name[TTM_NUM_MEM_TYPES] = {
44 	[XE_PL_SYSTEM] = "system",
45 	[XE_PL_TT] = "gtt",
46 	[XE_PL_VRAM0] = "vram0",
47 	[XE_PL_VRAM1] = "vram1",
48 	[XE_PL_STOLEN] = "stolen"
49 };
50 
51 static const struct ttm_place sys_placement_flags = {
52 	.fpfn = 0,
53 	.lpfn = 0,
54 	.mem_type = XE_PL_SYSTEM,
55 	.flags = 0,
56 };
57 
58 static struct ttm_placement sys_placement = {
59 	.num_placement = 1,
60 	.placement = &sys_placement_flags,
61 };
62 
63 static struct ttm_placement purge_placement;
64 
65 static const struct ttm_place tt_placement_flags[] = {
66 	{
67 		.fpfn = 0,
68 		.lpfn = 0,
69 		.mem_type = XE_PL_TT,
70 		.flags = TTM_PL_FLAG_DESIRED,
71 	},
72 	{
73 		.fpfn = 0,
74 		.lpfn = 0,
75 		.mem_type = XE_PL_SYSTEM,
76 		.flags = TTM_PL_FLAG_FALLBACK,
77 	}
78 };
79 
80 static struct ttm_placement tt_placement = {
81 	.num_placement = 2,
82 	.placement = tt_placement_flags,
83 };
84 
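/*
 * Iterate over each XE_BO_FLAG_VRAMn bit set in @bo_flags__, assigning the
 * flag currently visited to @bit__ on every iteration.
 */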
85 #define for_each_set_bo_vram_flag(bit__, bo_flags__) \
86 	for (unsigned int __bit_tmp = BIT(0); __bit_tmp <= XE_BO_FLAG_VRAM_MASK; __bit_tmp <<= 1) \
87 		for_each_if(((bit__) = __bit_tmp) & (bo_flags__) & XE_BO_FLAG_VRAM_MASK)
88 
89 bool mem_type_is_vram(u32 mem_type)
90 {
91 	return mem_type >= XE_PL_VRAM0 && mem_type != XE_PL_STOLEN;
92 }
93 
94 static bool resource_is_stolen_vram(struct xe_device *xe, struct ttm_resource *res)
95 {
96 	return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe);
97 }
98 
99 static bool resource_is_vram(struct ttm_resource *res)
100 {
101 	return mem_type_is_vram(res->mem_type);
102 }
103 
104 bool xe_bo_is_vram(struct xe_bo *bo)
105 {
106 	return resource_is_vram(bo->ttm.resource) ||
107 		resource_is_stolen_vram(xe_bo_device(bo), bo->ttm.resource);
108 }
109 
110 bool xe_bo_is_stolen(struct xe_bo *bo)
111 {
112 	return bo->ttm.resource->mem_type == XE_PL_STOLEN;
113 }
114 
115 /**
116  * xe_bo_has_single_placement - check if BO is placed only in one memory location
117  * @bo: The BO
118  *
119  * This function checks whether a given BO is placed in only one memory location.
120  *
121  * Returns: true if the BO is placed in a single memory location, false otherwise.
122  *
123  */
124 bool xe_bo_has_single_placement(struct xe_bo *bo)
125 {
126 	return bo->placement.num_placement == 1;
127 }
128 
129 /**
130  * xe_bo_is_stolen_devmem - check if BO is of stolen type accessed via PCI BAR
131  * @bo: The BO
132  *
133  * The stolen memory is accessed through the PCI BAR for both DGFX and some
134  * integrated platforms that have a dedicated bit in the PTE for devmem (DM).
135  *
136  * Returns: true if it's stolen memory accessed via PCI BAR, false otherwise.
137  */
138 bool xe_bo_is_stolen_devmem(struct xe_bo *bo)
139 {
140 	return xe_bo_is_stolen(bo) &&
141 		GRAPHICS_VERx100(xe_bo_device(bo)) >= 1270;
142 }
143 
144 /**
145  * xe_bo_is_vm_bound - check if BO has any mappings through VM_BIND
146  * @bo: The BO
147  *
148  * Check if a given bo is bound through VM_BIND. This requires the
149  * reservation lock for the BO to be held.
150  *
151  * Returns: boolean
152  */
153 bool xe_bo_is_vm_bound(struct xe_bo *bo)
154 {
155 	xe_bo_assert_held(bo);
156 
157 	return !list_empty(&bo->ttm.base.gpuva.list);
158 }
159 
160 static bool xe_bo_is_user(struct xe_bo *bo)
161 {
162 	return bo->flags & XE_BO_FLAG_USER;
163 }
164 
165 static struct xe_migrate *
166 mem_type_to_migrate(struct xe_device *xe, u32 mem_type)
167 {
168 	struct xe_tile *tile;
169 
170 	xe_assert(xe, mem_type == XE_PL_STOLEN || mem_type_is_vram(mem_type));
171 	tile = &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)];
172 	return tile->migrate;
173 }
174 
175 static struct xe_vram_region *res_to_mem_region(struct ttm_resource *res)
176 {
177 	struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
178 	struct ttm_resource_manager *mgr;
179 	struct xe_ttm_vram_mgr *vram_mgr;
180 
181 	xe_assert(xe, resource_is_vram(res));
182 	mgr = ttm_manager_type(&xe->ttm, res->mem_type);
183 	vram_mgr = to_xe_ttm_vram_mgr(mgr);
184 
185 	return container_of(vram_mgr, struct xe_vram_region, ttm);
186 }
187 
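/*
 * Add a TT (system) placement if XE_BO_FLAG_SYSTEM is requested. When VRAM
 * placements are also requested, system memory is only added as a fallback.
 */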
188 static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
189 			   u32 bo_flags, u32 *c)
190 {
191 	if (bo_flags & XE_BO_FLAG_SYSTEM) {
192 		xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
193 
194 		bo->placements[*c] = (struct ttm_place) {
195 			.mem_type = XE_PL_TT,
196 			.flags = (bo_flags & XE_BO_FLAG_VRAM_MASK) ?
197 			TTM_PL_FLAG_FALLBACK : 0,
198 		};
199 		*c += 1;
200 	}
201 }
202 
203 static bool force_contiguous(u32 bo_flags)
204 {
205 	if (bo_flags & XE_BO_FLAG_STOLEN)
206 		return true; /* users expect this */
207 	else if (bo_flags & XE_BO_FLAG_PINNED &&
208 		 !(bo_flags & XE_BO_FLAG_PINNED_LATE_RESTORE))
209 		return true; /* needs vmap */
210 	else if (bo_flags & XE_BO_FLAG_CPU_ADDR_MIRROR)
211 		return true;
212 
213 	/*
214 	 * For eviction / restore on suspend / resume, objects pinned in VRAM
215 	 * must be contiguous. Also, only contiguous BOs support xe_bo_vmap.
216 	 */
217 	return bo_flags & XE_BO_FLAG_NEEDS_CPU_ACCESS &&
218 	       bo_flags & XE_BO_FLAG_PINNED;
219 }
220 
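/*
 * Translate a single XE_BO_FLAG_VRAMn flag into the id of the tile that
 * owns the corresponding VRAM region.
 */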
221 static u8 vram_bo_flag_to_tile_id(struct xe_device *xe, u32 vram_bo_flag)
222 {
223 	xe_assert(xe, vram_bo_flag & XE_BO_FLAG_VRAM_MASK);
224 	xe_assert(xe, (vram_bo_flag & (vram_bo_flag - 1)) == 0);
225 
226 	return __ffs(vram_bo_flag >> (__ffs(XE_BO_FLAG_VRAM0) - 1)) - 1;
227 }
228 
229 static u32 bo_vram_flags_to_vram_placement(struct xe_device *xe, u32 bo_flags, u32 vram_flag,
230 					   enum ttm_bo_type type)
231 {
232 	u8 tile_id = vram_bo_flag_to_tile_id(xe, vram_flag);
233 
234 	xe_assert(xe, tile_id < xe->info.tile_count);
235 
236 	if (type == ttm_bo_type_kernel && !(bo_flags & XE_BO_FLAG_FORCE_USER_VRAM))
237 		return xe->tiles[tile_id].mem.kernel_vram->placement;
238 	else
239 		return xe->tiles[tile_id].mem.vram->placement;
240 }
241 
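/*
 * Append a VRAM placement for @mem_type. If only part of the VRAM region is
 * CPU-accessible, BOs that need CPU access are restricted to the mappable
 * window, while others are placed top-down to keep that window free.
 */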
242 static void add_vram(struct xe_device *xe, struct xe_bo *bo,
243 		     struct ttm_place *places, u32 bo_flags, u32 mem_type, u32 *c)
244 {
245 	struct ttm_place place = { .mem_type = mem_type };
246 	struct ttm_resource_manager *mgr = ttm_manager_type(&xe->ttm, mem_type);
247 	struct xe_ttm_vram_mgr *vram_mgr = to_xe_ttm_vram_mgr(mgr);
248 
249 	struct xe_vram_region *vram;
250 	u64 io_size;
251 
252 	xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
253 
254 	vram = container_of(vram_mgr, struct xe_vram_region, ttm);
255 	xe_assert(xe, vram && vram->usable_size);
256 	io_size = vram->io_size;
257 
258 	if (force_contiguous(bo_flags))
259 		place.flags |= TTM_PL_FLAG_CONTIGUOUS;
260 
261 	if (io_size < vram->usable_size) {
262 		if (bo_flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) {
263 			place.fpfn = 0;
264 			place.lpfn = io_size >> PAGE_SHIFT;
265 		} else {
266 			place.flags |= TTM_PL_FLAG_TOPDOWN;
267 		}
268 	}
269 	places[*c] = place;
270 	*c += 1;
271 }
272 
273 static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
274 			 u32 bo_flags, enum ttm_bo_type type, u32 *c)
275 {
276 	u32 vram_flag;
277 
278 	for_each_set_bo_vram_flag(vram_flag, bo_flags) {
279 		u32 pl = bo_vram_flags_to_vram_placement(xe, bo_flags, vram_flag, type);
280 
281 		add_vram(xe, bo, bo->placements, bo_flags, pl, c);
282 	}
283 }
284 
285 static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
286 			   u32 bo_flags, u32 *c)
287 {
288 	if (bo_flags & XE_BO_FLAG_STOLEN) {
289 		xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
290 
291 		bo->placements[*c] = (struct ttm_place) {
292 			.mem_type = XE_PL_STOLEN,
293 			.flags = force_contiguous(bo_flags) ?
294 				TTM_PL_FLAG_CONTIGUOUS : 0,
295 		};
296 		*c += 1;
297 	}
298 }
299 
300 static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
301 				       u32 bo_flags, enum ttm_bo_type type)
302 {
303 	u32 c = 0;
304 
305 	try_add_vram(xe, bo, bo_flags, type, &c);
306 	try_add_system(xe, bo, bo_flags, &c);
307 	try_add_stolen(xe, bo, bo_flags, &c);
308 
309 	if (!c)
310 		return -EINVAL;
311 
312 	bo->placement = (struct ttm_placement) {
313 		.num_placement = c,
314 		.placement = bo->placements,
315 	};
316 
317 	return 0;
318 }
319 
320 int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
321 			      u32 bo_flags, enum ttm_bo_type type)
322 {
323 	xe_bo_assert_held(bo);
324 	return __xe_bo_placement_for_flags(xe, bo, bo_flags, type);
325 }
326 
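/*
 * Select where an evicted BO should go: non-xe (ghost) BOs and CPU
 * address-mirror BOs go straight to system, VRAM and stolen contents are
 * first evicted to TT, and TT contents fall back to system. On device
 * unplug, non-dma-buf BOs are simply purged.
 */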
327 static void xe_evict_flags(struct ttm_buffer_object *tbo,
328 			   struct ttm_placement *placement)
329 {
330 	struct xe_device *xe = container_of(tbo->bdev, typeof(*xe), ttm);
331 	bool device_unplugged = drm_dev_is_unplugged(&xe->drm);
332 	struct xe_bo *bo;
333 
334 	if (!xe_bo_is_xe_bo(tbo)) {
335 		/* Don't handle scatter gather BOs */
336 		if (tbo->type == ttm_bo_type_sg) {
337 			placement->num_placement = 0;
338 			return;
339 		}
340 
341 		*placement = device_unplugged ? purge_placement : sys_placement;
342 		return;
343 	}
344 
345 	bo = ttm_to_xe_bo(tbo);
346 	if (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) {
347 		*placement = sys_placement;
348 		return;
349 	}
350 
351 	if (device_unplugged && !tbo->base.dma_buf) {
352 		*placement = purge_placement;
353 		return;
354 	}
355 
356 	/*
357 	 * For xe, evicting an sg bo to system just triggers a rebind of the
358 	 * sg list upon subsequent validation to XE_PL_TT.
359 	 */
360 	switch (tbo->resource->mem_type) {
361 	case XE_PL_VRAM0:
362 	case XE_PL_VRAM1:
363 	case XE_PL_STOLEN:
364 		*placement = tt_placement;
365 		break;
366 	case XE_PL_TT:
367 	default:
368 		*placement = sys_placement;
369 		break;
370 	}
371 }
372 
373 /* struct xe_ttm_tt - Subclassed ttm_tt for xe */
374 struct xe_ttm_tt {
375 	struct ttm_tt ttm;
376 	struct sg_table sgt;
377 	struct sg_table *sg;
378 	/** @purgeable: Whether the content of the pages of @ttm is purgeable. */
379 	bool purgeable;
380 };
381 
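/*
 * Build an sg table for the ttm_tt pages and DMA-map it for the device.
 * A no-op if a mapping already exists.
 */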
382 static int xe_tt_map_sg(struct xe_device *xe, struct ttm_tt *tt)
383 {
384 	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
385 	unsigned long num_pages = tt->num_pages;
386 	int ret;
387 
388 	XE_WARN_ON((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
389 		   !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE));
390 
391 	if (xe_tt->sg)
392 		return 0;
393 
394 	ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages,
395 						num_pages, 0,
396 						(u64)num_pages << PAGE_SHIFT,
397 						xe_sg_segment_size(xe->drm.dev),
398 						GFP_KERNEL);
399 	if (ret)
400 		return ret;
401 
402 	xe_tt->sg = &xe_tt->sgt;
403 	ret = dma_map_sgtable(xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL,
404 			      DMA_ATTR_SKIP_CPU_SYNC);
405 	if (ret) {
406 		sg_free_table(xe_tt->sg);
407 		xe_tt->sg = NULL;
408 		return ret;
409 	}
410 
411 	return 0;
412 }
413 
414 static void xe_tt_unmap_sg(struct xe_device *xe, struct ttm_tt *tt)
415 {
416 	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
417 
418 	if (xe_tt->sg) {
419 		dma_unmap_sgtable(xe->drm.dev, xe_tt->sg,
420 				  DMA_BIDIRECTIONAL, 0);
421 		sg_free_table(xe_tt->sg);
422 		xe_tt->sg = NULL;
423 	}
424 }
425 
426 struct sg_table *xe_bo_sg(struct xe_bo *bo)
427 {
428 	struct ttm_tt *tt = bo->ttm.ttm;
429 	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
430 
431 	return xe_tt->sg;
432 }
433 
434 /*
435  * Account ttm pages against the device shrinker's shrinkable and
436  * purgeable counts.
437  */
438 static void xe_ttm_tt_account_add(struct xe_device *xe, struct ttm_tt *tt)
439 {
440 	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
441 
442 	if (xe_tt->purgeable)
443 		xe_shrinker_mod_pages(xe->mem.shrinker, 0, tt->num_pages);
444 	else
445 		xe_shrinker_mod_pages(xe->mem.shrinker, tt->num_pages, 0);
446 }
447 
448 static void xe_ttm_tt_account_subtract(struct xe_device *xe, struct ttm_tt *tt)
449 {
450 	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
451 
452 	if (xe_tt->purgeable)
453 		xe_shrinker_mod_pages(xe->mem.shrinker, 0, -(long)tt->num_pages);
454 	else
455 		xe_shrinker_mod_pages(xe->mem.shrinker, -(long)tt->num_pages, 0);
456 }
457 
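/*
 * Track a device-global page total and emit the gpu_mem_total tracepoint
 * when CONFIG_TRACE_GPU_MEM is enabled; otherwise a no-op.
 */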
458 static void update_global_total_pages(struct ttm_device *ttm_dev,
459 				      long num_pages)
460 {
461 #if IS_ENABLED(CONFIG_TRACE_GPU_MEM)
462 	struct xe_device *xe = ttm_to_xe_device(ttm_dev);
463 	u64 global_total_pages =
464 		atomic64_add_return(num_pages, &xe->global_total_pages);
465 
466 	trace_gpu_mem_total(xe->drm.primary->index, 0,
467 			    global_total_pages << PAGE_SHIFT);
468 #endif
469 }
470 
471 static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
472 				       u32 page_flags)
473 {
474 	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
475 	struct xe_device *xe = xe_bo_device(bo);
476 	struct xe_ttm_tt *xe_tt;
477 	struct ttm_tt *tt;
478 	unsigned long extra_pages;
479 	enum ttm_caching caching = ttm_cached;
480 	int err;
481 
482 	xe_tt = kzalloc(sizeof(*xe_tt), GFP_KERNEL);
483 	if (!xe_tt)
484 		return NULL;
485 
486 	tt = &xe_tt->ttm;
487 
488 	extra_pages = 0;
489 	if (xe_bo_needs_ccs_pages(bo))
490 		extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, xe_bo_size(bo)),
491 					   PAGE_SIZE);
492 
493 	/*
494 	 * DGFX system memory is always WB / ttm_cached, since
495 	 * other caching modes are only supported on x86. DGFX
496 	 * GPU system memory accesses are always coherent with the
497 	 * CPU.
498 	 */
499 	if (!IS_DGFX(xe)) {
500 		switch (bo->cpu_caching) {
501 		case DRM_XE_GEM_CPU_CACHING_WC:
502 			caching = ttm_write_combined;
503 			break;
504 		default:
505 			caching = ttm_cached;
506 			break;
507 		}
508 
509 		WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching);
510 
511 		/*
512 		 * Display scanout is always non-coherent with the CPU cache.
513 		 *
514 		 * For Xe_LPG and beyond, PPGTT PTE lookups are also
515 		 * non-coherent and require a CPU:WC mapping.
516 		 */
517 		if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) ||
518 		    (xe->info.graphics_verx100 >= 1270 &&
519 		     bo->flags & XE_BO_FLAG_PAGETABLE))
520 			caching = ttm_write_combined;
521 	}
522 
523 	if (bo->flags & XE_BO_FLAG_NEEDS_UC) {
524 		/*
525 		 * Valid only for internally-created buffers, for which
526 		 * cpu_caching is never initialized.
527 		 */
528 		xe_assert(xe, bo->cpu_caching == 0);
529 		caching = ttm_uncached;
530 	}
531 
532 	if (ttm_bo->type != ttm_bo_type_sg)
533 		page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;
534 
535 	err = ttm_tt_init(tt, &bo->ttm, page_flags, caching, extra_pages);
536 	if (err) {
537 		kfree(xe_tt);
538 		return NULL;
539 	}
540 
541 	if (ttm_bo->type != ttm_bo_type_sg) {
542 		err = ttm_tt_setup_backup(tt);
543 		if (err) {
544 			ttm_tt_fini(tt);
545 			kfree(xe_tt);
546 			return NULL;
547 		}
548 	}
549 
550 	return tt;
551 }
552 
553 static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt,
554 			      struct ttm_operation_ctx *ctx)
555 {
556 	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
557 	int err;
558 
559 	/*
560 	 * dma-bufs are not populated with pages, and the DMA addresses
561 	 * are set up when the bo is moved to XE_PL_TT.
562 	 */
563 	if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
564 	    !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE))
565 		return 0;
566 
567 	if (ttm_tt_is_backed_up(tt) && !xe_tt->purgeable) {
568 		err = ttm_tt_restore(ttm_dev, tt, ctx);
569 	} else {
570 		ttm_tt_clear_backed_up(tt);
571 		err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx);
572 	}
573 	if (err)
574 		return err;
575 
576 	xe_tt->purgeable = false;
577 	xe_ttm_tt_account_add(ttm_to_xe_device(ttm_dev), tt);
578 	update_global_total_pages(ttm_dev, tt->num_pages);
579 
580 	return 0;
581 }
582 
583 static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt)
584 {
585 	struct xe_device *xe = ttm_to_xe_device(ttm_dev);
586 
587 	if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
588 	    !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE))
589 		return;
590 
591 	xe_tt_unmap_sg(xe, tt);
592 
593 	ttm_pool_free(&ttm_dev->pool, tt);
594 	xe_ttm_tt_account_subtract(xe, tt);
595 	update_global_total_pages(ttm_dev, -(long)tt->num_pages);
596 }
597 
598 static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt)
599 {
600 	ttm_tt_fini(tt);
601 	kfree(tt);
602 }
603 
604 static bool xe_ttm_resource_visible(struct ttm_resource *mem)
605 {
606 	struct xe_ttm_vram_mgr_resource *vres =
607 		to_xe_ttm_vram_mgr_resource(mem);
608 
609 	return vres->used_visible_size == mem->size;
610 }
611 
612 /**
613  * xe_bo_is_visible_vram - check if BO is placed entirely in visible VRAM.
614  * @bo: The BO
615  *
616  * This function checks whether a given BO resides entirely in memory visible from the CPU.
617  *
618  * Returns: true if the BO is entirely visible, false otherwise.
619  *
620  */
621 bool xe_bo_is_visible_vram(struct xe_bo *bo)
622 {
623 	if (drm_WARN_ON(bo->ttm.base.dev, !xe_bo_is_vram(bo)))
624 		return false;
625 
626 	return xe_ttm_resource_visible(bo->ttm.resource);
627 }
628 
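/*
 * Set up the bus placement for CPU access: system and TT need nothing,
 * VRAM must fall within the CPU-visible window, and stolen memory is
 * handled by the stolen manager.
 */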
629 static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
630 				 struct ttm_resource *mem)
631 {
632 	struct xe_device *xe = ttm_to_xe_device(bdev);
633 
634 	switch (mem->mem_type) {
635 	case XE_PL_SYSTEM:
636 	case XE_PL_TT:
637 		return 0;
638 	case XE_PL_VRAM0:
639 	case XE_PL_VRAM1: {
640 		struct xe_vram_region *vram = res_to_mem_region(mem);
641 
642 		if (!xe_ttm_resource_visible(mem))
643 			return -EINVAL;
644 
645 		mem->bus.offset = mem->start << PAGE_SHIFT;
646 
647 		if (vram->mapping &&
648 		    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
649 			mem->bus.addr = (u8 __force *)vram->mapping +
650 				mem->bus.offset;
651 
652 		mem->bus.offset += vram->io_start;
653 		mem->bus.is_iomem = true;
654 
655 #if !IS_ENABLED(CONFIG_X86)
656 		mem->bus.caching = ttm_write_combined;
657 #endif
658 		return 0;
659 	} case XE_PL_STOLEN:
660 		return xe_ttm_stolen_io_mem_reserve(xe, mem);
661 	default:
662 		return -EINVAL;
663 	}
664 }
665 
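/*
 * Notify all VMs mapping this BO that its backing store is about to move.
 * VMs not in fault mode have the gpuvm bo marked evicted so it is rebound
 * on next validation; fault-mode VMs wait for idle and then invalidate the
 * affected VMAs.
 */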
666 static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
667 				const struct ttm_operation_ctx *ctx)
668 {
669 	struct dma_resv_iter cursor;
670 	struct dma_fence *fence;
671 	struct drm_gem_object *obj = &bo->ttm.base;
672 	struct drm_gpuvm_bo *vm_bo;
673 	bool idle = false;
674 	int ret = 0;
675 
676 	dma_resv_assert_held(bo->ttm.base.resv);
677 
678 	if (!list_empty(&bo->ttm.base.gpuva.list)) {
679 		dma_resv_iter_begin(&cursor, bo->ttm.base.resv,
680 				    DMA_RESV_USAGE_BOOKKEEP);
681 		dma_resv_for_each_fence_unlocked(&cursor, fence)
682 			dma_fence_enable_sw_signaling(fence);
683 		dma_resv_iter_end(&cursor);
684 	}
685 
686 	drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
687 		struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
688 		struct drm_gpuva *gpuva;
689 
690 		if (!xe_vm_in_fault_mode(vm)) {
691 			drm_gpuvm_bo_evict(vm_bo, true);
692 			continue;
693 		}
694 
695 		if (!idle) {
696 			long timeout;
697 
698 			if (ctx->no_wait_gpu &&
699 			    !dma_resv_test_signaled(bo->ttm.base.resv,
700 						    DMA_RESV_USAGE_BOOKKEEP))
701 				return -EBUSY;
702 
703 			timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
704 							DMA_RESV_USAGE_BOOKKEEP,
705 							ctx->interruptible,
706 							MAX_SCHEDULE_TIMEOUT);
707 			if (!timeout)
708 				return -ETIME;
709 			if (timeout < 0)
710 				return timeout;
711 
712 			idle = true;
713 		}
714 
715 		drm_gpuvm_bo_for_each_va(gpuva, vm_bo) {
716 			struct xe_vma *vma = gpuva_to_vma(gpuva);
717 
718 			trace_xe_vma_evict(vma);
719 			ret = xe_vm_invalidate_vma(vma);
720 			if (XE_WARN_ON(ret))
721 				return ret;
722 		}
723 	}
724 
725 	return ret;
726 }
727 
728 /*
729  * The dma-buf map_attachment() / unmap_attachment() is hooked up here.
730  * Note that unmapping the attachment is deferred to the next
731  * map_attachment time, or to bo destroy (after idling), whichever comes first.
732  * This is to avoid syncing before unmap_attachment(), assuming that the
733  * caller relies on idling the reservation object before moving the
734  * backing store out. Should that assumption not hold, then we will be able
735  * to unconditionally call unmap_attachment() when moving out to system.
736  */
737 static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo,
738 			     struct ttm_resource *new_res)
739 {
740 	struct dma_buf_attachment *attach = ttm_bo->base.import_attach;
741 	struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt,
742 					       ttm);
743 	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
744 	bool device_unplugged = drm_dev_is_unplugged(&xe->drm);
745 	struct sg_table *sg;
746 
747 	xe_assert(xe, attach);
748 	xe_assert(xe, ttm_bo->ttm);
749 
750 	if (device_unplugged && new_res->mem_type == XE_PL_SYSTEM &&
751 	    ttm_bo->sg) {
752 		dma_resv_wait_timeout(ttm_bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
753 				      false, MAX_SCHEDULE_TIMEOUT);
754 		dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL);
755 		ttm_bo->sg = NULL;
756 	}
757 
758 	if (new_res->mem_type == XE_PL_SYSTEM)
759 		goto out;
760 
761 	if (ttm_bo->sg) {
762 		dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL);
763 		ttm_bo->sg = NULL;
764 	}
765 
766 	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
767 	if (IS_ERR(sg))
768 		return PTR_ERR(sg);
769 
770 	ttm_bo->sg = sg;
771 	xe_tt->sg = sg;
772 
773 out:
774 	ttm_bo_move_null(ttm_bo, new_res);
775 
776 	return 0;
777 }
778 
779 /**
780  * xe_bo_move_notify - Notify subsystems of a pending move
781  * @bo: The buffer object
782  * @ctx: The struct ttm_operation_ctx controlling locking and waits.
783  *
784  * This function notifies subsystems of an upcoming buffer move.
785  * Upon receiving such a notification, subsystems should schedule
786  * halting access to the underlying pages and optionally add a fence
787  * to the buffer object's dma_resv object, that signals when access is
788  * stopped. The caller will wait on all dma_resv fences before
789  * starting the move.
790  *
791  * A subsystem may commence access to the object after obtaining
792  * bindings to the new backing memory under the object lock.
793  *
794  * Return: 0 on success, -EINTR or -ERESTARTSYS if interrupted in fault mode,
795  * negative error code on error.
796  */
797 static int xe_bo_move_notify(struct xe_bo *bo,
798 			     const struct ttm_operation_ctx *ctx)
799 {
800 	struct ttm_buffer_object *ttm_bo = &bo->ttm;
801 	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
802 	struct ttm_resource *old_mem = ttm_bo->resource;
803 	u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM;
804 	int ret;
805 
806 	/*
807 	 * If this starts to call into many components, consider
808 	 * using a notification chain here.
809 	 */
810 
811 	if (xe_bo_is_pinned(bo))
812 		return -EINVAL;
813 
814 	xe_bo_vunmap(bo);
815 	ret = xe_bo_trigger_rebind(xe, bo, ctx);
816 	if (ret)
817 		return ret;
818 
819 	/* Don't call move_notify() for imported dma-bufs. */
820 	if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach)
821 		dma_buf_move_notify(ttm_bo->base.dma_buf);
822 
823 	/*
824 	 * TTM has already nuked the mmap for us (see ttm_bo_unmap_virtual),
825 	 * so if we moved from VRAM make sure to unlink this from the userfault
826 	 * tracking.
827 	 */
828 	if (mem_type_is_vram(old_mem_type)) {
829 		mutex_lock(&xe->mem_access.vram_userfault.lock);
830 		if (!list_empty(&bo->vram_userfault_link))
831 			list_del_init(&bo->vram_userfault_link);
832 		mutex_unlock(&xe->mem_access.vram_userfault.lock);
833 	}
834 
835 	return 0;
836 }
837 
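/*
 * TTM move callback. Freshly created BOs, dma-buf imports and moves without
 * source data take fast paths; everything else is serviced by the migrate
 * engine, with a hop through TT requested for direct SYSTEM <-> VRAM
 * transitions.
 */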
838 static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
839 		      struct ttm_operation_ctx *ctx,
840 		      struct ttm_resource *new_mem,
841 		      struct ttm_place *hop)
842 {
843 	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
844 	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
845 	struct ttm_resource *old_mem = ttm_bo->resource;
846 	u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM;
847 	struct ttm_tt *ttm = ttm_bo->ttm;
848 	struct xe_migrate *migrate = NULL;
849 	struct dma_fence *fence;
850 	bool move_lacks_source;
851 	bool tt_has_data;
852 	bool needs_clear;
853 	bool handle_system_ccs = !IS_DGFX(xe) && xe_bo_needs_ccs_pages(bo) &&
854 				 ttm && ttm_tt_is_populated(ttm);
855 	int ret = 0;
856 
857 	/* Bo creation path, moving to system or TT. */
858 	if ((!old_mem && ttm) && !handle_system_ccs) {
859 		if (new_mem->mem_type == XE_PL_TT)
860 			ret = xe_tt_map_sg(xe, ttm);
861 		if (!ret)
862 			ttm_bo_move_null(ttm_bo, new_mem);
863 		goto out;
864 	}
865 
866 	if (ttm_bo->type == ttm_bo_type_sg) {
867 		if (new_mem->mem_type == XE_PL_SYSTEM)
868 			ret = xe_bo_move_notify(bo, ctx);
869 		if (!ret)
870 			ret = xe_bo_move_dmabuf(ttm_bo, new_mem);
871 		return ret;
872 	}
873 
874 	tt_has_data = ttm && (ttm_tt_is_populated(ttm) || ttm_tt_is_swapped(ttm));
875 
876 	move_lacks_source = !old_mem || (handle_system_ccs ? (!bo->ccs_cleared) :
877 					 (!mem_type_is_vram(old_mem_type) && !tt_has_data));
878 
879 	needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) ||
880 		(!ttm && ttm_bo->type == ttm_bo_type_device);
881 
882 	if (new_mem->mem_type == XE_PL_TT) {
883 		ret = xe_tt_map_sg(xe, ttm);
884 		if (ret)
885 			goto out;
886 	}
887 
888 	if (move_lacks_source && !needs_clear) {
889 		ttm_bo_move_null(ttm_bo, new_mem);
890 		goto out;
891 	}
892 
893 	if (!move_lacks_source && (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) &&
894 	    new_mem->mem_type == XE_PL_SYSTEM) {
895 		ret = xe_svm_bo_evict(bo);
896 		if (!ret) {
897 			drm_dbg(&xe->drm, "Evict system allocator BO success\n");
898 			ttm_bo_move_null(ttm_bo, new_mem);
899 		} else {
900 			drm_dbg(&xe->drm, "Evict system allocator BO failed=%pe\n",
901 				ERR_PTR(ret));
902 		}
903 
904 		goto out;
905 	}
906 
907 	if (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT && !handle_system_ccs) {
908 		ttm_bo_move_null(ttm_bo, new_mem);
909 		goto out;
910 	}
911 
912 	/*
913 	 * Failed multi-hop where the old_mem is still marked as
914 	 * TTM_PL_FLAG_TEMPORARY; this should just be a dummy move.
915 	 */
916 	if (old_mem_type == XE_PL_TT &&
917 	    new_mem->mem_type == XE_PL_TT) {
918 		ttm_bo_move_null(ttm_bo, new_mem);
919 		goto out;
920 	}
921 
922 	if (!move_lacks_source && !xe_bo_is_pinned(bo)) {
923 		ret = xe_bo_move_notify(bo, ctx);
924 		if (ret)
925 			goto out;
926 	}
927 
928 	if (old_mem_type == XE_PL_TT &&
929 	    new_mem->mem_type == XE_PL_SYSTEM) {
930 		long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
931 						     DMA_RESV_USAGE_BOOKKEEP,
932 						     false,
933 						     MAX_SCHEDULE_TIMEOUT);
934 		if (timeout < 0) {
935 			ret = timeout;
936 			goto out;
937 		}
938 
939 		if (!handle_system_ccs) {
940 			ttm_bo_move_null(ttm_bo, new_mem);
941 			goto out;
942 		}
943 	}
944 
945 	if (!move_lacks_source &&
946 	    ((old_mem_type == XE_PL_SYSTEM && resource_is_vram(new_mem)) ||
947 	     (mem_type_is_vram(old_mem_type) &&
948 	      new_mem->mem_type == XE_PL_SYSTEM))) {
949 		hop->fpfn = 0;
950 		hop->lpfn = 0;
951 		hop->mem_type = XE_PL_TT;
952 		hop->flags = TTM_PL_FLAG_TEMPORARY;
953 		ret = -EMULTIHOP;
954 		goto out;
955 	}
956 
957 	if (bo->tile)
958 		migrate = bo->tile->migrate;
959 	else if (resource_is_vram(new_mem))
960 		migrate = mem_type_to_migrate(xe, new_mem->mem_type);
961 	else if (mem_type_is_vram(old_mem_type))
962 		migrate = mem_type_to_migrate(xe, old_mem_type);
963 	else
964 		migrate = xe->tiles[0].migrate;
965 
966 	xe_assert(xe, migrate);
967 	trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
968 	if (xe_rpm_reclaim_safe(xe)) {
969 		/*
970 		 * We might be called through swapout in the validation path of
971 		 * another TTM device, so acquire rpm here.
972 		 */
973 		xe_pm_runtime_get(xe);
974 	} else {
975 		drm_WARN_ON(&xe->drm, handle_system_ccs);
976 		xe_pm_runtime_get_noresume(xe);
977 	}
978 
979 	if (move_lacks_source) {
980 		u32 flags = 0;
981 
982 		if (mem_type_is_vram(new_mem->mem_type))
983 			flags |= XE_MIGRATE_CLEAR_FLAG_FULL;
984 		else if (handle_system_ccs)
985 			flags |= XE_MIGRATE_CLEAR_FLAG_CCS_DATA;
986 
987 		fence = xe_migrate_clear(migrate, bo, new_mem, flags);
988 	} else {
989 		fence = xe_migrate_copy(migrate, bo, bo, old_mem, new_mem,
990 					handle_system_ccs);
991 	}
992 	if (IS_ERR(fence)) {
993 		ret = PTR_ERR(fence);
994 		xe_pm_runtime_put(xe);
995 		goto out;
996 	}
997 	if (!move_lacks_source) {
998 		ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict, true,
999 						new_mem);
1000 		if (ret) {
1001 			dma_fence_wait(fence, false);
1002 			ttm_bo_move_null(ttm_bo, new_mem);
1003 			ret = 0;
1004 		}
1005 	} else {
1006 		/*
1007 		 * ttm_bo_move_accel_cleanup() may blow up if
1008 		 * bo->resource == NULL, so just attach the
1009 		 * fence and set the new resource.
1010 		 */
1011 		dma_resv_add_fence(ttm_bo->base.resv, fence,
1012 				   DMA_RESV_USAGE_KERNEL);
1013 		ttm_bo_move_null(ttm_bo, new_mem);
1014 	}
1015 
1016 	dma_fence_put(fence);
1017 	xe_pm_runtime_put(xe);
1018 
1019 	/*
1020 	 * CCS metadata is migrated from TT -> SMEM, so detach the CCS
1021 	 * BBs from the BO as they are no longer needed.
1022 	 */
1023 	if (IS_VF_CCS_READY(xe) && old_mem_type == XE_PL_TT &&
1024 	    new_mem->mem_type == XE_PL_SYSTEM)
1025 		xe_sriov_vf_ccs_detach_bo(bo);
1026 
1027 	if (IS_VF_CCS_READY(xe) &&
1028 	    ((move_lacks_source && new_mem->mem_type == XE_PL_TT) ||
1029 	     (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT)) &&
1030 	    handle_system_ccs)
1031 		ret = xe_sriov_vf_ccs_attach_bo(bo);
1032 
1033 out:
1034 	if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) &&
1035 	    ttm_bo->ttm) {
1036 		long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
1037 						     DMA_RESV_USAGE_KERNEL,
1038 						     false,
1039 						     MAX_SCHEDULE_TIMEOUT);
1040 		if (timeout < 0)
1041 			ret = timeout;
1042 
1043 		if (IS_VF_CCS_READY(xe))
1044 			xe_sriov_vf_ccs_detach_bo(bo);
1045 
1046 		xe_tt_unmap_sg(xe, ttm_bo->ttm);
1047 	}
1048 
1049 	return ret;
1050 }
1051 
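/*
 * Purge a BO's backing store for the shrinker: fake a move to system
 * without copying any data, then release the pages.
 */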
1052 static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx,
1053 			       struct ttm_buffer_object *bo,
1054 			       unsigned long *scanned)
1055 {
1056 	struct xe_device *xe = ttm_to_xe_device(bo->bdev);
1057 	long lret;
1058 
1059 	/* Fake move to system, without copying data. */
1060 	if (bo->resource->mem_type != XE_PL_SYSTEM) {
1061 		struct ttm_resource *new_resource;
1062 
1063 		lret = ttm_bo_wait_ctx(bo, ctx);
1064 		if (lret)
1065 			return lret;
1066 
1067 		lret = ttm_bo_mem_space(bo, &sys_placement, &new_resource, ctx);
1068 		if (lret)
1069 			return lret;
1070 
1071 		xe_tt_unmap_sg(xe, bo->ttm);
1072 		ttm_bo_move_null(bo, new_resource);
1073 	}
1074 
1075 	*scanned += bo->ttm->num_pages;
1076 	lret = ttm_bo_shrink(ctx, bo, (struct ttm_bo_shrink_flags)
1077 			     {.purge = true,
1078 			      .writeback = false,
1079 			      .allow_move = false});
1080 
1081 	if (lret > 0)
1082 		xe_ttm_tt_account_subtract(xe, bo->ttm);
1083 
1084 	return lret;
1085 }
1086 
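/*
 * Beyond the generic TTM checks, a BO is not considered worth evicting if
 * any VM it is mapped in is currently in its validation path.
 */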
1087 static bool
1088 xe_bo_eviction_valuable(struct ttm_buffer_object *bo, const struct ttm_place *place)
1089 {
1090 	struct drm_gpuvm_bo *vm_bo;
1091 
1092 	if (!ttm_bo_eviction_valuable(bo, place))
1093 		return false;
1094 
1095 	if (!xe_bo_is_xe_bo(bo))
1096 		return true;
1097 
1098 	drm_gem_for_each_gpuvm_bo(vm_bo, &bo->base) {
1099 		if (xe_vm_is_validating(gpuvm_to_vm(vm_bo->vm)))
1100 			return false;
1101 	}
1102 
1103 	return true;
1104 }
1105 
1106 /**
1107  * xe_bo_shrink() - Try to shrink an xe bo.
1108  * @ctx: The struct ttm_operation_ctx used for shrinking.
1109  * @bo: The TTM buffer object whose pages to shrink.
1110  * @flags: Flags governing the shrink behaviour.
1111  * @scanned: Pointer to a counter of the number of pages
1112  * attempted to shrink.
1113  *
1114  * Try to shrink or purge a bo, and if it succeeds, unmap its dma.
1115  * Note that we also need to be able to handle non-xe bos
1116  * (ghost bos), but only if their struct ttm_tt is embedded in
1117  * a struct xe_ttm_tt. Whenever the function attempts to shrink
1118  * the pages of a buffer object, the value pointed to by @scanned
1119  * is updated.
1120  *
1121  * Return: The number of pages shrunken or purged, or negative error
1122  * code on failure.
1123  */
1124 long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
1125 		  const struct xe_bo_shrink_flags flags,
1126 		  unsigned long *scanned)
1127 {
1128 	struct ttm_tt *tt = bo->ttm;
1129 	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
1130 	struct ttm_place place = {.mem_type = bo->resource->mem_type};
1131 	struct xe_bo *xe_bo = ttm_to_xe_bo(bo);
1132 	struct xe_device *xe = ttm_to_xe_device(bo->bdev);
1133 	bool needs_rpm;
1134 	long lret = 0L;
1135 
1136 	if (!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE) ||
1137 	    (flags.purge && !xe_tt->purgeable))
1138 		return -EBUSY;
1139 
1140 	if (!xe_bo_eviction_valuable(bo, &place))
1141 		return -EBUSY;
1142 
1143 	if (!xe_bo_is_xe_bo(bo) || !xe_bo_get_unless_zero(xe_bo))
1144 		return xe_bo_shrink_purge(ctx, bo, scanned);
1145 
1146 	if (xe_tt->purgeable) {
1147 		if (bo->resource->mem_type != XE_PL_SYSTEM)
1148 			lret = xe_bo_move_notify(xe_bo, ctx);
1149 		if (!lret)
1150 			lret = xe_bo_shrink_purge(ctx, bo, scanned);
1151 		goto out_unref;
1152 	}
1153 
1154 	/* System CCS needs gpu copy when moving PL_TT -> PL_SYSTEM */
1155 	needs_rpm = (!IS_DGFX(xe) && bo->resource->mem_type != XE_PL_SYSTEM &&
1156 		     xe_bo_needs_ccs_pages(xe_bo));
1157 	if (needs_rpm && !xe_pm_runtime_get_if_active(xe))
1158 		goto out_unref;
1159 
1160 	*scanned += tt->num_pages;
1161 	lret = ttm_bo_shrink(ctx, bo, (struct ttm_bo_shrink_flags)
1162 			     {.purge = false,
1163 			      .writeback = flags.writeback,
1164 			      .allow_move = true});
1165 	if (needs_rpm)
1166 		xe_pm_runtime_put(xe);
1167 
1168 	if (lret > 0)
1169 		xe_ttm_tt_account_subtract(xe, tt);
1170 
1171 out_unref:
1172 	xe_bo_put(xe_bo);
1173 
1174 	return lret;
1175 }
1176 
1177 /**
1178  * xe_bo_notifier_prepare_pinned() - Prepare a pinned VRAM object to be backed
1179  * up in system memory.
1180  * @bo: The buffer object to prepare.
1181  *
1182  * On successful completion, the object backup pages are allocated. Expectation
1183  * is that this is called from the PM notifier, prior to suspend/hibernation.
1184  *
1185  * Return: 0 on success. Negative error code on failure.
1186  */
1187 int xe_bo_notifier_prepare_pinned(struct xe_bo *bo)
1188 {
1189 	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
1190 	struct xe_validation_ctx ctx;
1191 	struct drm_exec exec;
1192 	struct xe_bo *backup;
1193 	int ret = 0;
1194 
1195 	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.exclusive = true}, ret) {
1196 		ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
1197 		drm_exec_retry_on_contention(&exec);
1198 		xe_assert(xe, !ret);
1199 		xe_assert(xe, !bo->backup_obj);
1200 
1201 		/*
1202 		 * Since this is called from the PM notifier we might have raced with
1203 		 * someone unpinning this after we dropped the pinned list lock and
1204 		 * before grabbing the above bo lock.
1205 		 */
1206 		if (!xe_bo_is_pinned(bo))
1207 			break;
1208 
1209 		if (!xe_bo_is_vram(bo))
1210 			break;
1211 
1212 		if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
1213 			break;
1214 
1215 		backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo),
1216 					   DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
1217 					   XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
1218 					   XE_BO_FLAG_PINNED, &exec);
1219 		if (IS_ERR(backup)) {
1220 			drm_exec_retry_on_contention(&exec);
1221 			ret = PTR_ERR(backup);
1222 			xe_validation_retry_on_oom(&ctx, &ret);
1223 			break;
1224 		}
1225 
1226 		backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
1227 		ttm_bo_pin(&backup->ttm);
1228 		bo->backup_obj = backup;
1229 	}
1230 
1231 	return ret;
1232 }
1233 
1234 /**
1235  * xe_bo_notifier_unprepare_pinned() - Undo the previous prepare operation.
1236  * @bo: The buffer object to undo the prepare for.
1237  *
1238  * Always returns 0. The backup object is removed, if still present. The
1239  * expectation is that this is called from the PM notifier when undoing the prepare step.
1240  *
1241  * Return: Always returns 0.
1242  */
1243 int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo)
1244 {
1245 	xe_bo_lock(bo, false);
1246 	if (bo->backup_obj) {
1247 		ttm_bo_unpin(&bo->backup_obj->ttm);
1248 		xe_bo_put(bo->backup_obj);
1249 		bo->backup_obj = NULL;
1250 	}
1251 	xe_bo_unlock(bo);
1252 
1253 	return 0;
1254 }
1255 
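/*
 * Copy the contents of a pinned BO into its backup object, using the
 * migrate engine for user / late-restore BOs and a CPU copy through vmaps
 * otherwise.
 */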
1256 static int xe_bo_evict_pinned_copy(struct xe_bo *bo, struct xe_bo *backup)
1257 {
1258 	struct xe_device *xe = xe_bo_device(bo);
1259 	bool unmap = false;
1260 	int ret = 0;
1261 
1262 	if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
1263 		struct xe_migrate *migrate;
1264 		struct dma_fence *fence;
1265 
1266 		if (bo->tile)
1267 			migrate = bo->tile->migrate;
1268 		else
1269 			migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);
1270 
1271 		xe_assert(xe, bo->ttm.base.resv == backup->ttm.base.resv);
1272 		ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
1273 		if (ret)
1274 			goto out_backup;
1275 
1276 		fence = xe_migrate_copy(migrate, bo, backup, bo->ttm.resource,
1277 					backup->ttm.resource, false);
1278 		if (IS_ERR(fence)) {
1279 			ret = PTR_ERR(fence);
1280 			goto out_backup;
1281 		}
1282 
1283 		dma_resv_add_fence(bo->ttm.base.resv, fence,
1284 				   DMA_RESV_USAGE_KERNEL);
1285 		dma_fence_put(fence);
1286 	} else {
1287 		ret = xe_bo_vmap(backup);
1288 		if (ret)
1289 			goto out_backup;
1290 
1291 		if (iosys_map_is_null(&bo->vmap)) {
1292 			ret = xe_bo_vmap(bo);
1293 			if (ret)
1294 				goto out_vunmap;
1295 			unmap = true;
1296 		}
1297 
1298 		xe_map_memcpy_from(xe, backup->vmap.vaddr, &bo->vmap, 0,
1299 				   xe_bo_size(bo));
1300 	}
1301 
1302 	if (!bo->backup_obj)
1303 		bo->backup_obj = backup;
1304 out_vunmap:
1305 	xe_bo_vunmap(backup);
1306 out_backup:
1307 	if (unmap)
1308 		xe_bo_vunmap(bo);
1309 
1310 	return ret;
1311 }
1312 
1313 /**
1314  * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
1315  * @bo: The buffer object to move.
1316  *
1317  * On successful completion, the object memory will be moved to system memory.
1318  *
1319  * This is needed for special handling of pinned VRAM objects during
1320  * suspend-resume.
1321  *
1322  * Return: 0 on success. Negative error code on failure.
1323  */
1324 int xe_bo_evict_pinned(struct xe_bo *bo)
1325 {
1326 	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
1327 	struct xe_validation_ctx ctx;
1328 	struct drm_exec exec;
1329 	struct xe_bo *backup = bo->backup_obj;
1330 	bool backup_created = false;
1331 	int ret = 0;
1332 
1333 	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.exclusive = true}, ret) {
1334 		ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
1335 		drm_exec_retry_on_contention(&exec);
1336 		xe_assert(xe, !ret);
1337 
1338 		if (WARN_ON(!bo->ttm.resource)) {
1339 			ret = -EINVAL;
1340 			break;
1341 		}
1342 
1343 		if (WARN_ON(!xe_bo_is_pinned(bo))) {
1344 			ret = -EINVAL;
1345 			break;
1346 		}
1347 
1348 		if (!xe_bo_is_vram(bo))
1349 			break;
1350 
1351 		if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
1352 			break;
1353 
1354 		if (!backup) {
1355 			backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL,
1356 						   xe_bo_size(bo),
1357 						   DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
1358 						   XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
1359 						   XE_BO_FLAG_PINNED, &exec);
1360 			if (IS_ERR(backup)) {
1361 				drm_exec_retry_on_contention(&exec);
1362 				ret = PTR_ERR(backup);
1363 				xe_validation_retry_on_oom(&ctx, &ret);
1364 				break;
1365 			}
1366 			backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
1367 			backup_created = true;
1368 		}
1369 
1370 		ret = xe_bo_evict_pinned_copy(bo, backup);
1371 	}
1372 
1373 	if (ret && backup_created)
1374 		xe_bo_put(backup);
1375 
1376 	return ret;
1377 }
1378 
1379 /**
1380  * xe_bo_restore_pinned() - Restore a pinned VRAM object
1381  * @bo: The buffer object to move.
1382  *
1383  * On successful completion, the object memory will be moved back to VRAM.
1384  *
1385  * This is needed for special handling of pinned VRAM objects during
1386  * suspend-resume.
1387  *
1388  * Return: 0 on success. Negative error code on failure.
1389  */
1390 int xe_bo_restore_pinned(struct xe_bo *bo)
1391 {
1392 	struct ttm_operation_ctx ctx = {
1393 		.interruptible = false,
1394 		.gfp_retry_mayfail = false,
1395 	};
1396 	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
1397 	struct xe_bo *backup = bo->backup_obj;
1398 	bool unmap = false;
1399 	int ret;
1400 
1401 	if (!backup)
1402 		return 0;
1403 
1404 	xe_bo_lock(bo, false);
1405 
1406 	if (!xe_bo_is_pinned(backup)) {
1407 		ret = ttm_bo_validate(&backup->ttm, &backup->placement, &ctx);
1408 		if (ret)
1409 			goto out_unlock_bo;
1410 	}
1411 
1412 	if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
1413 		struct xe_migrate *migrate;
1414 		struct dma_fence *fence;
1415 
1416 		if (bo->tile)
1417 			migrate = bo->tile->migrate;
1418 		else
1419 			migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);
1420 
1421 		ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
1422 		if (ret)
1423 			goto out_unlock_bo;
1424 
1425 		fence = xe_migrate_copy(migrate, backup, bo,
1426 					backup->ttm.resource, bo->ttm.resource,
1427 					false);
1428 		if (IS_ERR(fence)) {
1429 			ret = PTR_ERR(fence);
1430 			goto out_unlock_bo;
1431 		}
1432 
1433 		dma_resv_add_fence(bo->ttm.base.resv, fence,
1434 				   DMA_RESV_USAGE_KERNEL);
1435 		dma_fence_put(fence);
1436 	} else {
1437 		ret = xe_bo_vmap(backup);
1438 		if (ret)
1439 			goto out_unlock_bo;
1440 
1441 		if (iosys_map_is_null(&bo->vmap)) {
1442 			ret = xe_bo_vmap(bo);
1443 			if (ret)
1444 				goto out_backup;
1445 			unmap = true;
1446 		}
1447 
1448 		xe_map_memcpy_to(xe, &bo->vmap, 0, backup->vmap.vaddr,
1449 				 xe_bo_size(bo));
1450 	}
1451 
1452 	bo->backup_obj = NULL;
1453 
1454 out_backup:
1455 	xe_bo_vunmap(backup);
1456 	if (!bo->backup_obj) {
1457 		if (xe_bo_is_pinned(backup))
1458 			ttm_bo_unpin(&backup->ttm);
1459 		xe_bo_put(backup);
1460 	}
1461 out_unlock_bo:
1462 	if (unmap)
1463 		xe_bo_vunmap(bo);
1464 	xe_bo_unlock(bo);
1465 	return ret;
1466 }
1467 
1468 int xe_bo_dma_unmap_pinned(struct xe_bo *bo)
1469 {
1470 	struct ttm_buffer_object *ttm_bo = &bo->ttm;
1471 	struct ttm_tt *tt = ttm_bo->ttm;
1472 
1473 	if (tt) {
1474 		struct xe_ttm_tt *xe_tt = container_of(tt, typeof(*xe_tt), ttm);
1475 
1476 		if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) {
1477 			dma_buf_unmap_attachment(ttm_bo->base.import_attach,
1478 						 ttm_bo->sg,
1479 						 DMA_BIDIRECTIONAL);
1480 			ttm_bo->sg = NULL;
1481 			xe_tt->sg = NULL;
1482 		} else if (xe_tt->sg) {
1483 			dma_unmap_sgtable(ttm_to_xe_device(ttm_bo->bdev)->drm.dev,
1484 					  xe_tt->sg,
1485 					  DMA_BIDIRECTIONAL, 0);
1486 			sg_free_table(xe_tt->sg);
1487 			xe_tt->sg = NULL;
1488 		}
1489 	}
1490 
1491 	return 0;
1492 }
1493 
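/* Resolve the CPU-accessible PFN backing @page_offset of a VRAM or stolen BO. */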
1494 static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo,
1495 				       unsigned long page_offset)
1496 {
1497 	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
1498 	struct xe_res_cursor cursor;
1499 	struct xe_vram_region *vram;
1500 
1501 	if (ttm_bo->resource->mem_type == XE_PL_STOLEN)
1502 		return xe_ttm_stolen_io_offset(bo, page_offset << PAGE_SHIFT) >> PAGE_SHIFT;
1503 
1504 	vram = res_to_mem_region(ttm_bo->resource);
1505 	xe_res_first(ttm_bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
1506 	return (vram->io_start + cursor.start) >> PAGE_SHIFT;
1507 }
1508 
1509 static void __xe_bo_vunmap(struct xe_bo *bo);
1510 
1511 /*
1512  * TODO: Move this function to TTM so we don't rely on how TTM does its
1513  * locking, thereby abusing TTM internals.
1514  */
1515 static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo)
1516 {
1517 	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
1518 	bool locked;
1519 
1520 	xe_assert(xe, !kref_read(&ttm_bo->kref));
1521 
1522 	/*
1523 	 * We can typically only race with TTM trylocking under the
1524 	 * lru_lock, which will immediately be unlocked again since
1525 	 * the ttm_bo refcount is zero at this point. So trylocking *should*
1526 	 * always succeed here, as long as we hold the lru lock.
1527 	 */
1528 	spin_lock(&ttm_bo->bdev->lru_lock);
1529 	locked = dma_resv_trylock(ttm_bo->base.resv);
1530 	spin_unlock(&ttm_bo->bdev->lru_lock);
1531 	xe_assert(xe, locked);
1532 
1533 	return locked;
1534 }
1535 
1536 static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
1537 {
1538 	struct dma_resv_iter cursor;
1539 	struct dma_fence *fence;
1540 	struct dma_fence *replacement = NULL;
1541 	struct xe_bo *bo;
1542 
1543 	if (!xe_bo_is_xe_bo(ttm_bo))
1544 		return;
1545 
1546 	bo = ttm_to_xe_bo(ttm_bo);
1547 	xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount)));
1548 
1549 	/*
1550 	 * Corner case where TTM fails to allocate memory and this BO's resv
1551 	 * still points to the VM's resv.
1552 	 */
1553 	if (ttm_bo->base.resv != &ttm_bo->base._resv)
1554 		return;
1555 
1556 	if (!xe_ttm_bo_lock_in_destructor(ttm_bo))
1557 		return;
1558 
1559 	/*
1560 	 * Scrub the preempt fences if any. The unbind fence is already
1561 	 * attached to the resv.
1562 	 * TODO: Don't do this for external bos once we scrub them after
1563 	 * unbind.
1564 	 */
1565 	dma_resv_for_each_fence(&cursor, ttm_bo->base.resv,
1566 				DMA_RESV_USAGE_BOOKKEEP, fence) {
1567 		if (xe_fence_is_xe_preempt(fence) &&
1568 		    !dma_fence_is_signaled(fence)) {
1569 			if (!replacement)
1570 				replacement = dma_fence_get_stub();
1571 
1572 			dma_resv_replace_fences(ttm_bo->base.resv,
1573 						fence->context,
1574 						replacement,
1575 						DMA_RESV_USAGE_BOOKKEEP);
1576 		}
1577 	}
1578 	dma_fence_put(replacement);
1579 
1580 	dma_resv_unlock(ttm_bo->base.resv);
1581 }
1582 
1583 static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
1584 {
1585 	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
1586 
1587 	if (!xe_bo_is_xe_bo(ttm_bo))
1588 		return;
1589 
1590 	if (IS_VF_CCS_READY(ttm_to_xe_device(ttm_bo->bdev)))
1591 		xe_sriov_vf_ccs_detach_bo(bo);
1592 
1593 	/*
1594 	 * Object is idle and about to be destroyed. Release the
1595 	 * dma-buf attachment.
1596 	 */
1597 	if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) {
1598 		struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm,
1599 						       struct xe_ttm_tt, ttm);
1600 
1601 		dma_buf_unmap_attachment(ttm_bo->base.import_attach, ttm_bo->sg,
1602 					 DMA_BIDIRECTIONAL);
1603 		ttm_bo->sg = NULL;
1604 		xe_tt->sg = NULL;
1605 	}
1606 }
1607 
1608 static void xe_ttm_bo_purge(struct ttm_buffer_object *ttm_bo, struct ttm_operation_ctx *ctx)
1609 {
1610 	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
1611 
1612 	if (ttm_bo->ttm) {
1613 		struct ttm_placement place = {};
1614 		int ret = ttm_bo_validate(ttm_bo, &place, ctx);
1615 
1616 		drm_WARN_ON(&xe->drm, ret);
1617 	}
1618 }
1619 
1620 static void xe_ttm_bo_swap_notify(struct ttm_buffer_object *ttm_bo)
1621 {
1622 	struct ttm_operation_ctx ctx = {
1623 		.interruptible = false,
1624 		.gfp_retry_mayfail = false,
1625 	};
1626 
1627 	if (ttm_bo->ttm) {
1628 		struct xe_ttm_tt *xe_tt =
1629 			container_of(ttm_bo->ttm, struct xe_ttm_tt, ttm);
1630 
1631 		if (xe_tt->purgeable)
1632 			xe_ttm_bo_purge(ttm_bo, &ctx);
1633 	}
1634 }
1635 
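/*
 * Access BO memory on behalf of the CPU (e.g. ptrace). Small accesses to
 * CPU-visible VRAM are done through an iomem mapping; non-visible VRAM or
 * accesses of 16K and larger go through the migrate engine.
 */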
1636 static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
1637 				unsigned long offset, void *buf, int len,
1638 				int write)
1639 {
1640 	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
1641 	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
1642 	struct iosys_map vmap;
1643 	struct xe_res_cursor cursor;
1644 	struct xe_vram_region *vram;
1645 	int bytes_left = len;
1646 	int err = 0;
1647 
1648 	xe_bo_assert_held(bo);
1649 	xe_device_assert_mem_access(xe);
1650 
1651 	if (!mem_type_is_vram(ttm_bo->resource->mem_type))
1652 		return -EIO;
1653 
1654 	if (!xe_bo_is_visible_vram(bo) || len >= SZ_16K) {
1655 		struct xe_migrate *migrate =
1656 			mem_type_to_migrate(xe, ttm_bo->resource->mem_type);
1657 
1658 		err = xe_migrate_access_memory(migrate, bo, offset, buf, len,
1659 					       write);
1660 		goto out;
1661 	}
1662 
1663 	vram = res_to_mem_region(ttm_bo->resource);
1664 	xe_res_first(ttm_bo->resource, offset & PAGE_MASK,
1665 		     xe_bo_size(bo) - (offset & PAGE_MASK), &cursor);
1666 
1667 	do {
1668 		unsigned long page_offset = (offset & ~PAGE_MASK);
1669 		int byte_count = min((int)(PAGE_SIZE - page_offset), bytes_left);
1670 
1671 		iosys_map_set_vaddr_iomem(&vmap, (u8 __iomem *)vram->mapping +
1672 					  cursor.start);
1673 		if (write)
1674 			xe_map_memcpy_to(xe, &vmap, page_offset, buf, byte_count);
1675 		else
1676 			xe_map_memcpy_from(xe, buf, &vmap, page_offset, byte_count);
1677 
1678 		buf += byte_count;
1679 		offset += byte_count;
1680 		bytes_left -= byte_count;
1681 		if (bytes_left)
1682 			xe_res_next(&cursor, PAGE_SIZE);
1683 	} while (bytes_left);
1684 
1685 out:
1686 	return err ?: len;
1687 }
1688 
1689 const struct ttm_device_funcs xe_ttm_funcs = {
1690 	.ttm_tt_create = xe_ttm_tt_create,
1691 	.ttm_tt_populate = xe_ttm_tt_populate,
1692 	.ttm_tt_unpopulate = xe_ttm_tt_unpopulate,
1693 	.ttm_tt_destroy = xe_ttm_tt_destroy,
1694 	.evict_flags = xe_evict_flags,
1695 	.move = xe_bo_move,
1696 	.io_mem_reserve = xe_ttm_io_mem_reserve,
1697 	.io_mem_pfn = xe_ttm_io_mem_pfn,
1698 	.access_memory = xe_ttm_access_memory,
1699 	.release_notify = xe_ttm_bo_release_notify,
1700 	.eviction_valuable = xe_bo_eviction_valuable,
1701 	.delete_mem_notify = xe_ttm_bo_delete_mem_notify,
1702 	.swap_notify = xe_ttm_bo_swap_notify,
1703 };
1704 
1705 static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
1706 {
1707 	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
1708 	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
1709 	struct xe_tile *tile;
1710 	u8 id;
1711 
1712 	if (bo->ttm.base.import_attach)
1713 		drm_prime_gem_destroy(&bo->ttm.base, NULL);
1714 	drm_gem_object_release(&bo->ttm.base);
1715 
1716 	xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list));
1717 
1718 	for_each_tile(tile, xe, id)
1719 		if (bo->ggtt_node[id] && bo->ggtt_node[id]->base.size)
1720 			xe_ggtt_remove_bo(tile->mem.ggtt, bo);
1721 
1722 #ifdef CONFIG_PROC_FS
1723 	if (bo->client)
1724 		xe_drm_client_remove_bo(bo);
1725 #endif
1726 
1727 	if (bo->vm && xe_bo_is_user(bo))
1728 		xe_vm_put(bo->vm);
1729 
1730 	if (bo->parent_obj)
1731 		xe_bo_put(bo->parent_obj);
1732 
1733 	mutex_lock(&xe->mem_access.vram_userfault.lock);
1734 	if (!list_empty(&bo->vram_userfault_link))
1735 		list_del(&bo->vram_userfault_link);
1736 	mutex_unlock(&xe->mem_access.vram_userfault.lock);
1737 
1738 	kfree(bo);
1739 }
1740 
1741 static void xe_gem_object_free(struct drm_gem_object *obj)
1742 {
1743 	/* Our BO reference counting scheme works as follows:
1744 	 *
1745 	 * The gem object kref is typically used throughout the driver,
1746 	 * and the gem object holds a ttm_buffer_object refcount, so
1747 	 * that when the last gem object reference is put, which is when
1748 	 * we end up in this function, we also put that ttm_buffer_object
1749 	 * refcount. Anything using gem interfaces is then no longer
1750 	 * allowed to access the object in a way that requires a gem
1751 	 * refcount, including locking the object.
1752 	 *
1753 	 * Driver TTM callbacks are allowed to use the ttm_buffer_object
1754 	 * refcount directly if needed.
1755 	 */
1756 	__xe_bo_vunmap(gem_to_xe_bo(obj));
1757 	ttm_bo_put(container_of(obj, struct ttm_buffer_object, base));
1758 }
1759 
1760 static void xe_gem_object_close(struct drm_gem_object *obj,
1761 				struct drm_file *file_priv)
1762 {
1763 	struct xe_bo *bo = gem_to_xe_bo(obj);
1764 
1765 	if (bo->vm && !xe_vm_in_fault_mode(bo->vm)) {
1766 		xe_assert(xe_bo_device(bo), xe_bo_is_user(bo));
1767 
1768 		xe_bo_lock(bo, false);
1769 		ttm_bo_set_bulk_move(&bo->ttm, NULL);
1770 		xe_bo_unlock(bo);
1771 	}
1772 }
1773 
1774 static bool should_migrate_to_smem(struct xe_bo *bo)
1775 {
1776 	/*
1777 	 * NOTE: The following atomic checks are platform-specific. For example,
1778 	 * if a device supports CXL atomics, these may not be necessary or
1779 	 * may behave differently.
1780 	 */
1781 
1782 	return bo->attr.atomic_access == DRM_XE_ATOMIC_GLOBAL ||
1783 	       bo->attr.atomic_access == DRM_XE_ATOMIC_CPU;
1784 }
1785 
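/*
 * Wait for the DMA_RESV_USAGE_KERNEL fences on the BO, or merely test them
 * when @ctx->no_wait_gpu is set, returning -EBUSY if they haven't signaled.
 */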
1786 static int xe_bo_wait_usage_kernel(struct xe_bo *bo, struct ttm_operation_ctx *ctx)
1787 {
1788 	long lerr;
1789 
1790 	if (ctx->no_wait_gpu)
1791 		return dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL) ?
1792 			0 : -EBUSY;
1793 
1794 	lerr = dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
1795 				     ctx->interruptible, MAX_SCHEDULE_TIMEOUT);
1796 	if (lerr < 0)
1797 		return lerr;
1798 	if (lerr == 0)
1799 		return -EBUSY;
1800 
1801 	return 0;
1802 }
1803 
1804 /* Populate the bo if swapped out, or migrate if the access mode requires that. */
1805 static int xe_bo_fault_migrate(struct xe_bo *bo, struct ttm_operation_ctx *ctx,
1806 			       struct drm_exec *exec)
1807 {
1808 	struct ttm_buffer_object *tbo = &bo->ttm;
1809 	int err = 0;
1810 
1811 	if (ttm_manager_type(tbo->bdev, tbo->resource->mem_type)->use_tt) {
1812 		err = xe_bo_wait_usage_kernel(bo, ctx);
1813 		if (!err)
1814 			err = ttm_bo_populate(&bo->ttm, ctx);
1815 	} else if (should_migrate_to_smem(bo)) {
1816 		xe_assert(xe_bo_device(bo), bo->flags & XE_BO_FLAG_SYSTEM);
1817 		err = xe_bo_migrate(bo, XE_PL_TT, ctx, exec);
1818 	}
1819 
1820 	return err;
1821 }
1822 
1823 /* Call into TTM to populate PTEs, and register bo for PTE removal on runtime suspend. */
1824 static vm_fault_t __xe_bo_cpu_fault(struct vm_fault *vmf, struct xe_device *xe, struct xe_bo *bo)
1825 {
1826 	vm_fault_t ret;
1827 
1828 	trace_xe_bo_cpu_fault(bo);
1829 
1830 	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
1831 				       TTM_BO_VM_NUM_PREFAULT);
1832 	/*
1833 	 * By the time TTM is actually called to insert PTEs, no blocking conditions
1834 	 * should remain; if any did, TTM could drop locks and return VM_FAULT_RETRY.
1835 	 */
1836 	xe_assert(xe, ret != VM_FAULT_RETRY);
1837 
1838 	if (ret == VM_FAULT_NOPAGE &&
1839 	    mem_type_is_vram(bo->ttm.resource->mem_type)) {
1840 		mutex_lock(&xe->mem_access.vram_userfault.lock);
1841 		if (list_empty(&bo->vram_userfault_link))
1842 			list_add(&bo->vram_userfault_link,
1843 				 &xe->mem_access.vram_userfault.list);
1844 		mutex_unlock(&xe->mem_access.vram_userfault.lock);
1845 	}
1846 
1847 	return ret;
1848 }
1849 
1850 static vm_fault_t xe_err_to_fault_t(int err)
1851 {
1852 	switch (err) {
1853 	case 0:
1854 	case -EINTR:
1855 	case -ERESTARTSYS:
1856 	case -EAGAIN:
1857 		return VM_FAULT_NOPAGE;
1858 	case -ENOMEM:
1859 	case -ENOSPC:
1860 		return VM_FAULT_OOM;
1861 	default:
1862 		break;
1863 	}
1864 	return VM_FAULT_SIGBUS;
1865 }
1866 
1867 static bool xe_ttm_bo_is_imported(struct ttm_buffer_object *tbo)
1868 {
1869 	dma_resv_assert_held(tbo->base.resv);
1870 
1871 	return tbo->ttm &&
1872 		(tbo->ttm->page_flags & (TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE)) ==
1873 		TTM_TT_FLAG_EXTERNAL;
1874 }
1875 
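/*
 * Non-blocking CPU fault fastpath: take locks with trylock semantics and
 * return VM_FAULT_RETRY whenever something would block, leaving the
 * blocking work to the slow path in xe_bo_cpu_fault().
 */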
1876 static vm_fault_t xe_bo_cpu_fault_fastpath(struct vm_fault *vmf, struct xe_device *xe,
1877 					   struct xe_bo *bo, bool needs_rpm)
1878 {
1879 	struct ttm_buffer_object *tbo = &bo->ttm;
1880 	vm_fault_t ret = VM_FAULT_RETRY;
1881 	struct xe_validation_ctx ctx;
1882 	struct ttm_operation_ctx tctx = {
1883 		.interruptible = true,
1884 		.no_wait_gpu = true,
1885 		.gfp_retry_mayfail = true,
1887 	};
1888 	int err;
1889 
1890 	if (needs_rpm && !xe_pm_runtime_get_if_active(xe))
1891 		return VM_FAULT_RETRY;
1892 
1893 	err = xe_validation_ctx_init(&ctx, &xe->val, NULL,
1894 				     (struct xe_val_flags) {
1895 					     .interruptible = true,
1896 					     .no_block = true
1897 				     });
1898 	if (err)
1899 		goto out_pm;
1900 
1901 	if (!dma_resv_trylock(tbo->base.resv))
1902 		goto out_validation;
1903 
1904 	if (xe_ttm_bo_is_imported(tbo)) {
1905 		ret = VM_FAULT_SIGBUS;
1906 		drm_dbg(&xe->drm, "CPU trying to access an imported buffer object.\n");
1907 		goto out_unlock;
1908 	}
1909 
1910 	err = xe_bo_fault_migrate(bo, &tctx, NULL);
1911 	if (err) {
1912 		/* Keep VM_FAULT_RETRY for -ENOMEM, -ENOSPC and -EBUSY; convert other errors. */
1913 		if (err != -ENOMEM && err != -ENOSPC && err != -EBUSY)
1914 			ret = xe_err_to_fault_t(err);
1915 		goto out_unlock;
1916 	}
1917 
1918 	if (dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL))
1919 		ret = __xe_bo_cpu_fault(vmf, xe, bo);
1920 
1921 out_unlock:
1922 	dma_resv_unlock(tbo->base.resv);
1923 out_validation:
1924 	xe_validation_ctx_fini(&ctx);
1925 out_pm:
1926 	if (needs_rpm)
1927 		xe_pm_runtime_put(xe);
1928 
1929 	return ret;
1930 }
1931 
1932 static vm_fault_t xe_bo_cpu_fault(struct vm_fault *vmf)
1933 {
1934 	struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
1935 	struct drm_device *ddev = tbo->base.dev;
1936 	struct xe_device *xe = to_xe_device(ddev);
1937 	struct xe_bo *bo = ttm_to_xe_bo(tbo);
1938 	bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK;
1939 	bool retry_after_wait = false;
1940 	struct xe_validation_ctx ctx;
1941 	struct drm_exec exec;
1942 	vm_fault_t ret;
1943 	int err = 0;
1944 	int idx;
1945 
1946 	if (!drm_dev_enter(&xe->drm, &idx))
1947 		return ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
1948 
1949 	ret = xe_bo_cpu_fault_fastpath(vmf, xe, bo, needs_rpm);
1950 	if (ret != VM_FAULT_RETRY)
1951 		goto out;
1952 
1953 	if (fault_flag_allow_retry_first(vmf->flags)) {
1954 		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
1955 			goto out;
1956 		retry_after_wait = true;
1957 		xe_bo_get(bo);
1958 		mmap_read_unlock(vmf->vma->vm_mm);
1959 	} else {
1960 		ret = VM_FAULT_NOPAGE;
1961 	}
1962 
1963 	/*
1964 	 * The fastpath failed and we were not required to return and retry immediately.
1965 	 * We're now running in one of two modes:
1966 	 *
1967 	 * 1) retry_after_wait == true: The mmap_read_lock() has been dropped, so we
1968 	 * can only resolve the blocking waits here, not the fault itself. The aim is
1969 	 * that the fastpath succeeds when the fault is retried, although it may still
1970 	 * fail since the bo lock is dropped in between.
1971 	 *
1972 	 * 2) retry_after_wait == false: The fastpath failed, typically even after
1973 	 * a retry. Do whatever's necessary to resolve the fault.
1974 	 *
1975 	 * This construct is recommended to avoid excessive waits under the mmap_lock.
1976 	 */
1977 
1978 	if (needs_rpm)
1979 		xe_pm_runtime_get(xe);
1980 
1981 	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
1982 			    err) {
1983 		struct ttm_operation_ctx tctx = {
1984 			.interruptible = true,
1985 			.no_wait_gpu = false,
1986 			.gfp_retry_mayfail = retry_after_wait,
1987 		};
1988 
1989 		err = drm_exec_lock_obj(&exec, &tbo->base);
1990 		drm_exec_retry_on_contention(&exec);
1991 		if (err)
1992 			break;
1993 
1994 		if (xe_ttm_bo_is_imported(tbo)) {
1995 			err = -EFAULT;
1996 			drm_dbg(&xe->drm, "CPU trying to access an imported buffer object.\n");
1997 			break;
1998 		}
1999 
2000 		err = xe_bo_fault_migrate(bo, &tctx, &exec);
2001 		if (err) {
2002 			drm_exec_retry_on_contention(&exec);
2003 			xe_validation_retry_on_oom(&ctx, &err);
2004 			break;
2005 		}
2006 
2007 		err = xe_bo_wait_usage_kernel(bo, &tctx);
2008 		if (err)
2009 			break;
2010 
2011 		if (!retry_after_wait)
2012 			ret = __xe_bo_cpu_fault(vmf, xe, bo);
2013 	}
2014 	/* if retry_after_wait == true, we *must* return VM_FAULT_RETRY. */
2015 	if (err && !retry_after_wait)
2016 		ret = xe_err_to_fault_t(err);
2017 
2018 	if (needs_rpm)
2019 		xe_pm_runtime_put(xe);
2020 
2021 	if (retry_after_wait)
2022 		xe_bo_put(bo);
2023 out:
2024 	drm_dev_exit(idx);
2025 
2026 	return ret;
2027 }
2028 
2029 static int xe_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
2030 			   void *buf, int len, int write)
2031 {
2032 	struct ttm_buffer_object *ttm_bo = vma->vm_private_data;
2033 	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
2034 	struct xe_device *xe = xe_bo_device(bo);
2035 	int ret;
2036 
2037 	xe_pm_runtime_get(xe);
2038 	ret = ttm_bo_vm_access(vma, addr, buf, len, write);
2039 	xe_pm_runtime_put(xe);
2040 
2041 	return ret;
2042 }
2043 
2044 /**
2045  * xe_bo_read() - Read from an xe_bo
2046  * @bo: The buffer object to read from.
2047  * @offset: The byte offset to start reading from.
2048  * @dst: Location to store the data read.
2049  * @size: Size in bytes for the read.
2050  *
2051  * Read @size bytes from the @bo, starting from @offset, storing into @dst.
2052  *
2053  * Return: Zero on success, or negative error.
2054  */
2055 int xe_bo_read(struct xe_bo *bo, u64 offset, void *dst, int size)
2056 {
2057 	int ret;
2058 
2059 	ret = ttm_bo_access(&bo->ttm, offset, dst, size, 0);
2060 	if (ret >= 0 && ret != size)
2061 		ret = -EIO;
2062 	else if (ret == size)
2063 		ret = 0;
2064 
2065 	return ret;
2066 }
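
/*
 * Editorial usage sketch for xe_bo_read() (not driver code): read a dword back
 * from the start of a bo. "bo" is assumed to be a valid buffer object that
 * already has backing store.
 *
 *	u32 value;
 *	int err;
 *
 *	err = xe_bo_read(bo, 0, &value, sizeof(value));
 *	if (err)
 *		return err;
 */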
2067 
2068 static const struct vm_operations_struct xe_gem_vm_ops = {
2069 	.fault = xe_bo_cpu_fault,
2070 	.open = ttm_bo_vm_open,
2071 	.close = ttm_bo_vm_close,
2072 	.access = xe_bo_vm_access,
2073 };
2074 
2075 static const struct drm_gem_object_funcs xe_gem_object_funcs = {
2076 	.free = xe_gem_object_free,
2077 	.close = xe_gem_object_close,
2078 	.mmap = drm_gem_ttm_mmap,
2079 	.export = xe_gem_prime_export,
2080 	.vm_ops = &xe_gem_vm_ops,
2081 };
2082 
2083 /**
2084  * xe_bo_alloc - Allocate storage for a struct xe_bo
2085  *
2086  * This function is intended to allocate storage to be used for input
2087  * to __xe_bo_create_locked(), in case a pointer to the bo to be
2088  * created is needed before the call to __xe_bo_create_locked().
2089  * If __xe_bo_create_locked() ends up never being called, the
2090  * storage allocated with this function needs to be freed using
2091  * xe_bo_free().
2092  *
2093  * Return: A pointer to an uninitialized struct xe_bo on success,
2094  * ERR_PTR(-ENOMEM) on error.
2095  */
2096 struct xe_bo *xe_bo_alloc(void)
2097 {
2098 	struct xe_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
2099 
2100 	if (!bo)
2101 		return ERR_PTR(-ENOMEM);
2102 
2103 	return bo;
2104 }
2105 
2106 /**
2107  * xe_bo_free - Free storage allocated using xe_bo_alloc()
2108  * @bo: The buffer object storage.
2109  *
2110  * Refer to xe_bo_alloc() documentation for valid use-cases.
2111  */
2112 void xe_bo_free(struct xe_bo *bo)
2113 {
2114 	kfree(bo);
2115 }
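
/*
 * Editorial sketch of the xe_bo_alloc()/xe_bo_free() pattern described above,
 * mirroring what __xe_bo_create_locked() further down does: storage is
 * preallocated so a bo pointer exists before xe_bo_init_locked() runs, and is
 * freed manually if that call never happens. prepare_placement() is a
 * placeholder for whatever setup needs the early pointer.
 *
 *	struct xe_bo *bo = xe_bo_alloc();
 *	int err;
 *
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 *	err = prepare_placement(bo);	placeholder step
 *	if (err) {
 *		xe_bo_free(bo);
 *		return err;
 *	}
 *
 *	bo = xe_bo_init_locked(xe, bo, tile, resv, bulk, size,
 *			       cpu_caching, type, flags, exec);
 */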
2116 
2117 /**
2118  * xe_bo_init_locked() - Initialize or create an xe_bo.
2119  * @xe: The xe device.
2120  * @bo: An already allocated buffer object or NULL
2121  * if the function should allocate a new one.
2122  * @tile: The tile to select for migration of this bo, and the tile used for
2123  * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2124  * @resv: Pointer to a locked shared reservation object to use for this bo,
2125  * or NULL for the xe_bo to use its own.
2126  * @bulk: The bulk move to use for LRU bumping, or NULL for external bos.
2127  * @size: The storage size to use for the bo.
2128  * @cpu_caching: The cpu caching used for system memory backing store.
2129  * @type: The TTM buffer object type.
2130  * @flags: XE_BO_FLAG_ flags.
2131  * @exec: The drm_exec transaction to use for exhaustive eviction.
2132  *
2133  * Initialize or create an xe buffer object. On failure, any allocated buffer
2134  * object passed in @bo will have been unreferenced.
2135  *
2136  * Return: The buffer object on success. Negative error pointer on failure.
2137  */
2138 struct xe_bo *xe_bo_init_locked(struct xe_device *xe, struct xe_bo *bo,
2139 				struct xe_tile *tile, struct dma_resv *resv,
2140 				struct ttm_lru_bulk_move *bulk, size_t size,
2141 				u16 cpu_caching, enum ttm_bo_type type,
2142 				u32 flags, struct drm_exec *exec)
2143 {
2144 	struct ttm_operation_ctx ctx = {
2145 		.interruptible = true,
2146 		.no_wait_gpu = false,
2147 		.gfp_retry_mayfail = true,
2148 	};
2149 	struct ttm_placement *placement;
2150 	uint32_t alignment;
2151 	size_t aligned_size;
2152 	int err;
2153 
2154 	/* Only kernel objects should set a tile */
2155 	xe_assert(xe, !tile || type == ttm_bo_type_kernel);
2156 
2157 	if (XE_WARN_ON(!size)) {
2158 		xe_bo_free(bo);
2159 		return ERR_PTR(-EINVAL);
2160 	}
2161 
2162 	/* XE_BO_FLAG_GGTTx requires XE_BO_FLAG_GGTT also be set */
2163 	if ((flags & XE_BO_FLAG_GGTT_ALL) && !(flags & XE_BO_FLAG_GGTT))
2164 		return ERR_PTR(-EINVAL);
2165 
2166 	if (flags & (XE_BO_FLAG_VRAM_MASK | XE_BO_FLAG_STOLEN) &&
2167 	    !(flags & XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE) &&
2168 	    ((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) ||
2169 	     (flags & (XE_BO_FLAG_NEEDS_64K | XE_BO_FLAG_NEEDS_2M)))) {
2170 		size_t align = flags & XE_BO_FLAG_NEEDS_2M ? SZ_2M : SZ_64K;
2171 
2172 		aligned_size = ALIGN(size, align);
2173 		if (type != ttm_bo_type_device)
2174 			size = ALIGN(size, align);
2175 		flags |= XE_BO_FLAG_INTERNAL_64K;
2176 		alignment = align >> PAGE_SHIFT;
2177 	} else {
2178 		aligned_size = ALIGN(size, SZ_4K);
2179 		flags &= ~XE_BO_FLAG_INTERNAL_64K;
2180 		alignment = SZ_4K >> PAGE_SHIFT;
2181 	}
2182 
2183 	if (type == ttm_bo_type_device && aligned_size != size)
2184 		return ERR_PTR(-EINVAL);
2185 
2186 	if (!bo) {
2187 		bo = xe_bo_alloc();
2188 		if (IS_ERR(bo))
2189 			return bo;
2190 	}
2191 
2192 	bo->ccs_cleared = false;
2193 	bo->tile = tile;
2194 	bo->flags = flags;
2195 	bo->cpu_caching = cpu_caching;
2196 	bo->ttm.base.funcs = &xe_gem_object_funcs;
2197 	bo->ttm.priority = XE_BO_PRIORITY_NORMAL;
2198 	INIT_LIST_HEAD(&bo->pinned_link);
2199 #ifdef CONFIG_PROC_FS
2200 	INIT_LIST_HEAD(&bo->client_link);
2201 #endif
2202 	INIT_LIST_HEAD(&bo->vram_userfault_link);
2203 
2204 	drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size);
2205 
2206 	if (resv) {
2207 		ctx.allow_res_evict = !(flags & XE_BO_FLAG_NO_RESV_EVICT);
2208 		ctx.resv = resv;
2209 	}
2210 
2211 	xe_validation_assert_exec(xe, exec, &bo->ttm.base);
2212 	if (!(flags & XE_BO_FLAG_FIXED_PLACEMENT)) {
2213 		err = __xe_bo_placement_for_flags(xe, bo, bo->flags, type);
2214 		if (WARN_ON(err)) {
2215 			xe_ttm_bo_destroy(&bo->ttm);
2216 			return ERR_PTR(err);
2217 		}
2218 	}
2219 
2220 	/* Defer populating type_sg bos */
2221 	placement = (type == ttm_bo_type_sg ||
2222 		     bo->flags & XE_BO_FLAG_DEFER_BACKING) ? &sys_placement :
2223 		&bo->placement;
2224 	err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type,
2225 				   placement, alignment,
2226 				   &ctx, NULL, resv, xe_ttm_bo_destroy);
2227 	if (err)
2228 		return ERR_PTR(err);
2229 
2230 	/*
2231 	 * The VRAM pages underneath are potentially still being accessed by the
2232 	 * GPU, as per async GPU clearing and async evictions. However TTM makes
2233 	 * sure to add any corresponding move/clear fences into the objects
2234 	 * sure to add any corresponding move/clear fences into the object's
2235 	 *
2236 	 * For KMD internal buffers we don't care about GPU clearing; however, we
2237 	 * still need to handle async evictions, where the VRAM is still being
2238 	 * accessed by the GPU. Most internal callers are not expecting this,
2239 	 * since they are missing the required synchronisation before accessing
2240 	 * the memory. To keep things simple just sync wait any kernel fences
2241 	 * here, if the buffer is designated KMD internal.
2242 	 *
2243 	 * For normal userspace objects we should already have the required
2244 	 * pipelining or sync waiting elsewhere, since we already have to deal
2245 	 * with things like async GPU clearing.
2246 	 */
2247 	if (type == ttm_bo_type_kernel) {
2248 		long timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
2249 						     DMA_RESV_USAGE_KERNEL,
2250 						     ctx.interruptible,
2251 						     MAX_SCHEDULE_TIMEOUT);
2252 
2253 		if (timeout < 0) {
2254 			if (!resv)
2255 				dma_resv_unlock(bo->ttm.base.resv);
2256 			xe_bo_put(bo);
2257 			return ERR_PTR(timeout);
2258 		}
2259 	}
2260 
2261 	bo->created = true;
2262 	if (bulk)
2263 		ttm_bo_set_bulk_move(&bo->ttm, bulk);
2264 	else
2265 		ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
2266 
2267 	return bo;
2268 }
2269 
2270 static int __xe_bo_fixed_placement(struct xe_device *xe,
2271 				   struct xe_bo *bo, enum ttm_bo_type type,
2272 				   u32 flags,
2273 				   u64 start, u64 end, u64 size)
2274 {
2275 	struct ttm_place *place = bo->placements;
2276 	u32 vram_flag, vram_stolen_flags;
2277 
2278 	/*
2279 	 * to allow fixed placement in GGTT of a VF, post-migration fixups would have to
2280 	 * include selecting a new fixed offset and shifting the page ranges for it
2281 	 */
2282 	xe_assert(xe, !IS_SRIOV_VF(xe) || !(bo->flags & XE_BO_FLAG_GGTT));
2283 
2284 	if (flags & (XE_BO_FLAG_USER | XE_BO_FLAG_SYSTEM))
2285 		return -EINVAL;
2286 
2287 	vram_flag = flags & XE_BO_FLAG_VRAM_MASK;
2288 	vram_stolen_flags = (flags & (XE_BO_FLAG_STOLEN)) | vram_flag;
2289 
2290 	/* check if more than one VRAM/STOLEN flag is set */
2291 	if (hweight32(vram_stolen_flags) > 1)
2292 		return -EINVAL;
2293 
2294 	place->flags = TTM_PL_FLAG_CONTIGUOUS;
2295 	place->fpfn = start >> PAGE_SHIFT;
2296 	place->lpfn = end >> PAGE_SHIFT;
2297 
2298 	if (flags & XE_BO_FLAG_STOLEN)
2299 		place->mem_type = XE_PL_STOLEN;
2300 	else
2301 		place->mem_type = bo_vram_flags_to_vram_placement(xe, flags, vram_flag, type);
2302 
2303 	bo->placement = (struct ttm_placement) {
2304 		.num_placement = 1,
2305 		.placement = place,
2306 	};
2307 
2308 	return 0;
2309 }
2310 
2311 static struct xe_bo *
2312 __xe_bo_create_locked(struct xe_device *xe,
2313 		      struct xe_tile *tile, struct xe_vm *vm,
2314 		      size_t size, u64 start, u64 end,
2315 		      u16 cpu_caching, enum ttm_bo_type type, u32 flags,
2316 		      u64 alignment, struct drm_exec *exec)
2317 {
2318 	struct xe_bo *bo = NULL;
2319 	int err;
2320 
2321 	if (vm)
2322 		xe_vm_assert_held(vm);
2323 
2324 	if (start || end != ~0ULL) {
2325 		bo = xe_bo_alloc();
2326 		if (IS_ERR(bo))
2327 			return bo;
2328 
2329 		flags |= XE_BO_FLAG_FIXED_PLACEMENT;
2330 		err = __xe_bo_fixed_placement(xe, bo, type, flags, start, end, size);
2331 		if (err) {
2332 			xe_bo_free(bo);
2333 			return ERR_PTR(err);
2334 		}
2335 	}
2336 
2337 	bo = xe_bo_init_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
2338 			       vm && !xe_vm_in_fault_mode(vm) &&
2339 			       flags & XE_BO_FLAG_USER ?
2340 			       &vm->lru_bulk_move : NULL, size,
2341 			       cpu_caching, type, flags, exec);
2342 	if (IS_ERR(bo))
2343 		return bo;
2344 
2345 	bo->min_align = alignment;
2346 
2347 	/*
2348 	 * Note that instead of taking a reference on the drm_gpuvm_resv_bo(),
2349 	 * to ensure the shared resv doesn't disappear under the bo, the bo
2350 	 * will keep a reference to the vm, and avoid circular references
2351 	 * by having all the vm's bo references released at vm close
2352 	 * time.
2353 	 */
2354 	if (vm && xe_bo_is_user(bo))
2355 		xe_vm_get(vm);
2356 	bo->vm = vm;
2357 
2358 	if (bo->flags & XE_BO_FLAG_GGTT) {
2359 		struct xe_tile *t;
2360 		u8 id;
2361 
2362 		if (!(bo->flags & XE_BO_FLAG_GGTT_ALL)) {
2363 			if (!tile && flags & XE_BO_FLAG_STOLEN)
2364 				tile = xe_device_get_root_tile(xe);
2365 
2366 			xe_assert(xe, tile);
2367 		}
2368 
2369 		for_each_tile(t, xe, id) {
2370 			if (t != tile && !(bo->flags & XE_BO_FLAG_GGTTx(t)))
2371 				continue;
2372 
2373 			if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
2374 				err = xe_ggtt_insert_bo_at(t->mem.ggtt, bo,
2375 							   start + xe_bo_size(bo), U64_MAX,
2376 							   exec);
2377 			} else {
2378 				err = xe_ggtt_insert_bo(t->mem.ggtt, bo, exec);
2379 			}
2380 			if (err)
2381 				goto err_unlock_put_bo;
2382 		}
2383 	}
2384 
2385 	trace_xe_bo_create(bo);
2386 	return bo;
2387 
2388 err_unlock_put_bo:
2389 	__xe_bo_unset_bulk_move(bo);
2390 	xe_bo_unlock_vm_held(bo);
2391 	xe_bo_put(bo);
2392 	return ERR_PTR(err);
2393 }
2394 
2395 /**
2396  * xe_bo_create_locked() - Create a BO
2397  * @xe: The xe device.
2398  * @tile: The tile to select for migration of this bo, and the tile used for
2399  * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2400  * @vm: The local vm or NULL for external objects.
2401  * @size: The storage size to use for the bo.
2402  * @type: The TTM buffer object type.
2403  * @flags: XE_BO_FLAG_ flags.
2404  * @exec: The drm_exec transaction to use for exhaustive eviction.
2405  *
2406  * Create a locked xe BO with no range or alignment restrictions.
2407  *
2408  * Return: The buffer object on success. Negative error pointer on failure.
2409  */
2410 struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
2411 				  struct xe_vm *vm, size_t size,
2412 				  enum ttm_bo_type type, u32 flags,
2413 				  struct drm_exec *exec)
2414 {
2415 	return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type,
2416 				     flags, 0, exec);
2417 }
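
/*
 * Editorial usage sketch (assuming the caller already runs inside a
 * drm_exec/xe_validation transaction represented by "exec"): create a locked
 * kernel bo in system memory, drop the lock when done, and put the reference
 * once the bo is no longer needed.
 *
 *	struct xe_bo *bo;
 *
 *	bo = xe_bo_create_locked(xe, NULL, NULL, SZ_64K, ttm_bo_type_kernel,
 *				 XE_BO_FLAG_SYSTEM, exec);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 *	... set up the bo while it is locked ...
 *
 *	xe_bo_unlock(bo);
 *	...
 *	xe_bo_put(bo);
 */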
2418 
2419 static struct xe_bo *xe_bo_create_novm(struct xe_device *xe, struct xe_tile *tile,
2420 				       size_t size, u16 cpu_caching,
2421 				       enum ttm_bo_type type, u32 flags,
2422 				       u64 alignment, bool intr)
2423 {
2424 	struct xe_validation_ctx ctx;
2425 	struct drm_exec exec;
2426 	struct xe_bo *bo;
2427 	int ret = 0;
2428 
2429 	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = intr},
2430 			    ret) {
2431 		bo = __xe_bo_create_locked(xe, tile, NULL, size, 0, ~0ULL,
2432 					   cpu_caching, type, flags, alignment, &exec);
2433 		drm_exec_retry_on_contention(&exec);
2434 		if (IS_ERR(bo)) {
2435 			ret = PTR_ERR(bo);
2436 			xe_validation_retry_on_oom(&ctx, &ret);
2437 		} else {
2438 			xe_bo_unlock(bo);
2439 		}
2440 	}
2441 
2442 	return ret ? ERR_PTR(ret) : bo;
2443 }
2444 
2445 /**
2446  * xe_bo_create_user() - Create a user BO
2447  * @xe: The xe device.
2448  * @vm: The local vm or NULL for external objects.
2449  * @size: The storage size to use for the bo.
2450  * @cpu_caching: The caching mode to be used for system backing store.
2451  * @flags: XE_BO_FLAG_ flags.
2452  * @exec: The drm_exec transaction to use for exhaustive eviction, or NULL
2453  * if such a transaction should be initiated by the call.
2454  *
2455  * Create a bo on behalf of user-space.
2456  *
2457  * Return: The buffer object on success. Negative error pointer on failure.
2458  */
2459 struct xe_bo *xe_bo_create_user(struct xe_device *xe,
2460 				struct xe_vm *vm, size_t size,
2461 				u16 cpu_caching,
2462 				u32 flags, struct drm_exec *exec)
2463 {
2464 	struct xe_bo *bo;
2465 
2466 	flags |= XE_BO_FLAG_USER;
2467 
2468 	if (vm || exec) {
2469 		xe_assert(xe, exec);
2470 		bo = __xe_bo_create_locked(xe, NULL, vm, size, 0, ~0ULL,
2471 					   cpu_caching, ttm_bo_type_device,
2472 					   flags, 0, exec);
2473 		if (!IS_ERR(bo))
2474 			xe_bo_unlock_vm_held(bo);
2475 	} else {
2476 		bo = xe_bo_create_novm(xe, NULL, size, cpu_caching,
2477 				       ttm_bo_type_device, flags, 0, true);
2478 	}
2479 
2480 	return bo;
2481 }
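
/*
 * Editorial usage sketch: create an external user bo outside of any
 * drm_exec transaction, letting the function start its own, as the NULL
 * @vm / NULL @exec branch above does. "size" is a placeholder.
 *
 *	struct xe_bo *bo;
 *
 *	bo = xe_bo_create_user(xe, NULL, size, DRM_XE_GEM_CPU_CACHING_WC,
 *			       XE_BO_FLAG_SYSTEM, NULL);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 */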
2482 
2483 /**
2484  * xe_bo_create_pin_range_novm() - Create and pin a BO with range options.
2485  * @xe: The xe device.
2486  * @tile: The tile to select for migration of this bo, and the tile used for
2487  * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2488  * @size: The storage size to use for the bo.
2489  * @start: Start of fixed VRAM range or 0.
2490  * @end: End of fixed VRAM range or ~0ULL.
2491  * @type: The TTM buffer object type.
2492  * @flags: XE_BO_FLAG_ flags.
2493  *
2494  * Create and pin an Xe BO, optionally at a fixed range. If @start and @end indicate
2495  * a fixed VRAM range, this must be a ttm_bo_type_kernel bo with VRAM placement
2496  * only.
2497  *
2498  * Return: The buffer object on success. Negative error pointer on failure.
2499  */
2500 struct xe_bo *xe_bo_create_pin_range_novm(struct xe_device *xe, struct xe_tile *tile,
2501 					  size_t size, u64 start, u64 end,
2502 					  enum ttm_bo_type type, u32 flags)
2503 {
2504 	struct xe_validation_ctx ctx;
2505 	struct drm_exec exec;
2506 	struct xe_bo *bo;
2507 	int err = 0;
2508 
2509 	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
2510 		bo = __xe_bo_create_locked(xe, tile, NULL, size, start, end,
2511 					   0, type, flags, 0, &exec);
2512 		if (IS_ERR(bo)) {
2513 			drm_exec_retry_on_contention(&exec);
2514 			err = PTR_ERR(bo);
2515 			xe_validation_retry_on_oom(&ctx, &err);
2516 			break;
2517 		}
2518 
2519 		err = xe_bo_pin(bo, &exec);
2520 		xe_bo_unlock(bo);
2521 		if (err) {
2522 			xe_bo_put(bo);
2523 			drm_exec_retry_on_contention(&exec);
2524 			xe_validation_retry_on_oom(&ctx, &err);
2525 			break;
2526 		}
2527 	}
2528 
2529 	return err ? ERR_PTR(err) : bo;
2530 }
2531 
2532 static struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
2533 						     struct xe_tile *tile,
2534 						     struct xe_vm *vm,
2535 						     size_t size, u64 offset,
2536 						     enum ttm_bo_type type, u32 flags,
2537 						     u64 alignment, struct drm_exec *exec)
2538 {
2539 	struct xe_bo *bo;
2540 	int err;
2541 	u64 start = offset == ~0ull ? 0 : offset;
2542 	u64 end = offset == ~0ull ? ~0ull : start + size;
2543 
2544 	if (flags & XE_BO_FLAG_STOLEN &&
2545 	    xe_ttm_stolen_cpu_access_needs_ggtt(xe))
2546 		flags |= XE_BO_FLAG_GGTT;
2547 
2548 	bo = __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type,
2549 				   flags | XE_BO_FLAG_NEEDS_CPU_ACCESS | XE_BO_FLAG_PINNED,
2550 				   alignment, exec);
2551 	if (IS_ERR(bo))
2552 		return bo;
2553 
2554 	err = xe_bo_pin(bo, exec);
2555 	if (err)
2556 		goto err_put;
2557 
2558 	err = xe_bo_vmap(bo);
2559 	if (err)
2560 		goto err_unpin;
2561 
2562 	xe_bo_unlock_vm_held(bo);
2563 
2564 	return bo;
2565 
2566 err_unpin:
2567 	xe_bo_unpin(bo);
2568 err_put:
2569 	xe_bo_unlock_vm_held(bo);
2570 	xe_bo_put(bo);
2571 	return ERR_PTR(err);
2572 }
2573 
2574 /**
2575  * xe_bo_create_pin_map_at_novm() - Create pinned and mapped bo at optional VRAM offset
2576  * @xe: The xe device.
2577  * @tile: The tile to select for migration of this bo, and the tile used for
2578  * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2579  * @size: The storage size to use for the bo.
2580  * @offset: Optional VRAM offset or %~0ull for don't care.
2581  * @type: The TTM buffer object type.
2582  * @flags: XE_BO_FLAG_ flags.
2583  * @alignment: GGTT alignment.
2584  * @intr: Whether to execute any waits for backing store interruptible.
2585  *
2586  * Create a pinned and mapped bo with optional VRAM offset and GGTT alignment
2587  * options. The bo will be external and not associated with a VM.
2588  *
2589  * Return: The buffer object on success. Negative error pointer on failure.
2590  * In particular, the function may return ERR_PTR(%-EINTR) if @intr was set
2591  * to true on entry.
2592  */
2593 struct xe_bo *
2594 xe_bo_create_pin_map_at_novm(struct xe_device *xe, struct xe_tile *tile,
2595 			     size_t size, u64 offset, enum ttm_bo_type type, u32 flags,
2596 			     u64 alignment, bool intr)
2597 {
2598 	struct xe_validation_ctx ctx;
2599 	struct drm_exec exec;
2600 	struct xe_bo *bo;
2601 	int ret = 0;
2602 
2603 	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = intr},
2604 			    ret) {
2605 		bo = xe_bo_create_pin_map_at_aligned(xe, tile, NULL, size, offset,
2606 						     type, flags, alignment, &exec);
2607 		if (IS_ERR(bo)) {
2608 			drm_exec_retry_on_contention(&exec);
2609 			ret = PTR_ERR(bo);
2610 			xe_validation_retry_on_oom(&ctx, &ret);
2611 		}
2612 	}
2613 
2614 	return ret ? ERR_PTR(ret) : bo;
2615 }
2616 
2617 /**
2618  * xe_bo_create_pin_map() - Create pinned and mapped bo
2619  * @xe: The xe device.
2620  * @tile: The tile to select for migration of this bo, and the tile used for
2621  * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2622  * @vm: The vm to associate the buffer object with. The vm's resv must be locked
2623  * with the transaction represented by @exec.
2624  * @size: The storage size to use for the bo.
2625  * @type: The TTM buffer object type.
2626  * @flags: XE_BO_FLAG_ flags.
2627  * @exec: The drm_exec transaction to use for exhaustive eviction, and
2628  * previously used for locking @vm's resv.
2629  *
2630  * Create a pinned and mapped bo. The bo is associated with @vm if non-NULL,
2631  * otherwise it is an external bo.
2632  *
2633  * Return: The buffer object on success. Negative error pointer on failure.
2634  * In particular, the function may return ERR_PTR(%-EINTR) if @exec was
2635  * configured for interruptible locking.
2636  */
2637 struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
2638 				   struct xe_vm *vm, size_t size,
2639 				   enum ttm_bo_type type, u32 flags,
2640 				   struct drm_exec *exec)
2641 {
2642 	return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, ~0ull, type, flags,
2643 					       0, exec);
2644 }
2645 
2646 /**
2647  * xe_bo_create_pin_map_novm() - Create pinned and mapped bo
2648  * @xe: The xe device.
2649  * @tile: The tile to select for migration of this bo, and the tile used for
2650  * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2651  * @size: The storage size to use for the bo.
2652  * @type: The TTM buffer object type.
2653  * @flags: XE_BO_FLAG_ flags.
2654  * @intr: Whether to execute any waits for backing store interruptible.
2655  *
2656  * Create a pinned and mapped bo. The bo will be external and not associated
2657  * with a VM.
2658  *
2659  * Return: The buffer object on success. Negative error pointer on failure.
2660  * In particular, the function may return ERR_PTR(%-EINTR) if @intr was set
2661  * to true on entry.
2662  */
2663 struct xe_bo *xe_bo_create_pin_map_novm(struct xe_device *xe, struct xe_tile *tile,
2664 					size_t size, enum ttm_bo_type type, u32 flags,
2665 					bool intr)
2666 {
2667 	return xe_bo_create_pin_map_at_novm(xe, tile, size, ~0ull, type, flags, 0, intr);
2668 }
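
/*
 * Editorial usage sketch: a pinned, CPU-mapped kernel bo placed in VRAM on
 * discrete parts (falling back to system memory otherwise), later torn down
 * with xe_bo_unpin_map_no_vm(). The size and flags below are illustrative only.
 *
 *	struct xe_bo *bo;
 *
 *	bo = xe_bo_create_pin_map_novm(xe, tile, SZ_4K, ttm_bo_type_kernel,
 *				       XE_BO_FLAG_VRAM_IF_DGFX(tile) |
 *				       XE_BO_FLAG_GGTT, false);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 */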
2669 
2670 static void __xe_bo_unpin_map_no_vm(void *arg)
2671 {
2672 	xe_bo_unpin_map_no_vm(arg);
2673 }
2674 
2675 struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
2676 					   size_t size, u32 flags)
2677 {
2678 	struct xe_bo *bo;
2679 	int ret;
2680 
2681 	KUNIT_STATIC_STUB_REDIRECT(xe_managed_bo_create_pin_map, xe, tile, size, flags);
2682 	bo = xe_bo_create_pin_map_novm(xe, tile, size, ttm_bo_type_kernel, flags, true);
2683 	if (IS_ERR(bo))
2684 		return bo;
2685 
2686 	ret = devm_add_action_or_reset(xe->drm.dev, __xe_bo_unpin_map_no_vm, bo);
2687 	if (ret)
2688 		return ERR_PTR(ret);
2689 
2690 	return bo;
2691 }
2692 
2693 void xe_managed_bo_unpin_map_no_vm(struct xe_bo *bo)
2694 {
2695 	devm_release_action(xe_bo_device(bo)->drm.dev, __xe_bo_unpin_map_no_vm, bo);
2696 }
2697 
2698 struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
2699 					     const void *data, size_t size, u32 flags)
2700 {
2701 	struct xe_bo *bo = xe_managed_bo_create_pin_map(xe, tile, ALIGN(size, PAGE_SIZE), flags);
2702 
2703 	if (IS_ERR(bo))
2704 		return bo;
2705 
2706 	xe_map_memcpy_to(xe, &bo->vmap, 0, data, size);
2707 
2708 	return bo;
2709 }
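
/*
 * Editorial usage sketch: upload a firmware-style blob into a managed, pinned
 * and mapped bo; the devm action registered by xe_managed_bo_create_pin_map()
 * tears it down automatically on driver removal. "blob" and "blob_size" are
 * placeholders.
 *
 *	struct xe_bo *bo;
 *
 *	bo = xe_managed_bo_create_from_data(xe, tile, blob, blob_size,
 *					    XE_BO_FLAG_VRAM_IF_DGFX(tile) |
 *					    XE_BO_FLAG_GGTT);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 */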
2710 
2711 /**
2712  * xe_managed_bo_reinit_in_vram() - Replace a managed bo with one allocated in VRAM
2713  * @xe: xe device
2714  * @tile: Tile where the new buffer will be created
2715  * @src: Managed buffer object allocated in system memory
2716  *
2717  * Replace a managed src buffer object allocated in system memory with a new
2718  * one allocated in vram, copying the data between them.
2719  * The buffer object in VRAM is not going to have the same GGTT address; the
2720  * caller is responsible for making sure that any old references to it are updated.
2721  *
2722  * Returns 0 for success, negative error code otherwise.
2723  */
2724 int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src)
2725 {
2726 	struct xe_bo *bo;
2727 	u32 dst_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT;
2728 
2729 	dst_flags |= (*src)->flags & (XE_BO_FLAG_GGTT_INVALIDATE |
2730 				      XE_BO_FLAG_PINNED_NORESTORE);
2731 
2732 	xe_assert(xe, IS_DGFX(xe));
2733 	xe_assert(xe, !(*src)->vmap.is_iomem);
2734 
2735 	bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr,
2736 					    xe_bo_size(*src), dst_flags);
2737 	if (IS_ERR(bo))
2738 		return PTR_ERR(bo);
2739 
2740 	devm_release_action(xe->drm.dev, __xe_bo_unpin_map_no_vm, *src);
2741 	*src = bo;
2742 
2743 	return 0;
2744 }
2745 
2746 /*
2747  * XXX: This is in the VM bind data path, likely should calculate this once and
2748  * store, with a recalculation if the BO is moved.
2749  */
2750 uint64_t vram_region_gpu_offset(struct ttm_resource *res)
2751 {
2752 	struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
2753 
2754 	switch (res->mem_type) {
2755 	case XE_PL_STOLEN:
2756 		return xe_ttm_stolen_gpu_offset(xe);
2757 	case XE_PL_TT:
2758 	case XE_PL_SYSTEM:
2759 		return 0;
2760 	default:
2761 		return res_to_mem_region(res)->dpa_base;
2762 	}
2764 }
2765 
2766 /**
2767  * xe_bo_pin_external - pin an external BO
2768  * @bo: buffer object to be pinned
2769  * @in_place: Pin in current placement, don't attempt to migrate.
2770  * @exec: The drm_exec transaction to use for exhaustive eviction.
2771  *
2772  * Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
2773  * BO. Unique call compared to xe_bo_pin as this function has its own set of
2774  * asserts and code to ensure evict / restore on suspend / resume.
2775  *
2776  * Returns 0 for success, negative error code otherwise.
2777  */
2778 int xe_bo_pin_external(struct xe_bo *bo, bool in_place, struct drm_exec *exec)
2779 {
2780 	struct xe_device *xe = xe_bo_device(bo);
2781 	int err;
2782 
2783 	xe_assert(xe, !bo->vm);
2784 	xe_assert(xe, xe_bo_is_user(bo));
2785 
2786 	if (!xe_bo_is_pinned(bo)) {
2787 		if (!in_place) {
2788 			err = xe_bo_validate(bo, NULL, false, exec);
2789 			if (err)
2790 				return err;
2791 		}
2792 
2793 		spin_lock(&xe->pinned.lock);
2794 		list_add_tail(&bo->pinned_link, &xe->pinned.late.external);
2795 		spin_unlock(&xe->pinned.lock);
2796 	}
2797 
2798 	ttm_bo_pin(&bo->ttm);
2799 	if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
2800 		xe_ttm_tt_account_subtract(xe, bo->ttm.ttm);
2801 
2802 	/*
2803 	 * FIXME: If we always use the reserve / unreserve functions for locking
2804 	 * we do not need this.
2805 	 */
2806 	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
2807 
2808 	return 0;
2809 }
2810 
2811 /**
2812  * xe_bo_pin() - Pin a kernel bo after potentially migrating it
2813  * @bo: The kernel bo to pin.
2814  * @exec: The drm_exec transaction to use for exhaustive eviction.
2815  *
2816  * Attempts to migrate a bo to @bo->placement. If that succeeds,
2817  * pins the bo.
2818  *
2819  * Return: %0 on success, negative error code on migration failure.
2820  */
2821 int xe_bo_pin(struct xe_bo *bo, struct drm_exec *exec)
2822 {
2823 	struct ttm_place *place = &bo->placements[0];
2824 	struct xe_device *xe = xe_bo_device(bo);
2825 	int err;
2826 
2827 	/* We currently don't expect user BO to be pinned */
2828 	xe_assert(xe, !xe_bo_is_user(bo));
2829 
2830 	/* Pinned object must be in GGTT or have pinned flag */
2831 	xe_assert(xe, bo->flags & (XE_BO_FLAG_PINNED |
2832 				   XE_BO_FLAG_GGTT));
2833 
2834 	/*
2835 	 * No reason we can't support pinning imported dma-bufs, we just don't
2836 	 * expect to pin an imported dma-buf.
2837 	 */
2838 	xe_assert(xe, !bo->ttm.base.import_attach);
2839 
2840 	/* We only expect at most 1 pin */
2841 	xe_assert(xe, !xe_bo_is_pinned(bo));
2842 
2843 	err = xe_bo_validate(bo, NULL, false, exec);
2844 	if (err)
2845 		return err;
2846 
2847 	if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
2848 		spin_lock(&xe->pinned.lock);
2849 		if (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)
2850 			list_add_tail(&bo->pinned_link, &xe->pinned.late.kernel_bo_present);
2851 		else
2852 			list_add_tail(&bo->pinned_link, &xe->pinned.early.kernel_bo_present);
2853 		spin_unlock(&xe->pinned.lock);
2854 	}
2855 
2856 	ttm_bo_pin(&bo->ttm);
2857 	if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
2858 		xe_ttm_tt_account_subtract(xe, bo->ttm.ttm);
2859 
2860 	/*
2861 	 * FIXME: If we always use the reserve / unreserve functions for locking
2862 	 * we do not need this.
2863 	 */
2864 	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
2865 
2866 	return 0;
2867 }
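
/*
 * Editorial usage sketch (assuming a kernel bo created locked inside a
 * drm_exec transaction "exec"): pin it, drop the lock, and balance with
 * xe_bo_unpin() under the bo lock later, as the pin_range helper above does.
 *
 *	err = xe_bo_pin(bo, exec);
 *	xe_bo_unlock(bo);
 *	if (err)
 *		return err;
 *
 *	... use the pinned bo ...
 *
 *	xe_bo_lock(bo, false);
 *	xe_bo_unpin(bo);
 *	xe_bo_unlock(bo);
 */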
2868 
2869 /**
2870  * xe_bo_unpin_external - unpin an external BO
2871  * @bo: buffer object to be unpinned
2872  *
2873  * Unpin an external (not tied to a VM, can be exported via dma-buf / prime FD)
2874  * BO. Unique call compared to xe_bo_unpin as this function has its own set of
2875  * asserts and code to ensure evict / restore on suspend / resume.
2878  */
2879 void xe_bo_unpin_external(struct xe_bo *bo)
2880 {
2881 	struct xe_device *xe = xe_bo_device(bo);
2882 
2883 	xe_assert(xe, !bo->vm);
2884 	xe_assert(xe, xe_bo_is_pinned(bo));
2885 	xe_assert(xe, xe_bo_is_user(bo));
2886 
2887 	spin_lock(&xe->pinned.lock);
2888 	if (bo->ttm.pin_count == 1 && !list_empty(&bo->pinned_link))
2889 		list_del_init(&bo->pinned_link);
2890 	spin_unlock(&xe->pinned.lock);
2891 
2892 	ttm_bo_unpin(&bo->ttm);
2893 	if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
2894 		xe_ttm_tt_account_add(xe, bo->ttm.ttm);
2895 
2896 	/*
2897 	 * FIXME: If we always use the reserve / unreserve functions for locking
2898 	 * we do not need this.
2899 	 */
2900 	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
2901 }
2902 
2903 void xe_bo_unpin(struct xe_bo *bo)
2904 {
2905 	struct ttm_place *place = &bo->placements[0];
2906 	struct xe_device *xe = xe_bo_device(bo);
2907 
2908 	xe_assert(xe, !bo->ttm.base.import_attach);
2909 	xe_assert(xe, xe_bo_is_pinned(bo));
2910 
2911 	if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
2912 		spin_lock(&xe->pinned.lock);
2913 		xe_assert(xe, !list_empty(&bo->pinned_link));
2914 		list_del_init(&bo->pinned_link);
2915 		spin_unlock(&xe->pinned.lock);
2916 
2917 		if (bo->backup_obj) {
2918 			if (xe_bo_is_pinned(bo->backup_obj))
2919 				ttm_bo_unpin(&bo->backup_obj->ttm);
2920 			xe_bo_put(bo->backup_obj);
2921 			bo->backup_obj = NULL;
2922 		}
2923 	}
2924 	ttm_bo_unpin(&bo->ttm);
2925 	if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
2926 		xe_ttm_tt_account_add(xe, bo->ttm.ttm);
2927 }
2928 
2929 /**
2930  * xe_bo_validate() - Make sure the bo is in an allowed placement
2931  * @bo: The bo,
2932  * @vm: Pointer to the vm the bo shares a locked dma_resv object with, or
2933  *      NULL. Used together with @allow_res_evict.
2934  * @allow_res_evict: Whether it's allowed to evict bos sharing @vm's
2935  *                   reservation object.
2936  * @exec: The drm_exec transaction to use for exhaustive eviction.
2937  *
2938  * Make sure the bo is in allowed placement, migrating it if necessary. If
2939  * needed, other bos will be evicted. If bos selected for eviction share
2940  * the @vm's reservation object, they can be evicted only if @allow_res_evict is
2941  * set to true; otherwise they will be bypassed.
2942  *
2943  * Return: 0 on success, negative error code on failure. May return
2944  * -EINTR or -ERESTARTSYS if internal waits are interrupted by a signal.
2945  */
2946 int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict,
2947 		   struct drm_exec *exec)
2948 {
2949 	struct ttm_operation_ctx ctx = {
2950 		.interruptible = true,
2951 		.no_wait_gpu = false,
2952 		.gfp_retry_mayfail = true,
2953 	};
2954 	int ret;
2955 
2956 	if (xe_bo_is_pinned(bo))
2957 		return 0;
2958 
2959 	if (vm) {
2960 		lockdep_assert_held(&vm->lock);
2961 		xe_vm_assert_held(vm);
2962 
2963 		ctx.allow_res_evict = allow_res_evict;
2964 		ctx.resv = xe_vm_resv(vm);
2965 	}
2966 
2967 	xe_vm_set_validating(vm, allow_res_evict);
2968 	trace_xe_bo_validate(bo);
2969 	xe_validation_assert_exec(xe_bo_device(bo), exec, &bo->ttm.base);
2970 	ret = ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
2971 	xe_vm_clear_validating(vm, allow_res_evict);
2972 
2973 	return ret;
2974 }
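
/*
 * Editorial usage sketch: validate a vm-private bo into its allowed placements
 * while the vm's dma_resv is held through "exec", allowing eviction of other
 * bos sharing that reservation object.
 *
 *	err = xe_bo_validate(bo, vm, true, exec);
 *	if (err)
 *		return err;
 */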
2975 
2976 bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo)
2977 {
2978 	if (bo->destroy == &xe_ttm_bo_destroy)
2979 		return true;
2980 
2981 	return false;
2982 }
2983 
2984 /*
2985  * Resolve a BO address. There is no assert to check if the proper lock is held
2986  * so it should only be used in cases where it is not fatal to get the wrong
2987  * address, such as printing debug information, but not in cases where memory is
2988  * written based on this result.
2989  */
2990 dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
2991 {
2992 	struct xe_device *xe = xe_bo_device(bo);
2993 	struct xe_res_cursor cur;
2994 	u64 page;
2995 
2996 	xe_assert(xe, page_size <= PAGE_SIZE);
2997 	page = offset >> PAGE_SHIFT;
2998 	offset &= (PAGE_SIZE - 1);
2999 
3000 	if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) {
3001 		xe_assert(xe, bo->ttm.ttm);
3002 
3003 		xe_res_first_sg(xe_bo_sg(bo), page << PAGE_SHIFT,
3004 				page_size, &cur);
3005 		return xe_res_dma(&cur) + offset;
3006 	} else {
3009 		xe_res_first(bo->ttm.resource, page << PAGE_SHIFT,
3010 			     page_size, &cur);
3011 		return cur.start + offset + vram_region_gpu_offset(bo->ttm.resource);
3012 	}
3013 }
3014 
3015 dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
3016 {
3017 	if (!READ_ONCE(bo->ttm.pin_count))
3018 		xe_bo_assert_held(bo);
3019 	return __xe_bo_addr(bo, offset, page_size);
3020 }
3021 
3022 int xe_bo_vmap(struct xe_bo *bo)
3023 {
3024 	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
3025 	void *virtual;
3026 	bool is_iomem;
3027 	int ret;
3028 
3029 	xe_bo_assert_held(bo);
3030 
3031 	if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) ||
3032 			!force_contiguous(bo->flags)))
3033 		return -EINVAL;
3034 
3035 	if (!iosys_map_is_null(&bo->vmap))
3036 		return 0;
3037 
3038 	/*
3039 	 * We use this more or less deprecated interface for now since
3040 	 * ttm_bo_vmap() doesn't offer the optimization of kmapping
3041 	 * single page bos, which is done here.
3042 	 * TODO: Fix up ttm_bo_vmap to do that, or fix up ttm_bo_kmap
3043 	 * to use struct iosys_map.
3044 	 */
3045 	ret = ttm_bo_kmap(&bo->ttm, 0, xe_bo_size(bo) >> PAGE_SHIFT, &bo->kmap);
3046 	if (ret)
3047 		return ret;
3048 
3049 	virtual = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
3050 	if (is_iomem)
3051 		iosys_map_set_vaddr_iomem(&bo->vmap, (void __iomem *)virtual);
3052 	else
3053 		iosys_map_set_vaddr(&bo->vmap, virtual);
3054 
3055 	return 0;
3056 }
3057 
3058 static void __xe_bo_vunmap(struct xe_bo *bo)
3059 {
3060 	if (!iosys_map_is_null(&bo->vmap)) {
3061 		iosys_map_clear(&bo->vmap);
3062 		ttm_bo_kunmap(&bo->kmap);
3063 	}
3064 }
3065 
3066 void xe_bo_vunmap(struct xe_bo *bo)
3067 {
3068 	xe_bo_assert_held(bo);
3069 	__xe_bo_vunmap(bo);
3070 }
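
/*
 * Editorial usage sketch (assuming a bo created with
 * XE_BO_FLAG_NEEDS_CPU_ACCESS and a contiguous placement): map the bo, write
 * through the iosys_map helpers and unmap again, all under the bo lock.
 * "data" and "data_size" are placeholders.
 *
 *	err = xe_bo_vmap(bo);
 *	if (err)
 *		return err;
 *
 *	xe_map_memcpy_to(xe, &bo->vmap, 0, data, data_size);
 *	xe_bo_vunmap(bo);
 */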
3071 
3072 static int gem_create_set_pxp_type(struct xe_device *xe, struct xe_bo *bo, u64 value)
3073 {
3074 	if (value == DRM_XE_PXP_TYPE_NONE)
3075 		return 0;
3076 
3077 	/* we only support DRM_XE_PXP_TYPE_HWDRM for now */
3078 	if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM))
3079 		return -EINVAL;
3080 
3081 	return xe_pxp_key_assign(xe->pxp, bo);
3082 }
3083 
3084 typedef int (*xe_gem_create_set_property_fn)(struct xe_device *xe,
3085 					     struct xe_bo *bo,
3086 					     u64 value);
3087 
3088 static const xe_gem_create_set_property_fn gem_create_set_property_funcs[] = {
3089 	[DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE] = gem_create_set_pxp_type,
3090 };
3091 
3092 static int gem_create_user_ext_set_property(struct xe_device *xe,
3093 					    struct xe_bo *bo,
3094 					    u64 extension)
3095 {
3096 	u64 __user *address = u64_to_user_ptr(extension);
3097 	struct drm_xe_ext_set_property ext;
3098 	int err;
3099 	u32 idx;
3100 
3101 	err = copy_from_user(&ext, address, sizeof(ext));
3102 	if (XE_IOCTL_DBG(xe, err))
3103 		return -EFAULT;
3104 
3105 	if (XE_IOCTL_DBG(xe, ext.property >=
3106 			 ARRAY_SIZE(gem_create_set_property_funcs)) ||
3107 	    XE_IOCTL_DBG(xe, ext.pad) ||
3108 	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY))
3109 		return -EINVAL;
3110 
3111 	idx = array_index_nospec(ext.property, ARRAY_SIZE(gem_create_set_property_funcs));
3112 	if (!gem_create_set_property_funcs[idx])
3113 		return -EINVAL;
3114 
3115 	return gem_create_set_property_funcs[idx](xe, bo, ext.value);
3116 }
3117 
3118 typedef int (*xe_gem_create_user_extension_fn)(struct xe_device *xe,
3119 					       struct xe_bo *bo,
3120 					       u64 extension);
3121 
3122 static const xe_gem_create_user_extension_fn gem_create_user_extension_funcs[] = {
3123 	[DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY] = gem_create_user_ext_set_property,
3124 };
3125 
3126 #define MAX_USER_EXTENSIONS	16
3127 static int gem_create_user_extensions(struct xe_device *xe, struct xe_bo *bo,
3128 				      u64 extensions, int ext_number)
3129 {
3130 	u64 __user *address = u64_to_user_ptr(extensions);
3131 	struct drm_xe_user_extension ext;
3132 	int err;
3133 	u32 idx;
3134 
3135 	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
3136 		return -E2BIG;
3137 
3138 	err = copy_from_user(&ext, address, sizeof(ext));
3139 	if (XE_IOCTL_DBG(xe, err))
3140 		return -EFAULT;
3141 
3142 	if (XE_IOCTL_DBG(xe, ext.pad) ||
3143 	    XE_IOCTL_DBG(xe, ext.name >= ARRAY_SIZE(gem_create_user_extension_funcs)))
3144 		return -EINVAL;
3145 
3146 	idx = array_index_nospec(ext.name,
3147 				 ARRAY_SIZE(gem_create_user_extension_funcs));
3148 	err = gem_create_user_extension_funcs[idx](xe, bo, extensions);
3149 	if (XE_IOCTL_DBG(xe, err))
3150 		return err;
3151 
3152 	if (ext.next_extension)
3153 		return gem_create_user_extensions(xe, bo, ext.next_extension,
3154 						  ++ext_number);
3155 
3156 	return 0;
3157 }
3158 
3159 int xe_gem_create_ioctl(struct drm_device *dev, void *data,
3160 			struct drm_file *file)
3161 {
3162 	struct xe_device *xe = to_xe_device(dev);
3163 	struct xe_file *xef = to_xe_file(file);
3164 	struct drm_xe_gem_create *args = data;
3165 	struct xe_validation_ctx ctx;
3166 	struct drm_exec exec;
3167 	struct xe_vm *vm = NULL;
3168 	struct xe_bo *bo;
3169 	unsigned int bo_flags;
3170 	u32 handle;
3171 	int err;
3172 
3173 	if (XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
3174 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
3175 		return -EINVAL;
3176 
3177 	/* at least one valid memory placement must be specified */
3178 	if (XE_IOCTL_DBG(xe, (args->placement & ~xe->info.mem_region_mask) ||
3179 			 !args->placement))
3180 		return -EINVAL;
3181 
3182 	if (XE_IOCTL_DBG(xe, args->flags &
3183 			 ~(DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING |
3184 			   DRM_XE_GEM_CREATE_FLAG_SCANOUT |
3185 			   DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM)))
3186 		return -EINVAL;
3187 
3188 	if (XE_IOCTL_DBG(xe, args->handle))
3189 		return -EINVAL;
3190 
3191 	if (XE_IOCTL_DBG(xe, !args->size))
3192 		return -EINVAL;
3193 
3194 	if (XE_IOCTL_DBG(xe, args->size > SIZE_MAX))
3195 		return -EINVAL;
3196 
3197 	if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK))
3198 		return -EINVAL;
3199 
3200 	bo_flags = 0;
3201 	if (args->flags & DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING)
3202 		bo_flags |= XE_BO_FLAG_DEFER_BACKING;
3203 
3204 	if (args->flags & DRM_XE_GEM_CREATE_FLAG_SCANOUT)
3205 		bo_flags |= XE_BO_FLAG_SCANOUT;
3206 
3207 	bo_flags |= args->placement << (ffs(XE_BO_FLAG_SYSTEM) - 1);
3208 
3209 	/* CCS formats need physical placement at a 64K alignment in VRAM. */
3210 	if ((bo_flags & XE_BO_FLAG_VRAM_MASK) &&
3211 	    (bo_flags & XE_BO_FLAG_SCANOUT) &&
3212 	    !(xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) &&
3213 	    IS_ALIGNED(args->size, SZ_64K))
3214 		bo_flags |= XE_BO_FLAG_NEEDS_64K;
3215 
3216 	if (args->flags & DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) {
3217 		if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_FLAG_VRAM_MASK)))
3218 			return -EINVAL;
3219 
3220 		bo_flags |= XE_BO_FLAG_NEEDS_CPU_ACCESS;
3221 	}
3222 
3223 	if (XE_IOCTL_DBG(xe, !args->cpu_caching ||
3224 			 args->cpu_caching > DRM_XE_GEM_CPU_CACHING_WC))
3225 		return -EINVAL;
3226 
3227 	if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_VRAM_MASK &&
3228 			 args->cpu_caching != DRM_XE_GEM_CPU_CACHING_WC))
3229 		return -EINVAL;
3230 
3231 	if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_SCANOUT &&
3232 			 args->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB))
3233 		return -EINVAL;
3234 
3235 	if (args->vm_id) {
3236 		vm = xe_vm_lookup(xef, args->vm_id);
3237 		if (XE_IOCTL_DBG(xe, !vm))
3238 			return -ENOENT;
3239 	}
3240 
3241 	err = 0;
3242 	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
3243 			    err) {
3244 		if (vm) {
3245 			err = xe_vm_drm_exec_lock(vm, &exec);
3246 			drm_exec_retry_on_contention(&exec);
3247 			if (err)
3248 				break;
3249 		}
3250 		bo = xe_bo_create_user(xe, vm, args->size, args->cpu_caching,
3251 				       bo_flags, &exec);
3252 		drm_exec_retry_on_contention(&exec);
3253 		if (IS_ERR(bo)) {
3254 			err = PTR_ERR(bo);
3255 			xe_validation_retry_on_oom(&ctx, &err);
3256 			break;
3257 		}
3258 	}
3259 	if (err)
3260 		goto out_vm;
3261 
3262 	if (args->extensions) {
3263 		err = gem_create_user_extensions(xe, bo, args->extensions, 0);
3264 		if (err)
3265 			goto out_bulk;
3266 	}
3267 
3268 	err = drm_gem_handle_create(file, &bo->ttm.base, &handle);
3269 	if (err)
3270 		goto out_bulk;
3271 
3272 	args->handle = handle;
3273 	goto out_put;
3274 
3275 out_bulk:
3276 	if (vm && !xe_vm_in_fault_mode(vm)) {
3277 		xe_vm_lock(vm, false);
3278 		__xe_bo_unset_bulk_move(bo);
3279 		xe_vm_unlock(vm);
3280 	}
3281 out_put:
3282 	xe_bo_put(bo);
3283 out_vm:
3284 	if (vm)
3285 		xe_vm_put(vm);
3286 
3287 	return err;
3288 }
3289 
3290 int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
3291 			     struct drm_file *file)
3292 {
3293 	struct xe_device *xe = to_xe_device(dev);
3294 	struct drm_xe_gem_mmap_offset *args = data;
3295 	struct drm_gem_object *gem_obj;
3296 
3297 	if (XE_IOCTL_DBG(xe, args->extensions) ||
3298 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
3299 		return -EINVAL;
3300 
3301 	if (XE_IOCTL_DBG(xe, args->flags &
3302 			 ~DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER))
3303 		return -EINVAL;
3304 
3305 	if (args->flags & DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER) {
3306 		if (XE_IOCTL_DBG(xe, !IS_DGFX(xe)))
3307 			return -EINVAL;
3308 
3309 		if (XE_IOCTL_DBG(xe, args->handle))
3310 			return -EINVAL;
3311 
3312 		if (XE_IOCTL_DBG(xe, PAGE_SIZE > SZ_4K))
3313 			return -EINVAL;
3314 
3315 		BUILD_BUG_ON(((XE_PCI_BARRIER_MMAP_OFFSET >> XE_PTE_SHIFT) +
3316 			      SZ_4K) >= DRM_FILE_PAGE_OFFSET_START);
3317 		args->offset = XE_PCI_BARRIER_MMAP_OFFSET;
3318 		return 0;
3319 	}
3320 
3321 	gem_obj = drm_gem_object_lookup(file, args->handle);
3322 	if (XE_IOCTL_DBG(xe, !gem_obj))
3323 		return -ENOENT;
3324 
3325 	/* The mmap offset was set up at BO allocation time. */
3326 	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
3327 
3328 	xe_bo_put(gem_to_xe_bo(gem_obj));
3329 	return 0;
3330 }
3331 
3332 /**
3333  * xe_bo_lock() - Lock the buffer object's dma_resv object
3334  * @bo: The struct xe_bo whose lock is to be taken
3335  * @intr: Whether to perform any wait interruptible
3336  *
3337  * Locks the buffer object's dma_resv object. If the buffer object is
3338  * pointing to a shared dma_resv object, that shared lock is locked.
3339  *
3340  * Return: 0 on success, -EINTR if @intr is true and the wait for a
3341  * contended lock was interrupted. If @intr is set to false, the
3342  * function always returns 0.
3343  */
3344 int xe_bo_lock(struct xe_bo *bo, bool intr)
3345 {
3346 	if (intr)
3347 		return dma_resv_lock_interruptible(bo->ttm.base.resv, NULL);
3348 
3349 	dma_resv_lock(bo->ttm.base.resv, NULL);
3350 
3351 	return 0;
3352 }
3353 
3354 /**
3355  * xe_bo_unlock() - Unlock the buffer object's dma_resv object
3356  * @bo: The struct xe_bo whose lock is to be released.
3357  *
3358  * Unlock a buffer object lock that was locked by xe_bo_lock().
3359  */
3360 void xe_bo_unlock(struct xe_bo *bo)
3361 {
3362 	dma_resv_unlock(bo->ttm.base.resv);
3363 }
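
/*
 * Editorial usage sketch: the interruptible flavour of the lock/unlock pair,
 * as used for operations that may block on a contended shared resv.
 *
 *	err = xe_bo_lock(bo, true);
 *	if (err)
 *		return err;
 *
 *	... access or modify bo state under the lock ...
 *
 *	xe_bo_unlock(bo);
 */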
3364 
3365 /**
3366  * xe_bo_can_migrate - Whether a buffer object likely can be migrated
3367  * @bo: The buffer object to migrate
3368  * @mem_type: The TTM memory type intended to migrate to
3369  *
3370  * Check whether the buffer object supports migration to the
3371  * given memory type. Note that pinning may affect the ability to migrate as
3372  * returned by this function.
3373  *
3374  * This function is primarily intended as a helper for checking the
3375  * possibility to migrate buffer objects and can be called without
3376  * the object lock held.
3377  *
3378  * Return: true if migration is possible, false otherwise.
3379  */
3380 bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type)
3381 {
3382 	unsigned int cur_place;
3383 
3384 	if (bo->ttm.type == ttm_bo_type_kernel)
3385 		return true;
3386 
3387 	if (bo->ttm.type == ttm_bo_type_sg)
3388 		return false;
3389 
3390 	for (cur_place = 0; cur_place < bo->placement.num_placement;
3391 	     cur_place++) {
3392 		if (bo->placements[cur_place].mem_type == mem_type)
3393 			return true;
3394 	}
3395 
3396 	return false;
3397 }
3398 
3399 static void xe_place_from_ttm_type(u32 mem_type, struct ttm_place *place)
3400 {
3401 	memset(place, 0, sizeof(*place));
3402 	place->mem_type = mem_type;
3403 }
3404 
3405 /**
3406  * xe_bo_migrate - Migrate an object to the desired region id
3407  * @bo: The buffer object to migrate.
3408  * @mem_type: The TTM region type to migrate to.
3409  * @tctx: A pointer to a struct ttm_operation_ctx or NULL if
3410  * a default interruptible ctx is to be used.
3411  * @exec: The drm_exec transaction to use for exhaustive eviction.
3412  *
3413  * Attempt to migrate the buffer object to the desired memory region. The
3414  * buffer object may not be pinned, and must be locked.
3415  * On successful completion, the object memory type will be updated,
3416  * but an async migration task may not have completed yet, and to
3417  * accomplish that, the object's kernel fences must be signaled with
3418  * the object lock held.
3419  *
3420  * Return: 0 on success. Negative error code on failure. In particular may
3421  * return -EINTR or -ERESTARTSYS if signal pending.
3422  */
3423 int xe_bo_migrate(struct xe_bo *bo, u32 mem_type, struct ttm_operation_ctx *tctx,
3424 		  struct drm_exec *exec)
3425 {
3426 	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
3427 	struct ttm_operation_ctx ctx = {
3428 		.interruptible = true,
3429 		.no_wait_gpu = false,
3430 		.gfp_retry_mayfail = true,
3431 	};
3432 	struct ttm_placement placement;
3433 	struct ttm_place requested;
3434 
3435 	xe_bo_assert_held(bo);
3436 	tctx = tctx ? tctx : &ctx;
3437 
3438 	if (bo->ttm.resource->mem_type == mem_type)
3439 		return 0;
3440 
3441 	if (xe_bo_is_pinned(bo))
3442 		return -EBUSY;
3443 
3444 	if (!xe_bo_can_migrate(bo, mem_type))
3445 		return -EINVAL;
3446 
3447 	xe_place_from_ttm_type(mem_type, &requested);
3448 	placement.num_placement = 1;
3449 	placement.placement = &requested;
3450 
3451 	/*
3452 	 * Stolen needs to be handled like the VRAM handling below if we ever
3453 	 * need to support it.
3454 	 */
3455 	drm_WARN_ON(&xe->drm, mem_type == XE_PL_STOLEN);
3456 
3457 	if (mem_type_is_vram(mem_type)) {
3458 		u32 c = 0;
3459 
3460 		add_vram(xe, bo, &requested, bo->flags, mem_type, &c);
3461 	}
3462 
3463 	if (!tctx->no_wait_gpu)
3464 		xe_validation_assert_exec(xe_bo_device(bo), exec, &bo->ttm.base);
3465 	return ttm_bo_validate(&bo->ttm, &placement, tctx);
3466 }
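
/*
 * Editorial usage sketch: migrate a locked bo to GTT with the default
 * interruptible context, then wait for the async move to complete as the
 * kernel-doc above requires before accessing the memory.
 *
 *	long timeout;
 *	int err;
 *
 *	err = xe_bo_migrate(bo, XE_PL_TT, NULL, exec);
 *	if (err)
 *		return err;
 *
 *	timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
 *					DMA_RESV_USAGE_KERNEL, true,
 *					MAX_SCHEDULE_TIMEOUT);
 *	if (timeout <= 0)
 *		return timeout ?: -ETIME;
 */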
3467 
3468 /**
3469  * xe_bo_evict - Evict an object to evict placement
3470  * @bo: The buffer object to migrate.
3471  * @exec: The drm_exec transaction to use for exhaustive eviction.
3472  *
3473  * On successful completion, the object memory will be moved to evict
3474  * placement. This function blocks until the object has been fully moved.
3475  *
3476  * Return: 0 on success. Negative error code on failure.
3477  */
3478 int xe_bo_evict(struct xe_bo *bo, struct drm_exec *exec)
3479 {
3480 	struct ttm_operation_ctx ctx = {
3481 		.interruptible = false,
3482 		.no_wait_gpu = false,
3483 		.gfp_retry_mayfail = true,
3484 	};
3485 	struct ttm_placement placement;
3486 	int ret;
3487 
3488 	xe_evict_flags(&bo->ttm, &placement);
3489 	ret = ttm_bo_validate(&bo->ttm, &placement, &ctx);
3490 	if (ret)
3491 		return ret;
3492 
3493 	dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
3494 			      false, MAX_SCHEDULE_TIMEOUT);
3495 
3496 	return 0;
3497 }
3498 
3499 /**
3500  * xe_bo_needs_ccs_pages - Whether a bo needs to back up CCS pages when
3501  * placed in system memory.
3502  * @bo: The xe_bo
3503  *
3504  * Return: true if extra pages need to be allocated, false otherwise.
3505  */
3506 bool xe_bo_needs_ccs_pages(struct xe_bo *bo)
3507 {
3508 	struct xe_device *xe = xe_bo_device(bo);
3509 
3510 	if (GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe))
3511 		return false;
3512 
3513 	if (!xe_device_has_flat_ccs(xe) || bo->ttm.type != ttm_bo_type_device)
3514 		return false;
3515 
3516 	/* On discrete GPUs, if the GPU can access this buffer from
3517 	 * system memory (i.e., it allows XE_PL_TT placement), FlatCCS
3518 	 * can't be used since there's no CCS storage associated with
3519 	 * non-VRAM addresses.
3520 	 */
3521 	if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM))
3522 		return false;
3523 
3524 	/*
3525 	 * Compression implies coh_none, therefore we know for sure that WB
3526 	 * memory can't currently use compression, which is likely one of the
3527 	 * common cases.
3528 	 */
3529 	if (bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)
3530 		return false;
3531 
3532 	return true;
3533 }
3534 
3535 /**
3536  * __xe_bo_release_dummy() - Dummy kref release function
3537  * @kref: The embedded struct kref.
3538  *
3539  * Dummy release function for xe_bo_put_deferred(). Keep off.
3540  */
3541 void __xe_bo_release_dummy(struct kref *kref)
3542 {
3543 }
3544 
3545 /**
3546  * xe_bo_put_commit() - Put bos whose put was deferred by xe_bo_put_deferred().
3547  * @deferred: The lockless list used for the call to xe_bo_put_deferred().
3548  *
3549  * Puts all bos whose put was deferred by xe_bo_put_deferred().
3550  * The @deferred list can be either an onstack local list or a global
3551  * shared list used by a workqueue.
3552  */
3553 void xe_bo_put_commit(struct llist_head *deferred)
3554 {
3555 	struct llist_node *freed;
3556 	struct xe_bo *bo, *next;
3557 
3558 	if (!deferred)
3559 		return;
3560 
3561 	freed = llist_del_all(deferred);
3562 	if (!freed)
3563 		return;
3564 
3565 	llist_for_each_entry_safe(bo, next, freed, freed)
3566 		drm_gem_object_free(&bo->ttm.base.refcount);
3567 }
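
/*
 * Editorial usage sketch: defer puts from a context where the final free must
 * not run, then commit them later. xe_bo_put_deferred() is the inline helper
 * in xe_bo.h that pairs with xe_bo_put_commit().
 *
 *	LLIST_HEAD(deferred);
 *
 *	xe_bo_put_deferred(bo, &deferred);
 *	...
 *	xe_bo_put_commit(&deferred);
 */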
3568 
3569 static void xe_bo_dev_work_func(struct work_struct *work)
3570 {
3571 	struct xe_bo_dev *bo_dev = container_of(work, typeof(*bo_dev), async_free);
3572 
3573 	xe_bo_put_commit(&bo_dev->async_list);
3574 }
3575 
3576 /**
3577  * xe_bo_dev_init() - Initialize BO dev to manage async BO freeing
3578  * @bo_dev: The BO dev structure
3579  */
3580 void xe_bo_dev_init(struct xe_bo_dev *bo_dev)
3581 {
3582 	INIT_WORK(&bo_dev->async_free, xe_bo_dev_work_func);
3583 }
3584 
3585 /**
3586  * xe_bo_dev_fini() - Finalize BO dev managing async BO freeing
3587  * @bo_dev: The BO dev structure
3588  */
3589 void xe_bo_dev_fini(struct xe_bo_dev *bo_dev)
3590 {
3591 	flush_work(&bo_dev->async_free);
3592 }
3593 
3594 void xe_bo_put(struct xe_bo *bo)
3595 {
3596 	struct xe_tile *tile;
3597 	u8 id;
3598 
3599 	might_sleep();
3600 	if (bo) {
3601 #ifdef CONFIG_PROC_FS
3602 		if (bo->client)
3603 			might_lock(&bo->client->bos_lock);
3604 #endif
3605 		for_each_tile(tile, xe_bo_device(bo), id)
3606 			if (bo->ggtt_node[id] && bo->ggtt_node[id]->ggtt)
3607 				xe_ggtt_might_lock(bo->ggtt_node[id]->ggtt);
3608 		drm_gem_object_put(&bo->ttm.base);
3609 	}
3610 }
3611 
3612 /**
3613  * xe_bo_dumb_create - Create a dumb bo as backing for a fb
3614  * @file_priv: The drm file.
3615  * @dev: The drm device.
3616  * @args: The dumb buffer arguments, see struct drm_mode_create_dumb.
3617  *
3618  * See dumb_create() hook in include/drm/drm_drv.h
3619  *
3620  * Return: 0 on success, negative error code on failure.
3621  */
3622 int xe_bo_dumb_create(struct drm_file *file_priv,
3623 		      struct drm_device *dev,
3624 		      struct drm_mode_create_dumb *args)
3625 {
3626 	struct xe_device *xe = to_xe_device(dev);
3627 	struct xe_bo *bo;
3628 	uint32_t handle;
3629 	int cpp = DIV_ROUND_UP(args->bpp, 8);
3630 	int err;
3631 	u32 page_size = max_t(u32, PAGE_SIZE,
3632 		xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K);
3633 
3634 	args->pitch = ALIGN(args->width * cpp, 64);
3635 	args->size = ALIGN(mul_u32_u32(args->pitch, args->height),
3636 			   page_size);
3637 
3638 	bo = xe_bo_create_user(xe, NULL, args->size,
3639 			       DRM_XE_GEM_CPU_CACHING_WC,
3640 			       XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
3641 			       XE_BO_FLAG_SCANOUT |
3642 			       XE_BO_FLAG_NEEDS_CPU_ACCESS, NULL);
3643 	if (IS_ERR(bo))
3644 		return PTR_ERR(bo);
3645 
3646 	err = drm_gem_handle_create(file_priv, &bo->ttm.base, &handle);
3647 	/* drop reference from allocate - handle holds it now */
3648 	drm_gem_object_put(&bo->ttm.base);
3649 	if (!err)
3650 		args->handle = handle;
3651 	return err;
3652 }
3653 
3654 void xe_bo_runtime_pm_release_mmap_offset(struct xe_bo *bo)
3655 {
3656 	struct ttm_buffer_object *tbo = &bo->ttm;
3657 	struct ttm_device *bdev = tbo->bdev;
3658 
3659 	drm_vma_node_unmap(&tbo->base.vma_node, bdev->dev_mapping);
3660 
3661 	list_del_init(&bo->vram_userfault_link);
3662 }
3663 
3664 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
3665 #include "tests/xe_bo.c"
3666 #endif
3667