xref: /linux/drivers/gpu/drm/xe/xe_bo.c (revision 6c7353836a91b1479e6b81791cdc163fb04b4834)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_bo.h"
7 
8 #include <linux/dma-buf.h>
9 
10 #include <drm/drm_drv.h>
11 #include <drm/drm_gem_ttm_helper.h>
12 #include <drm/drm_managed.h>
13 #include <drm/ttm/ttm_device.h>
14 #include <drm/ttm/ttm_placement.h>
15 #include <drm/ttm/ttm_tt.h>
16 #include <drm/xe_drm.h>
17 
18 #include "xe_device.h"
19 #include "xe_dma_buf.h"
20 #include "xe_drm_client.h"
21 #include "xe_ggtt.h"
22 #include "xe_gt.h"
23 #include "xe_map.h"
24 #include "xe_migrate.h"
25 #include "xe_preempt_fence.h"
26 #include "xe_res_cursor.h"
27 #include "xe_trace.h"
28 #include "xe_ttm_stolen_mgr.h"
29 #include "xe_vm.h"
30 
31 static const struct ttm_place sys_placement_flags = {
32 	.fpfn = 0,
33 	.lpfn = 0,
34 	.mem_type = XE_PL_SYSTEM,
35 	.flags = 0,
36 };
37 
38 static struct ttm_placement sys_placement = {
39 	.num_placement = 1,
40 	.placement = &sys_placement_flags,
41 	.num_busy_placement = 1,
42 	.busy_placement = &sys_placement_flags,
43 };
44 
45 static const struct ttm_place tt_placement_flags = {
46 	.fpfn = 0,
47 	.lpfn = 0,
48 	.mem_type = XE_PL_TT,
49 	.flags = 0,
50 };
51 
52 static struct ttm_placement tt_placement = {
53 	.num_placement = 1,
54 	.placement = &tt_placement_flags,
55 	.num_busy_placement = 1,
56 	.busy_placement = &sys_placement_flags,
57 };
58 
59 bool mem_type_is_vram(u32 mem_type)
60 {
61 	return mem_type >= XE_PL_VRAM0 && mem_type != XE_PL_STOLEN;
62 }
63 
64 static bool resource_is_stolen_vram(struct xe_device *xe, struct ttm_resource *res)
65 {
66 	return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe);
67 }
68 
69 static bool resource_is_vram(struct ttm_resource *res)
70 {
71 	return mem_type_is_vram(res->mem_type);
72 }
73 
74 bool xe_bo_is_vram(struct xe_bo *bo)
75 {
76 	return resource_is_vram(bo->ttm.resource) ||
77 		resource_is_stolen_vram(xe_bo_device(bo), bo->ttm.resource);
78 }
79 
80 bool xe_bo_is_stolen(struct xe_bo *bo)
81 {
82 	return bo->ttm.resource->mem_type == XE_PL_STOLEN;
83 }
84 
85 /**
86  * xe_bo_is_stolen_devmem - check if BO is of stolen type accessed via PCI BAR
87  * @bo: The BO
88  *
89  * The stolen memory is accessed through the PCI BAR for both DGFX and some
90  * integrated platforms that have a dedicated bit in the PTE for devmem (DM).
91  *
92  * Returns: true if it's stolen memory accessed via PCI BAR, false otherwise.
93  */
94 bool xe_bo_is_stolen_devmem(struct xe_bo *bo)
95 {
96 	return xe_bo_is_stolen(bo) &&
97 		GRAPHICS_VERx100(xe_bo_device(bo)) >= 1270;
98 }
99 
100 static bool xe_bo_is_user(struct xe_bo *bo)
101 {
102 	return bo->flags & XE_BO_CREATE_USER_BIT;
103 }
104 
105 static struct xe_migrate *
106 mem_type_to_migrate(struct xe_device *xe, u32 mem_type)
107 {
108 	struct xe_tile *tile;
109 
110 	xe_assert(xe, mem_type == XE_PL_STOLEN || mem_type_is_vram(mem_type));
111 	tile = &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)];
112 	return tile->migrate;
113 }
114 
115 static struct xe_mem_region *res_to_mem_region(struct ttm_resource *res)
116 {
117 	struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
118 	struct ttm_resource_manager *mgr;
119 
120 	xe_assert(xe, resource_is_vram(res));
121 	mgr = ttm_manager_type(&xe->ttm, res->mem_type);
122 	return to_xe_ttm_vram_mgr(mgr)->vram;
123 }
124 
125 static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
126 			   u32 bo_flags, u32 *c)
127 {
128 	if (bo_flags & XE_BO_CREATE_SYSTEM_BIT) {
129 		xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
130 
131 		bo->placements[*c] = (struct ttm_place) {
132 			.mem_type = XE_PL_TT,
133 		};
134 		*c += 1;
135 
136 		if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
137 			bo->props.preferred_mem_type = XE_PL_TT;
138 	}
139 }
140 
141 static void add_vram(struct xe_device *xe, struct xe_bo *bo,
142 		     struct ttm_place *places, u32 bo_flags, u32 mem_type, u32 *c)
143 {
144 	struct ttm_place place = { .mem_type = mem_type };
145 	struct xe_mem_region *vram;
146 	u64 io_size;
147 
148 	xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
149 
150 	vram = to_xe_ttm_vram_mgr(ttm_manager_type(&xe->ttm, mem_type))->vram;
151 	xe_assert(xe, vram && vram->usable_size);
152 	io_size = vram->io_size;
153 
154 	/*
155 	 * For eviction / restore on suspend / resume, objects
156 	 * pinned in VRAM must be contiguous.
157 	 */
158 	if (bo_flags & (XE_BO_CREATE_PINNED_BIT |
159 			XE_BO_CREATE_GGTT_BIT))
160 		place.flags |= TTM_PL_FLAG_CONTIGUOUS;
161 
162 	if (io_size < vram->usable_size) {
163 		if (bo_flags & XE_BO_NEEDS_CPU_ACCESS) {
164 			place.fpfn = 0;
165 			place.lpfn = io_size >> PAGE_SHIFT;
166 		} else {
167 			place.flags |= TTM_PL_FLAG_TOPDOWN;
168 		}
169 	}
170 	places[*c] = place;
171 	*c += 1;
172 
173 	if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
174 		bo->props.preferred_mem_type = mem_type;
175 }
176 
177 static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
178 			 u32 bo_flags, u32 *c)
179 {
180 	if (bo->props.preferred_gt == XE_GT1) {
181 		if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
182 			add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
183 		if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
184 			add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
185 	} else {
186 		if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
187 			add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
188 		if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
189 			add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
190 	}
191 }
192 
193 static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
194 			   u32 bo_flags, u32 *c)
195 {
196 	if (bo_flags & XE_BO_CREATE_STOLEN_BIT) {
197 		xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
198 
199 		bo->placements[*c] = (struct ttm_place) {
200 			.mem_type = XE_PL_STOLEN,
201 			.flags = bo_flags & (XE_BO_CREATE_PINNED_BIT |
202 					     XE_BO_CREATE_GGTT_BIT) ?
203 				TTM_PL_FLAG_CONTIGUOUS : 0,
204 		};
205 		*c += 1;
206 	}
207 }
208 
209 static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
210 				       u32 bo_flags)
211 {
212 	u32 c = 0;
213 
214 	bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;
215 
216 	/* The order of placements should indicate preferred location */
217 
218 	if (bo->props.preferred_mem_class == DRM_XE_MEM_REGION_CLASS_SYSMEM) {
219 		try_add_system(xe, bo, bo_flags, &c);
220 		try_add_vram(xe, bo, bo_flags, &c);
221 	} else {
222 		try_add_vram(xe, bo, bo_flags, &c);
223 		try_add_system(xe, bo, bo_flags, &c);
224 	}
225 	try_add_stolen(xe, bo, bo_flags, &c);
226 
227 	if (!c)
228 		return -EINVAL;
229 
230 	bo->placement = (struct ttm_placement) {
231 		.num_placement = c,
232 		.placement = bo->placements,
233 		.num_busy_placement = c,
234 		.busy_placement = bo->placements,
235 	};
236 
237 	return 0;
238 }
239 
240 int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
241 			      u32 bo_flags)
242 {
243 	xe_bo_assert_held(bo);
244 	return __xe_bo_placement_for_flags(xe, bo, bo_flags);
245 }
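
/*
 * Illustrative sketch of how creation flags translate into a placement list
 * (flag names come from xe_bo.h; error handling trimmed):
 *
 *	u32 flags = XE_BO_CREATE_VRAM0_BIT | XE_BO_CREATE_SYSTEM_BIT;
 *
 *	xe_bo_lock(bo, false);
 *	err = xe_bo_placement_for_flags(xe, bo, flags);
 *	xe_bo_unlock(bo);
 *
 * Unless bo->props.preferred_mem_class selects SYSMEM, the resulting
 * bo->placement lists XE_PL_VRAM0 before XE_PL_TT, so validation prefers
 * VRAM and falls back to system memory.
 */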
246 
247 static void xe_evict_flags(struct ttm_buffer_object *tbo,
248 			   struct ttm_placement *placement)
249 {
250 	if (!xe_bo_is_xe_bo(tbo)) {
251 		/* Don't handle scatter gather BOs */
252 		if (tbo->type == ttm_bo_type_sg) {
253 			placement->num_placement = 0;
254 			placement->num_busy_placement = 0;
255 			return;
256 		}
257 
258 		*placement = sys_placement;
259 		return;
260 	}
261 
262 	/*
263 	 * For xe, sg bos that are evicted to system just trigger a
264 	 * rebind of the sg list upon subsequent validation to XE_PL_TT.
265 	 */
266 	switch (tbo->resource->mem_type) {
267 	case XE_PL_VRAM0:
268 	case XE_PL_VRAM1:
269 	case XE_PL_STOLEN:
270 		*placement = tt_placement;
271 		break;
272 	case XE_PL_TT:
273 	default:
274 		*placement = sys_placement;
275 		break;
276 	}
277 }
278 
279 struct xe_ttm_tt {
280 	struct ttm_tt ttm;
281 	struct device *dev;
282 	struct sg_table sgt;
283 	struct sg_table *sg;
284 };
285 
286 static int xe_tt_map_sg(struct ttm_tt *tt)
287 {
288 	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
289 	unsigned long num_pages = tt->num_pages;
290 	int ret;
291 
292 	XE_WARN_ON(tt->page_flags & TTM_TT_FLAG_EXTERNAL);
293 
294 	if (xe_tt->sg)
295 		return 0;
296 
297 	ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages,
298 						num_pages, 0,
299 						(u64)num_pages << PAGE_SHIFT,
300 						xe_sg_segment_size(xe_tt->dev),
301 						GFP_KERNEL);
302 	if (ret)
303 		return ret;
304 
305 	xe_tt->sg = &xe_tt->sgt;
306 	ret = dma_map_sgtable(xe_tt->dev, xe_tt->sg, DMA_BIDIRECTIONAL,
307 			      DMA_ATTR_SKIP_CPU_SYNC);
308 	if (ret) {
309 		sg_free_table(xe_tt->sg);
310 		xe_tt->sg = NULL;
311 		return ret;
312 	}
313 
314 	return 0;
315 }
316 
317 struct sg_table *xe_bo_sg(struct xe_bo *bo)
318 {
319 	struct ttm_tt *tt = bo->ttm.ttm;
320 	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
321 
322 	return xe_tt->sg;
323 }
324 
325 static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
326 				       u32 page_flags)
327 {
328 	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
329 	struct xe_device *xe = xe_bo_device(bo);
330 	struct xe_ttm_tt *tt;
331 	unsigned long extra_pages;
332 	enum ttm_caching caching;
333 	int err;
334 
335 	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
336 	if (!tt)
337 		return NULL;
338 
339 	tt->dev = xe->drm.dev;
340 
341 	extra_pages = 0;
342 	if (xe_bo_needs_ccs_pages(bo))
343 		extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size),
344 					   PAGE_SIZE);
345 
346 	switch (bo->cpu_caching) {
347 	case DRM_XE_GEM_CPU_CACHING_WC:
348 		caching = ttm_write_combined;
349 		break;
350 	default:
351 		caching = ttm_cached;
352 		break;
353 	}
354 
355 	WARN_ON((bo->flags & XE_BO_CREATE_USER_BIT) && !bo->cpu_caching);
356 
357 	/*
358 	 * Display scanout is always non-coherent with the CPU cache.
359 	 *
360 	 * For Xe_LPG and beyond, PPGTT PTE lookups are also non-coherent and
361 	 * require a CPU:WC mapping.
362 	 */
363 	if ((!bo->cpu_caching && bo->flags & XE_BO_SCANOUT_BIT) ||
364 	    (xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_PAGETABLE))
365 		caching = ttm_write_combined;
366 
367 	err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages);
368 	if (err) {
369 		kfree(tt);
370 		return NULL;
371 	}
372 
373 	return &tt->ttm;
374 }
375 
376 static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt,
377 			      struct ttm_operation_ctx *ctx)
378 {
379 	int err;
380 
381 	/*
382 	 * dma-bufs are not populated with pages, and the dma-
383 	 * addresses are set up when moved to XE_PL_TT.
384 	 */
385 	if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
386 		return 0;
387 
388 	err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx);
389 	if (err)
390 		return err;
391 
392 	/* A follow-up may move this to xe_bo_move() when the BO is moved to XE_PL_TT */
393 	err = xe_tt_map_sg(tt);
394 	if (err)
395 		ttm_pool_free(&ttm_dev->pool, tt);
396 
397 	return err;
398 }
399 
400 static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt)
401 {
402 	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
403 
404 	if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
405 		return;
406 
407 	if (xe_tt->sg) {
408 		dma_unmap_sgtable(xe_tt->dev, xe_tt->sg,
409 				  DMA_BIDIRECTIONAL, 0);
410 		sg_free_table(xe_tt->sg);
411 		xe_tt->sg = NULL;
412 	}
413 
414 	return ttm_pool_free(&ttm_dev->pool, tt);
415 }
416 
417 static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt)
418 {
419 	ttm_tt_fini(tt);
420 	kfree(tt);
421 }
422 
423 static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
424 				 struct ttm_resource *mem)
425 {
426 	struct xe_device *xe = ttm_to_xe_device(bdev);
427 
428 	switch (mem->mem_type) {
429 	case XE_PL_SYSTEM:
430 	case XE_PL_TT:
431 		return 0;
432 	case XE_PL_VRAM0:
433 	case XE_PL_VRAM1: {
434 		struct xe_ttm_vram_mgr_resource *vres =
435 			to_xe_ttm_vram_mgr_resource(mem);
436 		struct xe_mem_region *vram = res_to_mem_region(mem);
437 
438 		if (vres->used_visible_size < mem->size)
439 			return -EINVAL;
440 
441 		mem->bus.offset = mem->start << PAGE_SHIFT;
442 
443 		if (vram->mapping &&
444 		    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
445 			mem->bus.addr = (u8 __force *)vram->mapping +
446 				mem->bus.offset;
447 
448 		mem->bus.offset += vram->io_start;
449 		mem->bus.is_iomem = true;
450 
451 #if  !defined(CONFIG_X86)
452 		mem->bus.caching = ttm_write_combined;
453 #endif
454 		return 0;
455 	} case XE_PL_STOLEN:
456 		return xe_ttm_stolen_io_mem_reserve(xe, mem);
457 	default:
458 		return -EINVAL;
459 	}
460 }
461 
462 static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
463 				const struct ttm_operation_ctx *ctx)
464 {
465 	struct dma_resv_iter cursor;
466 	struct dma_fence *fence;
467 	struct drm_gem_object *obj = &bo->ttm.base;
468 	struct drm_gpuvm_bo *vm_bo;
469 	bool idle = false;
470 	int ret = 0;
471 
472 	dma_resv_assert_held(bo->ttm.base.resv);
473 
474 	if (!list_empty(&bo->ttm.base.gpuva.list)) {
475 		dma_resv_iter_begin(&cursor, bo->ttm.base.resv,
476 				    DMA_RESV_USAGE_BOOKKEEP);
477 		dma_resv_for_each_fence_unlocked(&cursor, fence)
478 			dma_fence_enable_sw_signaling(fence);
479 		dma_resv_iter_end(&cursor);
480 	}
481 
482 	drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
483 		struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
484 		struct drm_gpuva *gpuva;
485 
486 		if (!xe_vm_in_fault_mode(vm)) {
487 			drm_gpuvm_bo_evict(vm_bo, true);
488 			continue;
489 		}
490 
491 		if (!idle) {
492 			long timeout;
493 
494 			if (ctx->no_wait_gpu &&
495 			    !dma_resv_test_signaled(bo->ttm.base.resv,
496 						    DMA_RESV_USAGE_BOOKKEEP))
497 				return -EBUSY;
498 
499 			timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
500 							DMA_RESV_USAGE_BOOKKEEP,
501 							ctx->interruptible,
502 							MAX_SCHEDULE_TIMEOUT);
503 			if (!timeout)
504 				return -ETIME;
505 			if (timeout < 0)
506 				return timeout;
507 
508 			idle = true;
509 		}
510 
511 		drm_gpuvm_bo_for_each_va(gpuva, vm_bo) {
512 			struct xe_vma *vma = gpuva_to_vma(gpuva);
513 
514 			trace_xe_vma_evict(vma);
515 			ret = xe_vm_invalidate_vma(vma);
516 			if (XE_WARN_ON(ret))
517 				return ret;
518 		}
519 	}
520 
521 	return ret;
522 }
523 
524 /*
525  * The dma-buf map_attachment() / unmap_attachment() is hooked up here.
526  * Note that unmapping the attachment is deferred to the next
527  * map_attachment time, or to bo destroy (after idling), whichever comes first.
528  * This is to avoid syncing before unmap_attachment(), assuming that the
529  * caller relies on idling the reservation object before moving the
530  * backing store out. Should that assumption not hold, then we will be able
531  * to unconditionally call unmap_attachment() when moving out to system.
532  */
533 static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo,
534 			     struct ttm_resource *new_res)
535 {
536 	struct dma_buf_attachment *attach = ttm_bo->base.import_attach;
537 	struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt,
538 					       ttm);
539 	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
540 	struct sg_table *sg;
541 
542 	xe_assert(xe, attach);
543 	xe_assert(xe, ttm_bo->ttm);
544 
545 	if (new_res->mem_type == XE_PL_SYSTEM)
546 		goto out;
547 
548 	if (ttm_bo->sg) {
549 		dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL);
550 		ttm_bo->sg = NULL;
551 	}
552 
553 	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
554 	if (IS_ERR(sg))
555 		return PTR_ERR(sg);
556 
557 	ttm_bo->sg = sg;
558 	xe_tt->sg = sg;
559 
560 out:
561 	ttm_bo_move_null(ttm_bo, new_res);
562 
563 	return 0;
564 }
565 
566 /**
567  * xe_bo_move_notify - Notify subsystems of a pending move
568  * @bo: The buffer object
569  * @ctx: The struct ttm_operation_ctx controlling locking and waits.
570  *
571  * This function notifies subsystems of an upcoming buffer move.
572  * Upon receiving such a notification, subsystems should schedule
573  * halting access to the underlying pages and optionally add a fence
574  * to the buffer object's dma_resv object, that signals when access is
575  * stopped. The caller will wait on all dma_resv fences before
576  * starting the move.
577  *
578  * A subsystem may commence access to the object after obtaining
579  * bindings to the new backing memory under the object lock.
580  *
581  * Return: 0 on success, -EINTR or -ERESTARTSYS if interrupted in fault mode,
582  * negative error code on error.
583  */
584 static int xe_bo_move_notify(struct xe_bo *bo,
585 			     const struct ttm_operation_ctx *ctx)
586 {
587 	struct ttm_buffer_object *ttm_bo = &bo->ttm;
588 	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
589 	int ret;
590 
591 	/*
592 	 * If this starts to call into many components, consider
593 	 * using a notification chain here.
594 	 */
595 
596 	if (xe_bo_is_pinned(bo))
597 		return -EINVAL;
598 
599 	xe_bo_vunmap(bo);
600 	ret = xe_bo_trigger_rebind(xe, bo, ctx);
601 	if (ret)
602 		return ret;
603 
604 	/* Don't call move_notify() for imported dma-bufs. */
605 	if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach)
606 		dma_buf_move_notify(ttm_bo->base.dma_buf);
607 
608 	return 0;
609 }
610 
611 static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
612 		      struct ttm_operation_ctx *ctx,
613 		      struct ttm_resource *new_mem,
614 		      struct ttm_place *hop)
615 {
616 	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
617 	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
618 	struct ttm_resource *old_mem = ttm_bo->resource;
619 	u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM;
620 	struct ttm_tt *ttm = ttm_bo->ttm;
621 	struct xe_migrate *migrate = NULL;
622 	struct dma_fence *fence;
623 	bool move_lacks_source;
624 	bool tt_has_data;
625 	bool needs_clear;
626 	bool handle_system_ccs = (!IS_DGFX(xe) && xe_bo_needs_ccs_pages(bo) &&
627 				  ttm && ttm_tt_is_populated(ttm)) ? true : false;
628 	int ret = 0;
629 	/* Bo creation path, moving to system or TT. */
630 	if ((!old_mem && ttm) && !handle_system_ccs) {
631 		ttm_bo_move_null(ttm_bo, new_mem);
632 		return 0;
633 	}
634 
635 	if (ttm_bo->type == ttm_bo_type_sg) {
636 		ret = xe_bo_move_notify(bo, ctx);
637 		if (!ret)
638 			ret = xe_bo_move_dmabuf(ttm_bo, new_mem);
639 		goto out;
640 	}
641 
642 	tt_has_data = ttm && (ttm_tt_is_populated(ttm) ||
643 			      (ttm->page_flags & TTM_TT_FLAG_SWAPPED));
644 
645 	move_lacks_source = handle_system_ccs ? (!bo->ccs_cleared)  :
646 						(!mem_type_is_vram(old_mem_type) && !tt_has_data);
647 
648 	needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) ||
649 		(!ttm && ttm_bo->type == ttm_bo_type_device);
650 
651 	if ((move_lacks_source && !needs_clear)) {
652 		ttm_bo_move_null(ttm_bo, new_mem);
653 		goto out;
654 	}
655 
656 	if (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT && !handle_system_ccs) {
657 		ttm_bo_move_null(ttm_bo, new_mem);
658 		goto out;
659 	}
660 
661 	/*
662 	 * A failed multi-hop, where the old_mem is still marked as
663 	 * TTM_PL_FLAG_TEMPORARY, should just be a dummy move.
664 	 */
665 	if (old_mem_type == XE_PL_TT &&
666 	    new_mem->mem_type == XE_PL_TT) {
667 		ttm_bo_move_null(ttm_bo, new_mem);
668 		goto out;
669 	}
670 
671 	if (!move_lacks_source && !xe_bo_is_pinned(bo)) {
672 		ret = xe_bo_move_notify(bo, ctx);
673 		if (ret)
674 			goto out;
675 	}
676 
677 	if (old_mem_type == XE_PL_TT &&
678 	    new_mem->mem_type == XE_PL_SYSTEM) {
679 		long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
680 						     DMA_RESV_USAGE_BOOKKEEP,
681 						     true,
682 						     MAX_SCHEDULE_TIMEOUT);
683 		if (timeout < 0) {
684 			ret = timeout;
685 			goto out;
686 		}
687 
688 		if (!handle_system_ccs) {
689 			ttm_bo_move_null(ttm_bo, new_mem);
690 			goto out;
691 		}
692 	}
693 
694 	if (!move_lacks_source &&
695 	    ((old_mem_type == XE_PL_SYSTEM && resource_is_vram(new_mem)) ||
696 	     (mem_type_is_vram(old_mem_type) &&
697 	      new_mem->mem_type == XE_PL_SYSTEM))) {
698 		hop->fpfn = 0;
699 		hop->lpfn = 0;
700 		hop->mem_type = XE_PL_TT;
701 		hop->flags = TTM_PL_FLAG_TEMPORARY;
702 		ret = -EMULTIHOP;
703 		goto out;
704 	}
705 
706 	if (bo->tile)
707 		migrate = bo->tile->migrate;
708 	else if (resource_is_vram(new_mem))
709 		migrate = mem_type_to_migrate(xe, new_mem->mem_type);
710 	else if (mem_type_is_vram(old_mem_type))
711 		migrate = mem_type_to_migrate(xe, old_mem_type);
712 	else
713 		migrate = xe->tiles[0].migrate;
714 
715 	xe_assert(xe, migrate);
716 
717 	trace_xe_bo_move(bo);
718 	xe_device_mem_access_get(xe);
719 
720 	if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
721 		/*
722 		 * Kernel memory that is pinned should only be moved on suspend
723 		 * / resume; some of the pinned memory is required for the
724 		 * device to resume / use the GPU to move other evicted memory
725 		 * (user memory) around. This likely could be optimized a bit
726 		 * further by finding the minimum set of pinned memory
727 		 * required for resume, but for simplicity we do a memcpy for
728 		 * all pinned memory.
729 		 */
730 		ret = xe_bo_vmap(bo);
731 		if (!ret) {
732 			ret = ttm_bo_move_memcpy(ttm_bo, ctx, new_mem);
733 
734 			/* Create a new VMAP once the kernel BO is back in VRAM */
735 			if (!ret && resource_is_vram(new_mem)) {
736 				struct xe_mem_region *vram = res_to_mem_region(new_mem);
737 				void __iomem *new_addr = vram->mapping +
738 					(new_mem->start << PAGE_SHIFT);
739 
740 				if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) {
741 					ret = -EINVAL;
742 					xe_device_mem_access_put(xe);
743 					goto out;
744 				}
745 
746 				xe_assert(xe, new_mem->start ==
747 					  bo->placements->fpfn);
748 
749 				iosys_map_set_vaddr_iomem(&bo->vmap, new_addr);
750 			}
751 		}
752 	} else {
753 		if (move_lacks_source)
754 			fence = xe_migrate_clear(migrate, bo, new_mem);
755 		else
756 			fence = xe_migrate_copy(migrate, bo, bo, old_mem,
757 						new_mem, handle_system_ccs);
758 		if (IS_ERR(fence)) {
759 			ret = PTR_ERR(fence);
760 			xe_device_mem_access_put(xe);
761 			goto out;
762 		}
763 		if (!move_lacks_source) {
764 			ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict,
765 							true, new_mem);
766 			if (ret) {
767 				dma_fence_wait(fence, false);
768 				ttm_bo_move_null(ttm_bo, new_mem);
769 				ret = 0;
770 			}
771 		} else {
772 			/*
773 			 * ttm_bo_move_accel_cleanup() may blow up if
774 			 * bo->resource == NULL, so just attach the
775 			 * fence and set the new resource.
776 			 */
777 			dma_resv_add_fence(ttm_bo->base.resv, fence,
778 					   DMA_RESV_USAGE_KERNEL);
779 			ttm_bo_move_null(ttm_bo, new_mem);
780 		}
781 
782 		dma_fence_put(fence);
783 	}
784 
785 	xe_device_mem_access_put(xe);
786 
787 out:
788 	return ret;
789 
790 }
791 
792 /**
793  * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
794  * @bo: The buffer object to move.
795  *
796  * On successful completion, the object memory will be moved to system memory.
797  * This function blocks until the object has been fully moved.
798  *
799  * This is needed for special handling of pinned VRAM objects during
800  * suspend-resume.
801  *
802  * Return: 0 on success. Negative error code on failure.
803  */
804 int xe_bo_evict_pinned(struct xe_bo *bo)
805 {
806 	struct ttm_place place = {
807 		.mem_type = XE_PL_TT,
808 	};
809 	struct ttm_placement placement = {
810 		.placement = &place,
811 		.num_placement = 1,
812 	};
813 	struct ttm_operation_ctx ctx = {
814 		.interruptible = false,
815 	};
816 	struct ttm_resource *new_mem;
817 	int ret;
818 
819 	xe_bo_assert_held(bo);
820 
821 	if (WARN_ON(!bo->ttm.resource))
822 		return -EINVAL;
823 
824 	if (WARN_ON(!xe_bo_is_pinned(bo)))
825 		return -EINVAL;
826 
827 	if (WARN_ON(!xe_bo_is_vram(bo)))
828 		return -EINVAL;
829 
830 	ret = ttm_bo_mem_space(&bo->ttm, &placement, &new_mem, &ctx);
831 	if (ret)
832 		return ret;
833 
834 	if (!bo->ttm.ttm) {
835 		bo->ttm.ttm = xe_ttm_tt_create(&bo->ttm, 0);
836 		if (!bo->ttm.ttm) {
837 			ret = -ENOMEM;
838 			goto err_res_free;
839 		}
840 	}
841 
842 	ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx);
843 	if (ret)
844 		goto err_res_free;
845 
846 	ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
847 	if (ret)
848 		goto err_res_free;
849 
850 	ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL);
851 	if (ret)
852 		goto err_res_free;
853 
854 	dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
855 			      false, MAX_SCHEDULE_TIMEOUT);
856 
857 	return 0;
858 
859 err_res_free:
860 	ttm_resource_free(&bo->ttm, &new_mem);
861 	return ret;
862 }
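
/*
 * Suspend-path sketch (the real callers live in the suspend / resume
 * helpers, e.g. xe_bo_evict.c; locking shown explicitly, error handling
 * trimmed):
 *
 *	xe_bo_lock(bo, false);
 *	ret = xe_bo_evict_pinned(bo);
 *	xe_bo_unlock(bo);
 *
 * The BO must already be pinned and resident in VRAM; on success its
 * contents have been copied into a freshly populated XE_PL_TT resource.
 */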
863 
864 /**
865  * xe_bo_restore_pinned() - Restore a pinned VRAM object
866  * @bo: The buffer object to move.
867  *
868  * On successful completion, the object memory will be moved back to VRAM.
869  * This function blocks until the object has been fully moved.
870  *
871  * This is needed for special handling of pinned VRAM objects during
872  * suspend-resume.
873  *
874  * Return: 0 on success. Negative error code on failure.
875  */
876 int xe_bo_restore_pinned(struct xe_bo *bo)
877 {
878 	struct ttm_operation_ctx ctx = {
879 		.interruptible = false,
880 	};
881 	struct ttm_resource *new_mem;
882 	int ret;
883 
884 	xe_bo_assert_held(bo);
885 
886 	if (WARN_ON(!bo->ttm.resource))
887 		return -EINVAL;
888 
889 	if (WARN_ON(!xe_bo_is_pinned(bo)))
890 		return -EINVAL;
891 
892 	if (WARN_ON(xe_bo_is_vram(bo) || !bo->ttm.ttm))
893 		return -EINVAL;
894 
895 	ret = ttm_bo_mem_space(&bo->ttm, &bo->placement, &new_mem, &ctx);
896 	if (ret)
897 		return ret;
898 
899 	ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx);
900 	if (ret)
901 		goto err_res_free;
902 
903 	ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
904 	if (ret)
905 		goto err_res_free;
906 
907 	ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL);
908 	if (ret)
909 		goto err_res_free;
910 
911 	dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
912 			      false, MAX_SCHEDULE_TIMEOUT);
913 
914 	return 0;
915 
916 err_res_free:
917 	ttm_resource_free(&bo->ttm, &new_mem);
918 	return ret;
919 }
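
/*
 * Matching resume-path sketch (same assumptions as the eviction example
 * above); the BO moves back to the contiguous VRAM range that xe_bo_pin()
 * recorded in bo->placements[0]:
 *
 *	xe_bo_lock(bo, false);
 *	ret = xe_bo_restore_pinned(bo);
 *	xe_bo_unlock(bo);
 */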
920 
921 static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo,
922 				       unsigned long page_offset)
923 {
924 	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
925 	struct xe_res_cursor cursor;
926 	struct xe_mem_region *vram;
927 
928 	if (ttm_bo->resource->mem_type == XE_PL_STOLEN)
929 		return xe_ttm_stolen_io_offset(bo, page_offset << PAGE_SHIFT) >> PAGE_SHIFT;
930 
931 	vram = res_to_mem_region(ttm_bo->resource);
932 	xe_res_first(ttm_bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
933 	return (vram->io_start + cursor.start) >> PAGE_SHIFT;
934 }
935 
936 static void __xe_bo_vunmap(struct xe_bo *bo);
937 
938 /*
939  * TODO: Move this function to TTM so we don't rely on how TTM does its
940  * locking, thereby abusing TTM internals.
941  */
942 static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo)
943 {
944 	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
945 	bool locked;
946 
947 	xe_assert(xe, !kref_read(&ttm_bo->kref));
948 
949 	/*
950 	 * We can typically only race with TTM trylocking under the
951 	 * lru_lock, which will immediately be unlocked again since
952 	 * the ttm_bo refcount is zero at this point. So trylocking *should*
953 	 * always succeed here, as long as we hold the lru lock.
954 	 */
955 	spin_lock(&ttm_bo->bdev->lru_lock);
956 	locked = dma_resv_trylock(ttm_bo->base.resv);
957 	spin_unlock(&ttm_bo->bdev->lru_lock);
958 	xe_assert(xe, locked);
959 
960 	return locked;
961 }
962 
963 static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
964 {
965 	struct dma_resv_iter cursor;
966 	struct dma_fence *fence;
967 	struct dma_fence *replacement = NULL;
968 	struct xe_bo *bo;
969 
970 	if (!xe_bo_is_xe_bo(ttm_bo))
971 		return;
972 
973 	bo = ttm_to_xe_bo(ttm_bo);
974 	xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount)));
975 
976 	/*
977 	 * Corner case where TTM fails to allocate memory and this BO's resv
978 	 * still points to the VM's resv.
979 	 */
980 	if (ttm_bo->base.resv != &ttm_bo->base._resv)
981 		return;
982 
983 	if (!xe_ttm_bo_lock_in_destructor(ttm_bo))
984 		return;
985 
986 	/*
987 	 * Scrub the preempt fences if any. The unbind fence is already
988 	 * attached to the resv.
989 	 * TODO: Don't do this for external bos once we scrub them after
990 	 * unbind.
991 	 */
992 	dma_resv_for_each_fence(&cursor, ttm_bo->base.resv,
993 				DMA_RESV_USAGE_BOOKKEEP, fence) {
994 		if (xe_fence_is_xe_preempt(fence) &&
995 		    !dma_fence_is_signaled(fence)) {
996 			if (!replacement)
997 				replacement = dma_fence_get_stub();
998 
999 			dma_resv_replace_fences(ttm_bo->base.resv,
1000 						fence->context,
1001 						replacement,
1002 						DMA_RESV_USAGE_BOOKKEEP);
1003 		}
1004 	}
1005 	dma_fence_put(replacement);
1006 
1007 	dma_resv_unlock(ttm_bo->base.resv);
1008 }
1009 
1010 static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
1011 {
1012 	if (!xe_bo_is_xe_bo(ttm_bo))
1013 		return;
1014 
1015 	/*
1016 	 * Object is idle and about to be destroyed. Release the
1017 	 * dma-buf attachment.
1018 	 */
1019 	if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) {
1020 		struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm,
1021 						       struct xe_ttm_tt, ttm);
1022 
1023 		dma_buf_unmap_attachment(ttm_bo->base.import_attach, ttm_bo->sg,
1024 					 DMA_BIDIRECTIONAL);
1025 		ttm_bo->sg = NULL;
1026 		xe_tt->sg = NULL;
1027 	}
1028 }
1029 
1030 struct ttm_device_funcs xe_ttm_funcs = {
1031 	.ttm_tt_create = xe_ttm_tt_create,
1032 	.ttm_tt_populate = xe_ttm_tt_populate,
1033 	.ttm_tt_unpopulate = xe_ttm_tt_unpopulate,
1034 	.ttm_tt_destroy = xe_ttm_tt_destroy,
1035 	.evict_flags = xe_evict_flags,
1036 	.move = xe_bo_move,
1037 	.io_mem_reserve = xe_ttm_io_mem_reserve,
1038 	.io_mem_pfn = xe_ttm_io_mem_pfn,
1039 	.release_notify = xe_ttm_bo_release_notify,
1040 	.eviction_valuable = ttm_bo_eviction_valuable,
1041 	.delete_mem_notify = xe_ttm_bo_delete_mem_notify,
1042 };
1043 
1044 static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
1045 {
1046 	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
1047 	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
1048 
1049 	if (bo->ttm.base.import_attach)
1050 		drm_prime_gem_destroy(&bo->ttm.base, NULL);
1051 	drm_gem_object_release(&bo->ttm.base);
1052 
1053 	xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list));
1054 
1055 	if (bo->ggtt_node.size)
1056 		xe_ggtt_remove_bo(bo->tile->mem.ggtt, bo);
1057 
1058 #ifdef CONFIG_PROC_FS
1059 	if (bo->client)
1060 		xe_drm_client_remove_bo(bo);
1061 #endif
1062 
1063 	if (bo->vm && xe_bo_is_user(bo))
1064 		xe_vm_put(bo->vm);
1065 
1066 	kfree(bo);
1067 }
1068 
1069 static void xe_gem_object_free(struct drm_gem_object *obj)
1070 {
1071 	/* Our BO reference counting scheme works as follows:
1072 	 *
1073 	 * The gem object kref is typically used throughout the driver,
1074 	 * and the gem object holds a ttm_buffer_object refcount, so
1075 	 * that when the last gem object reference is put, which is when
1076 	 * we end up in this function, we also put that ttm_buffer_object
1077 	 * refcount. Anything using gem interfaces is then no longer
1078 	 * allowed to access the object in a way that requires a gem
1079 	 * refcount, including locking the object.
1080 	 *
1081 	 * Driver TTM callbacks are allowed to use the ttm_buffer_object
1082 	 * refcount directly if needed.
1083 	 */
1084 	__xe_bo_vunmap(gem_to_xe_bo(obj));
1085 	ttm_bo_put(container_of(obj, struct ttm_buffer_object, base));
1086 }
1087 
1088 static void xe_gem_object_close(struct drm_gem_object *obj,
1089 				struct drm_file *file_priv)
1090 {
1091 	struct xe_bo *bo = gem_to_xe_bo(obj);
1092 
1093 	if (bo->vm && !xe_vm_in_fault_mode(bo->vm)) {
1094 		xe_assert(xe_bo_device(bo), xe_bo_is_user(bo));
1095 
1096 		xe_bo_lock(bo, false);
1097 		ttm_bo_set_bulk_move(&bo->ttm, NULL);
1098 		xe_bo_unlock(bo);
1099 	}
1100 }
1101 
1102 static bool should_migrate_to_system(struct xe_bo *bo)
1103 {
1104 	struct xe_device *xe = xe_bo_device(bo);
1105 
1106 	return xe_device_in_fault_mode(xe) && bo->props.cpu_atomic;
1107 }
1108 
1109 static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
1110 {
1111 	struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
1112 	struct drm_device *ddev = tbo->base.dev;
1113 	vm_fault_t ret;
1114 	int idx, r = 0;
1115 
1116 	ret = ttm_bo_vm_reserve(tbo, vmf);
1117 	if (ret)
1118 		return ret;
1119 
1120 	if (drm_dev_enter(ddev, &idx)) {
1121 		struct xe_bo *bo = ttm_to_xe_bo(tbo);
1122 
1123 		trace_xe_bo_cpu_fault(bo);
1124 
1125 		if (should_migrate_to_system(bo)) {
1126 			r = xe_bo_migrate(bo, XE_PL_TT);
1127 			if (r == -EBUSY || r == -ERESTARTSYS || r == -EINTR)
1128 				ret = VM_FAULT_NOPAGE;
1129 			else if (r)
1130 				ret = VM_FAULT_SIGBUS;
1131 		}
1132 		if (!ret)
1133 			ret = ttm_bo_vm_fault_reserved(vmf,
1134 						       vmf->vma->vm_page_prot,
1135 						       TTM_BO_VM_NUM_PREFAULT);
1136 		drm_dev_exit(idx);
1137 	} else {
1138 		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
1139 	}
1140 	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
1141 		return ret;
1142 
1143 	dma_resv_unlock(tbo->base.resv);
1144 	return ret;
1145 }
1146 
1147 static const struct vm_operations_struct xe_gem_vm_ops = {
1148 	.fault = xe_gem_fault,
1149 	.open = ttm_bo_vm_open,
1150 	.close = ttm_bo_vm_close,
1151 	.access = ttm_bo_vm_access
1152 };
1153 
1154 static const struct drm_gem_object_funcs xe_gem_object_funcs = {
1155 	.free = xe_gem_object_free,
1156 	.close = xe_gem_object_close,
1157 	.mmap = drm_gem_ttm_mmap,
1158 	.export = xe_gem_prime_export,
1159 	.vm_ops = &xe_gem_vm_ops,
1160 };
1161 
1162 /**
1163  * xe_bo_alloc - Allocate storage for a struct xe_bo
1164  *
1165  * This function is intended to allocate storage to be used for input
1166  * to __xe_bo_create_locked(), in the case a pointer to the bo to be
1167  * created is needed before the call to __xe_bo_create_locked().
1168  * If __xe_bo_create_locked() ends up never being called, then the
1169  * storage allocated with this function needs to be freed using
1170  * xe_bo_free().
1171  *
1172  * Return: A pointer to an uninitialized struct xe_bo on success,
1173  * ERR_PTR(-ENOMEM) on error.
1174  */
1175 struct xe_bo *xe_bo_alloc(void)
1176 {
1177 	struct xe_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1178 
1179 	if (!bo)
1180 		return ERR_PTR(-ENOMEM);
1181 
1182 	return bo;
1183 }
1184 
1185 /**
1186  * xe_bo_free - Free storage allocated using xe_bo_alloc()
1187  * @bo: The buffer object storage.
1188  *
1189  * Refer to xe_bo_alloc() documentation for valid use-cases.
1190  */
1191 void xe_bo_free(struct xe_bo *bo)
1192 {
1193 	kfree(bo);
1194 }
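
/*
 * Sketch of the intended pairing, assuming a caller that needs the pointer
 * before the object is actually created (error handling shortened):
 *
 *	struct xe_bo *bo = xe_bo_alloc();
 *
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 *	... publish the pointer, then ...
 *
 *	bo = ___xe_bo_create_locked(xe, bo, tile, resv, bulk, size,
 *				    cpu_caching, type, flags);
 *
 * If ___xe_bo_create_locked() ends up never being called, release the
 * storage with xe_bo_free(bo) instead.
 */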
1195 
1196 struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
1197 				     struct xe_tile *tile, struct dma_resv *resv,
1198 				     struct ttm_lru_bulk_move *bulk, size_t size,
1199 				     u16 cpu_caching, enum ttm_bo_type type,
1200 				     u32 flags)
1201 {
1202 	struct ttm_operation_ctx ctx = {
1203 		.interruptible = true,
1204 		.no_wait_gpu = false,
1205 	};
1206 	struct ttm_placement *placement;
1207 	uint32_t alignment;
1208 	size_t aligned_size;
1209 	int err;
1210 
1211 	/* Only kernel objects should set GT */
1212 	xe_assert(xe, !tile || type == ttm_bo_type_kernel);
1213 
1214 	if (XE_WARN_ON(!size)) {
1215 		xe_bo_free(bo);
1216 		return ERR_PTR(-EINVAL);
1217 	}
1218 
1219 	if (flags & (XE_BO_CREATE_VRAM_MASK | XE_BO_CREATE_STOLEN_BIT) &&
1220 	    !(flags & XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT) &&
1221 	    xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) {
1222 		aligned_size = ALIGN(size, SZ_64K);
1223 		if (type != ttm_bo_type_device)
1224 			size = ALIGN(size, SZ_64K);
1225 		flags |= XE_BO_INTERNAL_64K;
1226 		alignment = SZ_64K >> PAGE_SHIFT;
1227 
1228 	} else {
1229 		aligned_size = ALIGN(size, SZ_4K);
1230 		flags &= ~XE_BO_INTERNAL_64K;
1231 		alignment = SZ_4K >> PAGE_SHIFT;
1232 	}
1233 
1234 	if (type == ttm_bo_type_device && aligned_size != size)
1235 		return ERR_PTR(-EINVAL);
1236 
1237 	if (!bo) {
1238 		bo = xe_bo_alloc();
1239 		if (IS_ERR(bo))
1240 			return bo;
1241 	}
1242 
1243 	bo->ccs_cleared = false;
1244 	bo->tile = tile;
1245 	bo->size = size;
1246 	bo->flags = flags;
1247 	bo->cpu_caching = cpu_caching;
1248 	bo->ttm.base.funcs = &xe_gem_object_funcs;
1249 	bo->props.preferred_mem_class = XE_BO_PROPS_INVALID;
1250 	bo->props.preferred_gt = XE_BO_PROPS_INVALID;
1251 	bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;
1252 	bo->ttm.priority = XE_BO_PRIORITY_NORMAL;
1253 	INIT_LIST_HEAD(&bo->pinned_link);
1254 #ifdef CONFIG_PROC_FS
1255 	INIT_LIST_HEAD(&bo->client_link);
1256 #endif
1257 
1258 	drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size);
1259 
1260 	if (resv) {
1261 		ctx.allow_res_evict = !(flags & XE_BO_CREATE_NO_RESV_EVICT);
1262 		ctx.resv = resv;
1263 	}
1264 
1265 	if (!(flags & XE_BO_FIXED_PLACEMENT_BIT)) {
1266 		err = __xe_bo_placement_for_flags(xe, bo, bo->flags);
1267 		if (WARN_ON(err)) {
1268 			xe_ttm_bo_destroy(&bo->ttm);
1269 			return ERR_PTR(err);
1270 		}
1271 	}
1272 
1273 	/* Defer populating type_sg bos */
1274 	placement = (type == ttm_bo_type_sg ||
1275 		     bo->flags & XE_BO_DEFER_BACKING) ? &sys_placement :
1276 		&bo->placement;
1277 	err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type,
1278 				   placement, alignment,
1279 				   &ctx, NULL, resv, xe_ttm_bo_destroy);
1280 	if (err)
1281 		return ERR_PTR(err);
1282 
1283 	/*
1284 	 * The VRAM pages underneath are potentially still being accessed by the
1285 	 * GPU, as per async GPU clearing and async evictions. However TTM makes
1286 	 * sure to add any corresponding move/clear fences into the object's
1287 	 * dma-resv using the DMA_RESV_USAGE_KERNEL slot.
1288 	 *
1289 	 * For KMD internal buffers we don't care about GPU clearing, however we
1290 	 * still need to handle async evictions, where the VRAM is still being
1291 	 * accessed by the GPU. Most internal callers are not expecting this,
1292 	 * since they are missing the required synchronisation before accessing
1293 	 * the memory. To keep things simple just sync wait any kernel fences
1294 	 * here, if the buffer is designated KMD internal.
1295 	 *
1296 	 * For normal userspace objects we should already have the required
1297 	 * pipelining or sync waiting elsewhere, since we already have to deal
1298 	 * with things like async GPU clearing.
1299 	 */
1300 	if (type == ttm_bo_type_kernel) {
1301 		long timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
1302 						     DMA_RESV_USAGE_KERNEL,
1303 						     ctx.interruptible,
1304 						     MAX_SCHEDULE_TIMEOUT);
1305 
1306 		if (timeout < 0) {
1307 			if (!resv)
1308 				dma_resv_unlock(bo->ttm.base.resv);
1309 			xe_bo_put(bo);
1310 			return ERR_PTR(timeout);
1311 		}
1312 	}
1313 
1314 	bo->created = true;
1315 	if (bulk)
1316 		ttm_bo_set_bulk_move(&bo->ttm, bulk);
1317 	else
1318 		ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
1319 
1320 	return bo;
1321 }
1322 
1323 static int __xe_bo_fixed_placement(struct xe_device *xe,
1324 				   struct xe_bo *bo,
1325 				   u32 flags,
1326 				   u64 start, u64 end, u64 size)
1327 {
1328 	struct ttm_place *place = bo->placements;
1329 
1330 	if (flags & (XE_BO_CREATE_USER_BIT|XE_BO_CREATE_SYSTEM_BIT))
1331 		return -EINVAL;
1332 
1333 	place->flags = TTM_PL_FLAG_CONTIGUOUS;
1334 	place->fpfn = start >> PAGE_SHIFT;
1335 	place->lpfn = end >> PAGE_SHIFT;
1336 
1337 	switch (flags & (XE_BO_CREATE_STOLEN_BIT | XE_BO_CREATE_VRAM_MASK)) {
1338 	case XE_BO_CREATE_VRAM0_BIT:
1339 		place->mem_type = XE_PL_VRAM0;
1340 		break;
1341 	case XE_BO_CREATE_VRAM1_BIT:
1342 		place->mem_type = XE_PL_VRAM1;
1343 		break;
1344 	case XE_BO_CREATE_STOLEN_BIT:
1345 		place->mem_type = XE_PL_STOLEN;
1346 		break;
1347 
1348 	default:
1349 		/* 0 or multiple of the above set */
1350 		return -EINVAL;
1351 	}
1352 
1353 	bo->placement = (struct ttm_placement) {
1354 		.num_placement = 1,
1355 		.placement = place,
1356 		.num_busy_placement = 1,
1357 		.busy_placement = place,
1358 	};
1359 
1360 	return 0;
1361 }
1362 
1363 static struct xe_bo *
1364 __xe_bo_create_locked(struct xe_device *xe,
1365 		      struct xe_tile *tile, struct xe_vm *vm,
1366 		      size_t size, u64 start, u64 end,
1367 		      u16 cpu_caching, enum ttm_bo_type type, u32 flags)
1368 {
1369 	struct xe_bo *bo = NULL;
1370 	int err;
1371 
1372 	if (vm)
1373 		xe_vm_assert_held(vm);
1374 
1375 	if (start || end != ~0ULL) {
1376 		bo = xe_bo_alloc();
1377 		if (IS_ERR(bo))
1378 			return bo;
1379 
1380 		flags |= XE_BO_FIXED_PLACEMENT_BIT;
1381 		err = __xe_bo_fixed_placement(xe, bo, flags, start, end, size);
1382 		if (err) {
1383 			xe_bo_free(bo);
1384 			return ERR_PTR(err);
1385 		}
1386 	}
1387 
1388 	bo = ___xe_bo_create_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
1389 				    vm && !xe_vm_in_fault_mode(vm) &&
1390 				    flags & XE_BO_CREATE_USER_BIT ?
1391 				    &vm->lru_bulk_move : NULL, size,
1392 				    cpu_caching, type, flags);
1393 	if (IS_ERR(bo))
1394 		return bo;
1395 
1396 	/*
1397 	 * Note that instead of taking a reference on the drm_gpuvm_resv_bo(),
1398 	 * to ensure the shared resv doesn't disappear under the bo, the bo
1399 	 * will keep a reference to the vm, and avoid circular references
1400 	 * by having all the vm's bo references released at vm close
1401 	 * time.
1402 	 */
1403 	if (vm && xe_bo_is_user(bo))
1404 		xe_vm_get(vm);
1405 	bo->vm = vm;
1406 
1407 	if (bo->flags & XE_BO_CREATE_GGTT_BIT) {
1408 		if (!tile && flags & XE_BO_CREATE_STOLEN_BIT)
1409 			tile = xe_device_get_root_tile(xe);
1410 
1411 		xe_assert(xe, tile);
1412 
1413 		if (flags & XE_BO_FIXED_PLACEMENT_BIT) {
1414 			err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo,
1415 						   start + bo->size, U64_MAX);
1416 		} else {
1417 			err = xe_ggtt_insert_bo(tile->mem.ggtt, bo);
1418 		}
1419 		if (err)
1420 			goto err_unlock_put_bo;
1421 	}
1422 
1423 	return bo;
1424 
1425 err_unlock_put_bo:
1426 	__xe_bo_unset_bulk_move(bo);
1427 	xe_bo_unlock_vm_held(bo);
1428 	xe_bo_put(bo);
1429 	return ERR_PTR(err);
1430 }
1431 
1432 struct xe_bo *
1433 xe_bo_create_locked_range(struct xe_device *xe,
1434 			  struct xe_tile *tile, struct xe_vm *vm,
1435 			  size_t size, u64 start, u64 end,
1436 			  enum ttm_bo_type type, u32 flags)
1437 {
1438 	return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, flags);
1439 }
1440 
1441 struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
1442 				  struct xe_vm *vm, size_t size,
1443 				  enum ttm_bo_type type, u32 flags)
1444 {
1445 	return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, flags);
1446 }
1447 
1448 struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
1449 				struct xe_vm *vm, size_t size,
1450 				u16 cpu_caching,
1451 				enum ttm_bo_type type,
1452 				u32 flags)
1453 {
1454 	struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL,
1455 						 cpu_caching, type,
1456 						 flags | XE_BO_CREATE_USER_BIT);
1457 	if (!IS_ERR(bo))
1458 		xe_bo_unlock_vm_held(bo);
1459 
1460 	return bo;
1461 }
1462 
1463 struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile,
1464 			   struct xe_vm *vm, size_t size,
1465 			   enum ttm_bo_type type, u32 flags)
1466 {
1467 	struct xe_bo *bo = xe_bo_create_locked(xe, tile, vm, size, type, flags);
1468 
1469 	if (!IS_ERR(bo))
1470 		xe_bo_unlock_vm_held(bo);
1471 
1472 	return bo;
1473 }
1474 
1475 struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
1476 				      struct xe_vm *vm,
1477 				      size_t size, u64 offset,
1478 				      enum ttm_bo_type type, u32 flags)
1479 {
1480 	struct xe_bo *bo;
1481 	int err;
1482 	u64 start = offset == ~0ull ? 0 : offset;
1483 	u64 end = offset == ~0ull ? offset : start + size;
1484 
1485 	if (flags & XE_BO_CREATE_STOLEN_BIT &&
1486 	    xe_ttm_stolen_cpu_access_needs_ggtt(xe))
1487 		flags |= XE_BO_CREATE_GGTT_BIT;
1488 
1489 	bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
1490 				       flags | XE_BO_NEEDS_CPU_ACCESS);
1491 	if (IS_ERR(bo))
1492 		return bo;
1493 
1494 	err = xe_bo_pin(bo);
1495 	if (err)
1496 		goto err_put;
1497 
1498 	err = xe_bo_vmap(bo);
1499 	if (err)
1500 		goto err_unpin;
1501 
1502 	xe_bo_unlock_vm_held(bo);
1503 
1504 	return bo;
1505 
1506 err_unpin:
1507 	xe_bo_unpin(bo);
1508 err_put:
1509 	xe_bo_unlock_vm_held(bo);
1510 	xe_bo_put(bo);
1511 	return ERR_PTR(err);
1512 }
1513 
1514 struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
1515 				   struct xe_vm *vm, size_t size,
1516 				   enum ttm_bo_type type, u32 flags)
1517 {
1518 	return xe_bo_create_pin_map_at(xe, tile, vm, size, ~0ull, type, flags);
1519 }
1520 
1521 struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
1522 				     const void *data, size_t size,
1523 				     enum ttm_bo_type type, u32 flags)
1524 {
1525 	struct xe_bo *bo = xe_bo_create_pin_map(xe, tile, NULL,
1526 						ALIGN(size, PAGE_SIZE),
1527 						type, flags);
1528 	if (IS_ERR(bo))
1529 		return bo;
1530 
1531 	xe_map_memcpy_to(xe, &bo->vmap, 0, data, size);
1532 
1533 	return bo;
1534 }
1535 
1536 static void __xe_bo_unpin_map_no_vm(struct drm_device *drm, void *arg)
1537 {
1538 	xe_bo_unpin_map_no_vm(arg);
1539 }
1540 
1541 struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
1542 					   size_t size, u32 flags)
1543 {
1544 	struct xe_bo *bo;
1545 	int ret;
1546 
1547 	bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel, flags);
1548 	if (IS_ERR(bo))
1549 		return bo;
1550 
1551 	ret = drmm_add_action_or_reset(&xe->drm, __xe_bo_unpin_map_no_vm, bo);
1552 	if (ret)
1553 		return ERR_PTR(ret);
1554 
1555 	return bo;
1556 }
1557 
1558 struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
1559 					     const void *data, size_t size, u32 flags)
1560 {
1561 	struct xe_bo *bo = xe_managed_bo_create_pin_map(xe, tile, ALIGN(size, PAGE_SIZE), flags);
1562 
1563 	if (IS_ERR(bo))
1564 		return bo;
1565 
1566 	xe_map_memcpy_to(xe, &bo->vmap, 0, data, size);
1567 
1568 	return bo;
1569 }
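
/*
 * Typical use is uploading a small blob that must stay pinned and mapped for
 * the lifetime of the device; the drmm action registered in
 * xe_managed_bo_create_pin_map() unpins and frees it automatically on driver
 * teardown. Hedged sketch (the flag choice is an assumption):
 *
 *	bo = xe_managed_bo_create_from_data(xe, tile, blob, blob_size,
 *					    XE_BO_CREATE_SYSTEM_BIT |
 *					    XE_BO_CREATE_GGTT_BIT);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 */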
1570 
1571 /*
1572  * XXX: This is in the VM bind data path; we should likely calculate this once
1573  * and store it, recalculating if the BO is moved.
1574  */
1575 uint64_t vram_region_gpu_offset(struct ttm_resource *res)
1576 {
1577 	struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
1578 
1579 	if (res->mem_type == XE_PL_STOLEN)
1580 		return xe_ttm_stolen_gpu_offset(xe);
1581 
1582 	return res_to_mem_region(res)->dpa_base;
1583 }
1584 
1585 /**
1586  * xe_bo_pin_external - pin an external BO
1587  * @bo: buffer object to be pinned
1588  *
1589  * Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
1590  * BO. Unique call compared to xe_bo_pin as this function has its own set of
1591  * asserts and code to ensure evict / restore on suspend / resume.
1592  *
1593  * Returns 0 for success, negative error code otherwise.
1594  */
1595 int xe_bo_pin_external(struct xe_bo *bo)
1596 {
1597 	struct xe_device *xe = xe_bo_device(bo);
1598 	int err;
1599 
1600 	xe_assert(xe, !bo->vm);
1601 	xe_assert(xe, xe_bo_is_user(bo));
1602 
1603 	if (!xe_bo_is_pinned(bo)) {
1604 		err = xe_bo_validate(bo, NULL, false);
1605 		if (err)
1606 			return err;
1607 
1608 		if (xe_bo_is_vram(bo)) {
1609 			spin_lock(&xe->pinned.lock);
1610 			list_add_tail(&bo->pinned_link,
1611 				      &xe->pinned.external_vram);
1612 			spin_unlock(&xe->pinned.lock);
1613 		}
1614 	}
1615 
1616 	ttm_bo_pin(&bo->ttm);
1617 
1618 	/*
1619 	 * FIXME: If we always use the reserve / unreserve functions for locking
1620 	 * we do not need this.
1621 	 */
1622 	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
1623 
1624 	return 0;
1625 }
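
/*
 * Intended for the dma-buf export path; a simplified sketch of how an
 * exporter's pin callback might use it (loosely based on xe_dma_buf.c,
 * checks trimmed):
 *
 *	struct drm_gem_object *obj = attach->dmabuf->priv;
 *	struct xe_bo *bo = gem_to_xe_bo(obj);
 *
 *	return xe_bo_pin_external(bo);
 */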
1626 
1627 int xe_bo_pin(struct xe_bo *bo)
1628 {
1629 	struct xe_device *xe = xe_bo_device(bo);
1630 	int err;
1631 
1632 	/* We currently don't expect user BO to be pinned */
1633 	xe_assert(xe, !xe_bo_is_user(bo));
1634 
1635 	/* Pinned object must be in GGTT or have pinned flag */
1636 	xe_assert(xe, bo->flags & (XE_BO_CREATE_PINNED_BIT |
1637 				   XE_BO_CREATE_GGTT_BIT));
1638 
1639 	/*
1640 	 * No reason we can't support pinning imported dma-bufs we just don't
1641 	 * expect to pin an imported dma-buf.
1642 	 */
1643 	xe_assert(xe, !bo->ttm.base.import_attach);
1644 
1645 	/* We only expect at most 1 pin */
1646 	xe_assert(xe, !xe_bo_is_pinned(bo));
1647 
1648 	err = xe_bo_validate(bo, NULL, false);
1649 	if (err)
1650 		return err;
1651 
1652 	/*
1653 	 * For pinned objects on DGFX, which are also in VRAM, we expect
1654 	 * these to be in contiguous VRAM memory. Required eviction / restore
1655 	 * during suspend / resume (force restore to same physical address).
1656 	 */
1657 	if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
1658 	    bo->flags & XE_BO_INTERNAL_TEST)) {
1659 		struct ttm_place *place = &(bo->placements[0]);
1660 
1661 		if (mem_type_is_vram(place->mem_type)) {
1662 			xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS);
1663 
1664 			place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) -
1665 				       vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
1666 			place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
1667 
1668 			spin_lock(&xe->pinned.lock);
1669 			list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
1670 			spin_unlock(&xe->pinned.lock);
1671 		}
1672 	}
1673 
1674 	ttm_bo_pin(&bo->ttm);
1675 
1676 	/*
1677 	 * FIXME: If we always use the reserve / unreserve functions for locking
1678 	 * we do not need this.
1679 	 */
1680 	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
1681 
1682 	return 0;
1683 }
1684 
1685 /**
1686  * xe_bo_unpin_external - unpin an external BO
1687  * @bo: buffer object to be unpinned
1688  *
1689  * Unpin an external (not tied to a VM, can be exported via dma-buf / prime FD)
1690  * BO. Unique call compared to xe_bo_unpin as this function has its own set of
1691  * asserts and code to ensure evict / restore on suspend / resume.
1694  */
1695 void xe_bo_unpin_external(struct xe_bo *bo)
1696 {
1697 	struct xe_device *xe = xe_bo_device(bo);
1698 
1699 	xe_assert(xe, !bo->vm);
1700 	xe_assert(xe, xe_bo_is_pinned(bo));
1701 	xe_assert(xe, xe_bo_is_user(bo));
1702 
1703 	if (bo->ttm.pin_count == 1 && !list_empty(&bo->pinned_link)) {
1704 		spin_lock(&xe->pinned.lock);
1705 		list_del_init(&bo->pinned_link);
1706 		spin_unlock(&xe->pinned.lock);
1707 	}
1708 
1709 	ttm_bo_unpin(&bo->ttm);
1710 
1711 	/*
1712 	 * FIXME: If we always use the reserve / unreserve functions for locking
1713 	 * we do not need this.
1714 	 */
1715 	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
1716 }
1717 
1718 void xe_bo_unpin(struct xe_bo *bo)
1719 {
1720 	struct xe_device *xe = xe_bo_device(bo);
1721 
1722 	xe_assert(xe, !bo->ttm.base.import_attach);
1723 	xe_assert(xe, xe_bo_is_pinned(bo));
1724 
1725 	if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
1726 	    bo->flags & XE_BO_INTERNAL_TEST)) {
1727 		struct ttm_place *place = &(bo->placements[0]);
1728 
1729 		if (mem_type_is_vram(place->mem_type)) {
1730 			xe_assert(xe, !list_empty(&bo->pinned_link));
1731 
1732 			spin_lock(&xe->pinned.lock);
1733 			list_del_init(&bo->pinned_link);
1734 			spin_unlock(&xe->pinned.lock);
1735 		}
1736 	}
1737 
1738 	ttm_bo_unpin(&bo->ttm);
1739 }
1740 
1741 /**
1742  * xe_bo_validate() - Make sure the bo is in an allowed placement
1743  * @bo: The bo.
1744  * @vm: Pointer to the vm the bo shares a locked dma_resv object with, or
1745  *      NULL. Used together with @allow_res_evict.
1746  * @allow_res_evict: Whether it's allowed to evict bos sharing @vm's
1747  *                   reservation object.
1748  *
1749  * Make sure the bo is in allowed placement, migrating it if necessary. If
1750  * needed, other bos will be evicted. If bos selected for eviction share
1751  * the @vm's reservation object, they can be evicted iff @allow_res_evict is
1752  * set to true, otherwise they will be bypassed.
1753  *
1754  * Return: 0 on success, negative error code on failure. May return
1755  * -EINTR or -ERESTARTSYS if internal waits are interrupted by a signal.
1756  */
1757 int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
1758 {
1759 	struct ttm_operation_ctx ctx = {
1760 		.interruptible = true,
1761 		.no_wait_gpu = false,
1762 	};
1763 
1764 	if (vm) {
1765 		lockdep_assert_held(&vm->lock);
1766 		xe_vm_assert_held(vm);
1767 
1768 		ctx.allow_res_evict = allow_res_evict;
1769 		ctx.resv = xe_vm_resv(vm);
1770 	}
1771 
1772 	return ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
1773 }
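
/*
 * Sketch of validating a VM-private BO before binding, assuming the caller
 * already holds vm->lock and the vm's dma_resv as asserted above:
 *
 *	err = xe_bo_validate(bo, vm, true);
 *	if (err)
 *		goto err_unlock;
 *
 * -EINTR / -ERESTARTSYS mean an internal wait was interrupted by a signal
 * and the whole operation can simply be retried.
 */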
1774 
1775 bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo)
1776 {
1777 	if (bo->destroy == &xe_ttm_bo_destroy)
1778 		return true;
1779 
1780 	return false;
1781 }
1782 
1783 /*
1784  * Resolve a BO address. There is no assert to check if the proper lock is held
1785  * so it should only be used in cases where it is not fatal to get the wrong
1786  * address, such as printing debug information, but not in cases where memory is
1787  * written based on this result.
1788  */
1789 dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
1790 {
1791 	struct xe_device *xe = xe_bo_device(bo);
1792 	struct xe_res_cursor cur;
1793 	u64 page;
1794 
1795 	xe_assert(xe, page_size <= PAGE_SIZE);
1796 	page = offset >> PAGE_SHIFT;
1797 	offset &= (PAGE_SIZE - 1);
1798 
1799 	if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) {
1800 		xe_assert(xe, bo->ttm.ttm);
1801 
1802 		xe_res_first_sg(xe_bo_sg(bo), page << PAGE_SHIFT,
1803 				page_size, &cur);
1804 		return xe_res_dma(&cur) + offset;
1805 	} else {
1806 		struct xe_res_cursor cur;
1807 
1808 		xe_res_first(bo->ttm.resource, page << PAGE_SHIFT,
1809 			     page_size, &cur);
1810 		return cur.start + offset + vram_region_gpu_offset(bo->ttm.resource);
1811 	}
1812 }
1813 
1814 dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
1815 {
1816 	if (!READ_ONCE(bo->ttm.pin_count))
1817 		xe_bo_assert_held(bo);
1818 	return __xe_bo_addr(bo, offset, page_size);
1819 }
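
/*
 * Example of the debug-only use case described above (sketch, not an
 * existing helper):
 *
 *	struct xe_device *xe = xe_bo_device(bo);
 *	dma_addr_t addr = __xe_bo_addr(bo, 0, PAGE_SIZE);
 *
 *	drm_dbg(&xe->drm, "bo starts at %pad\n", &addr);
 */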
1820 
1821 int xe_bo_vmap(struct xe_bo *bo)
1822 {
1823 	void *virtual;
1824 	bool is_iomem;
1825 	int ret;
1826 
1827 	xe_bo_assert_held(bo);
1828 
1829 	if (!(bo->flags & XE_BO_NEEDS_CPU_ACCESS))
1830 		return -EINVAL;
1831 
1832 	if (!iosys_map_is_null(&bo->vmap))
1833 		return 0;
1834 
1835 	/*
1836 	 * We use this more or less deprecated interface for now since
1837 	 * ttm_bo_vmap() doesn't offer the optimization of kmapping
1838 	 * single page bos, which is done here.
1839 	 * TODO: Fix up ttm_bo_vmap to do that, or fix up ttm_bo_kmap
1840 	 * to use struct iosys_map.
1841 	 */
1842 	ret = ttm_bo_kmap(&bo->ttm, 0, bo->size >> PAGE_SHIFT, &bo->kmap);
1843 	if (ret)
1844 		return ret;
1845 
1846 	virtual = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
1847 	if (is_iomem)
1848 		iosys_map_set_vaddr_iomem(&bo->vmap, (void __iomem *)virtual);
1849 	else
1850 		iosys_map_set_vaddr(&bo->vmap, virtual);
1851 
1852 	return 0;
1853 }
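
/*
 * A common pattern is to vmap a kernel BO created with
 * XE_BO_NEEDS_CPU_ACCESS and then write through the iosys_map helpers,
 * which hide the iomem vs. system-memory distinction set up above
 * (sketch; the BO lock must be held, error handling trimmed):
 *
 *	err = xe_bo_vmap(bo);
 *	if (err)
 *		return err;
 *
 *	xe_map_memcpy_to(xe, &bo->vmap, 0, data, len);
 *	xe_bo_vunmap(bo);
 */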
1854 
1855 static void __xe_bo_vunmap(struct xe_bo *bo)
1856 {
1857 	if (!iosys_map_is_null(&bo->vmap)) {
1858 		iosys_map_clear(&bo->vmap);
1859 		ttm_bo_kunmap(&bo->kmap);
1860 	}
1861 }
1862 
1863 void xe_bo_vunmap(struct xe_bo *bo)
1864 {
1865 	xe_bo_assert_held(bo);
1866 	__xe_bo_vunmap(bo);
1867 }
1868 
1869 int xe_gem_create_ioctl(struct drm_device *dev, void *data,
1870 			struct drm_file *file)
1871 {
1872 	struct xe_device *xe = to_xe_device(dev);
1873 	struct xe_file *xef = to_xe_file(file);
1874 	struct drm_xe_gem_create *args = data;
1875 	struct xe_vm *vm = NULL;
1876 	struct xe_bo *bo;
1877 	unsigned int bo_flags;
1878 	u32 handle;
1879 	int err;
1880 
1881 	if (XE_IOCTL_DBG(xe, args->extensions) ||
1882 	    XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
1883 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1884 		return -EINVAL;
1885 
1886 	/* at least one valid memory placement must be specified */
1887 	if (XE_IOCTL_DBG(xe, (args->placement & ~xe->info.mem_region_mask) ||
1888 			 !args->placement))
1889 		return -EINVAL;
1890 
1891 	if (XE_IOCTL_DBG(xe, args->flags &
1892 			 ~(DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING |
1893 			   DRM_XE_GEM_CREATE_FLAG_SCANOUT |
1894 			   DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM)))
1895 		return -EINVAL;
1896 
1897 	if (XE_IOCTL_DBG(xe, args->handle))
1898 		return -EINVAL;
1899 
1900 	if (XE_IOCTL_DBG(xe, !args->size))
1901 		return -EINVAL;
1902 
1903 	if (XE_IOCTL_DBG(xe, args->size > SIZE_MAX))
1904 		return -EINVAL;
1905 
1906 	if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK))
1907 		return -EINVAL;
1908 
1909 	bo_flags = 0;
1910 	if (args->flags & DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING)
1911 		bo_flags |= XE_BO_DEFER_BACKING;
1912 
1913 	if (args->flags & DRM_XE_GEM_CREATE_FLAG_SCANOUT)
1914 		bo_flags |= XE_BO_SCANOUT_BIT;
1915 
1916 	bo_flags |= args->placement << (ffs(XE_BO_CREATE_SYSTEM_BIT) - 1);
1917 
1918 	if (args->flags & DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) {
1919 		if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_CREATE_VRAM_MASK)))
1920 			return -EINVAL;
1921 
1922 		bo_flags |= XE_BO_NEEDS_CPU_ACCESS;
1923 	}
1924 
1925 	if (XE_IOCTL_DBG(xe, !args->cpu_caching ||
1926 			 args->cpu_caching > DRM_XE_GEM_CPU_CACHING_WC))
1927 		return -EINVAL;
1928 
1929 	if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_CREATE_VRAM_MASK &&
1930 			 args->cpu_caching != DRM_XE_GEM_CPU_CACHING_WC))
1931 		return -EINVAL;
1932 
1933 	if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_SCANOUT_BIT &&
1934 			 args->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB))
1935 		return -EINVAL;
1936 
1937 	if (args->vm_id) {
1938 		vm = xe_vm_lookup(xef, args->vm_id);
1939 		if (XE_IOCTL_DBG(xe, !vm))
1940 			return -ENOENT;
1941 		err = xe_vm_lock(vm, true);
1942 		if (err)
1943 			goto out_vm;
1944 	}
1945 
1946 	bo = xe_bo_create_user(xe, NULL, vm, args->size, args->cpu_caching,
1947 			       ttm_bo_type_device, bo_flags);
1948 
1949 	if (vm)
1950 		xe_vm_unlock(vm);
1951 
1952 	if (IS_ERR(bo)) {
1953 		err = PTR_ERR(bo);
1954 		goto out_vm;
1955 	}
1956 
1957 	err = drm_gem_handle_create(file, &bo->ttm.base, &handle);
1958 	if (err)
1959 		goto out_bulk;
1960 
1961 	args->handle = handle;
1962 	goto out_put;
1963 
1964 out_bulk:
1965 	if (vm && !xe_vm_in_fault_mode(vm)) {
1966 		xe_vm_lock(vm, false);
1967 		__xe_bo_unset_bulk_move(bo);
1968 		xe_vm_unlock(vm);
1969 	}
1970 out_put:
1971 	xe_bo_put(bo);
1972 out_vm:
1973 	if (vm)
1974 		xe_vm_put(vm);
1975 
1976 	return err;
1977 }
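/*
 * Illustrative userspace-side sketch (not part of the driver) of the
 * constraints enforced above: size must be non-zero and page-aligned, at
 * least one valid placement bit must be set, and VRAM placements require WC
 * CPU caching. Assumes libdrm's drmIoctl() and the uAPI definitions from
 * xe_drm.h; region_mask is assumed to come from the memory-regions query.
 */
#if 0
static int xe_example_gem_create(int fd, __u32 region_mask, __u64 size,
				 __u32 *handle)
{
	struct drm_xe_gem_create create = {
		.size = size,			/* non-zero, page-aligned */
		.placement = region_mask,	/* at least one valid region bit */
		.cpu_caching = DRM_XE_GEM_CPU_CACHING_WC,
	};

	if (drmIoctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create))
		return -errno;

	*handle = create.handle;
	return 0;
}
#endif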
1978 
1979 int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
1980 			     struct drm_file *file)
1981 {
1982 	struct xe_device *xe = to_xe_device(dev);
1983 	struct drm_xe_gem_mmap_offset *args = data;
1984 	struct drm_gem_object *gem_obj;
1985 
1986 	if (XE_IOCTL_DBG(xe, args->extensions) ||
1987 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1988 		return -EINVAL;
1989 
1990 	if (XE_IOCTL_DBG(xe, args->flags))
1991 		return -EINVAL;
1992 
1993 	gem_obj = drm_gem_object_lookup(file, args->handle);
1994 	if (XE_IOCTL_DBG(xe, !gem_obj))
1995 		return -ENOENT;
1996 
1997 	/* The mmap offset was set up at BO allocation time. */
1998 	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
1999 
2000 	xe_bo_put(gem_to_xe_bo(gem_obj));
2001 	return 0;
2002 }
2003 
2004 /**
2005  * xe_bo_lock() - Lock the buffer object's dma_resv object
2006  * @bo: The struct xe_bo whose lock is to be taken
2007  * @intr: Whether to perform any wait interruptibly
2008  *
2009  * Locks the buffer object's dma_resv object. If the buffer object is
2010  * pointing to a shared dma_resv object, that shared lock is locked.
2011  *
2012  * Return: 0 on success, -EINTR if @intr is true and the wait for a
2013  * contended lock was interrupted. If @intr is set to false, the
2014  * function always returns 0.
2015  */
2016 int xe_bo_lock(struct xe_bo *bo, bool intr)
2017 {
2018 	if (intr)
2019 		return dma_resv_lock_interruptible(bo->ttm.base.resv, NULL);
2020 
2021 	dma_resv_lock(bo->ttm.base.resv, NULL);
2022 
2023 	return 0;
2024 }
2025 
2026 /**
2027  * xe_bo_unlock() - Unlock the buffer object's dma_resv object
2028  * @bo: The struct xe_bo whose lock is to be released.
2029  *
2030  * Unlock a buffer object lock that was locked by xe_bo_lock().
2031  */
2032 void xe_bo_unlock(struct xe_bo *bo)
2033 {
2034 	dma_resv_unlock(bo->ttm.base.resv);
2035 }
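/*
 * Illustrative usage sketch (not part of the driver): the usual pattern for
 * taking and releasing a BO's dma_resv lock interruptibly. The helper name
 * xe_example_with_bo_locked() is hypothetical.
 */
#if 0
static int xe_example_with_bo_locked(struct xe_bo *bo)
{
	int err;

	err = xe_bo_lock(bo, true);
	if (err)
		return err;	/* -EINTR: the wait for the lock was interrupted */

	/* ... operate on the BO while its dma_resv lock is held ... */

	xe_bo_unlock(bo);
	return 0;
}
#endif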
2036 
2037 /**
2038  * xe_bo_can_migrate - Whether a buffer object likely can be migrated
2039  * @bo: The buffer object to migrate
2040  * @mem_type: The TTM memory type intended to migrate to
2041  *
2042  * Check whether the buffer object supports migration to the
2043  * given memory type. Note that a pinned buffer object may fail to migrate
2044  * even if this function returns true.
2045  *
2046  * This function is primarily intended as a helper for checking the
2047  * possibility to migrate buffer objects and can be called without
2048  * the object lock held.
2049  *
2050  * Return: true if migration is possible, false otherwise.
2051  */
2052 bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type)
2053 {
2054 	unsigned int cur_place;
2055 
2056 	if (bo->ttm.type == ttm_bo_type_kernel)
2057 		return true;
2058 
2059 	if (bo->ttm.type == ttm_bo_type_sg)
2060 		return false;
2061 
2062 	for (cur_place = 0; cur_place < bo->placement.num_placement;
2063 	     cur_place++) {
2064 		if (bo->placements[cur_place].mem_type == mem_type)
2065 			return true;
2066 	}
2067 
2068 	return false;
2069 }
2070 
2071 static void xe_place_from_ttm_type(u32 mem_type, struct ttm_place *place)
2072 {
2073 	memset(place, 0, sizeof(*place));
2074 	place->mem_type = mem_type;
2075 }
2076 
2077 /**
2078  * xe_bo_migrate - Migrate an object to the desired region id
2079  * @bo: The buffer object to migrate.
2080  * @mem_type: The TTM region type to migrate to.
2081  *
2082  * Attempt to migrate the buffer object to the desired memory region. The
2083  * buffer object must not be pinned, and must be locked.
2084  * On successful completion, the object memory type will be updated,
2085  * but an async migration task may still be in flight; to wait for it
2086  * to complete, wait for the object's kernel fences to signal while
2087  * holding the object lock.
2088  *
2089  * Return: 0 on success. Negative error code on failure. In particular may
2090  * return -EINTR or -ERESTARTSYS if signal pending.
2091  */
2092 int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
2093 {
2094 	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
2095 	struct ttm_operation_ctx ctx = {
2096 		.interruptible = true,
2097 		.no_wait_gpu = false,
2098 	};
2099 	struct ttm_placement placement;
2100 	struct ttm_place requested;
2101 
2102 	xe_bo_assert_held(bo);
2103 
2104 	if (bo->ttm.resource->mem_type == mem_type)
2105 		return 0;
2106 
2107 	if (xe_bo_is_pinned(bo))
2108 		return -EBUSY;
2109 
2110 	if (!xe_bo_can_migrate(bo, mem_type))
2111 		return -EINVAL;
2112 
2113 	xe_place_from_ttm_type(mem_type, &requested);
2114 	placement.num_placement = 1;
2115 	placement.num_busy_placement = 1;
2116 	placement.placement = &requested;
2117 	placement.busy_placement = &requested;
2118 
2119 	/*
2120 	 * Stolen would need to be handled like the VRAM handling below if we
2121 	 * ever need to support migrating to it.
2122 	 */
2123 	drm_WARN_ON(&xe->drm, mem_type == XE_PL_STOLEN);
2124 
2125 	if (mem_type_is_vram(mem_type)) {
2126 		u32 c = 0;
2127 
2128 		add_vram(xe, bo, &requested, bo->flags, mem_type, &c);
2129 	}
2130 
2131 	return ttm_bo_validate(&bo->ttm, &placement, &ctx);
2132 }
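/*
 * Illustrative usage sketch (not part of the driver): migrating a BO to the
 * first VRAM region and waiting for the asynchronous move, following the
 * locking and fence rules documented above. The helper name
 * xe_example_migrate_to_vram0() is hypothetical.
 */
#if 0
static int xe_example_migrate_to_vram0(struct xe_bo *bo)
{
	int err;

	if (!xe_bo_can_migrate(bo, XE_PL_VRAM0))
		return -EINVAL;

	err = xe_bo_lock(bo, true);
	if (err)
		return err;

	err = xe_bo_migrate(bo, XE_PL_VRAM0);
	if (!err) {
		/* Wait for the async migration with the object lock still held. */
		dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
				      false, MAX_SCHEDULE_TIMEOUT);
	}
	xe_bo_unlock(bo);

	return err;
}
#endif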
2133 
2134 /**
2135  * xe_bo_evict - Evict an object to its evict placement
2136  * @bo: The buffer object to evict.
2137  * @force_alloc: Set force_alloc in ttm_operation_ctx
2138  *
2139  * On successful completion, the object memory will be moved to the evict
2140  * placement. This function blocks until the object has been fully moved.
2141  *
2142  * Return: 0 on success. Negative error code on failure.
2143  */
2144 int xe_bo_evict(struct xe_bo *bo, bool force_alloc)
2145 {
2146 	struct ttm_operation_ctx ctx = {
2147 		.interruptible = false,
2148 		.no_wait_gpu = false,
2149 		.force_alloc = force_alloc,
2150 	};
2151 	struct ttm_placement placement;
2152 	int ret;
2153 
2154 	xe_evict_flags(&bo->ttm, &placement);
2155 	ret = ttm_bo_validate(&bo->ttm, &placement, &ctx);
2156 	if (ret)
2157 		return ret;
2158 
2159 	dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
2160 			      false, MAX_SCHEDULE_TIMEOUT);
2161 
2162 	return 0;
2163 }
2164 
2165 /**
2166  * xe_bo_needs_ccs_pages - Whether a bo needs to back up CCS pages when
2167  * placed in system memory.
2168  * @bo: The xe_bo
2169  *
2170  * Return: true if extra pages need to be allocated, false otherwise.
2171  */
2172 bool xe_bo_needs_ccs_pages(struct xe_bo *bo)
2173 {
2174 	struct xe_device *xe = xe_bo_device(bo);
2175 
2176 	if (!xe_device_has_flat_ccs(xe) || bo->ttm.type != ttm_bo_type_device)
2177 		return false;
2178 
2179 	/* On discrete GPUs, if the GPU can access this buffer from
2180 	 * system memory (i.e., it allows XE_PL_TT placement), FlatCCS
2181 	 * can't be used since there's no CCS storage associated with
2182 	 * non-VRAM addresses.
2183 	 */
2184 	if (IS_DGFX(xe) && (bo->flags & XE_BO_CREATE_SYSTEM_BIT))
2185 		return false;
2186 
2187 	return true;
2188 }
2189 
2190 /**
2191  * __xe_bo_release_dummy() - Dummy kref release function
2192  * @kref: The embedded struct kref.
2193  *
2194  * Dummy release function for xe_bo_put_deferred(). Do not call directly.
2195  */
2196 void __xe_bo_release_dummy(struct kref *kref)
2197 {
2198 }
2199 
2200 /**
2201  * xe_bo_put_commit() - Put bos whose put was deferred by xe_bo_put_deferred().
2202  * @deferred: The lockless list used for the call to xe_bo_put_deferred().
2203  *
2204  * Puts all bos whose put was deferred by xe_bo_put_deferred().
2205  * The @deferred list can be either an onstack local list or a global
2206  * shared list used by a workqueue.
2207  */
2208 void xe_bo_put_commit(struct llist_head *deferred)
2209 {
2210 	struct llist_node *freed;
2211 	struct xe_bo *bo, *next;
2212 
2213 	if (!deferred)
2214 		return;
2215 
2216 	freed = llist_del_all(deferred);
2217 	if (!freed)
2218 		return;
2219 
2220 	llist_for_each_entry_safe(bo, next, freed, freed)
2221 		drm_gem_object_free(&bo->ttm.base.refcount);
2222 }
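/*
 * Illustrative usage sketch (not part of the driver): deferring BO puts so
 * the final frees happen in one place, e.g. outside a sensitive context.
 * The helper name xe_example_put_two() is hypothetical; xe_bo_put_deferred()
 * is declared in xe_bo.h.
 */
#if 0
static void xe_example_put_two(struct xe_bo *a, struct xe_bo *b)
{
	LLIST_HEAD(deferred);

	/* Queue the puts; no BO is freed yet. */
	xe_bo_put_deferred(a, &deferred);
	xe_bo_put_deferred(b, &deferred);

	/* Free every BO whose refcount dropped to zero above. */
	xe_bo_put_commit(&deferred);
}
#endif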
2223 
2224 /**
2225  * xe_bo_dumb_create - Create a dumb bo as backing for a fb
2226  * @file_priv: The DRM file requesting the dumb buffer
2227  * @dev: The DRM device
2228  * @args: The dumb-buffer creation arguments, see struct drm_mode_create_dumb
2229  *
2230  * See dumb_create() hook in include/drm/drm_drv.h
2231  *
2232  * Return: 0 on success, negative error code on failure.
2233  */
2234 int xe_bo_dumb_create(struct drm_file *file_priv,
2235 		      struct drm_device *dev,
2236 		      struct drm_mode_create_dumb *args)
2237 {
2238 	struct xe_device *xe = to_xe_device(dev);
2239 	struct xe_bo *bo;
2240 	u32 handle;
2241 	int cpp = DIV_ROUND_UP(args->bpp, 8);
2242 	int err;
2243 	u32 page_size = max_t(u32, PAGE_SIZE,
2244 		xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K);
2245 
2246 	args->pitch = ALIGN(args->width * cpp, 64);
2247 	args->size = ALIGN(mul_u32_u32(args->pitch, args->height),
2248 			   page_size);
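	/*
	 * Example (illustrative): a 1920x1080 XRGB8888 dumb buffer has
	 * cpp = 4, pitch = ALIGN(1920 * 4, 64) = 7680 and
	 * size = ALIGN(7680 * 1080, page_size) = 8294400 bytes with 4K pages.
	 */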
2249 
2250 	bo = xe_bo_create_user(xe, NULL, NULL, args->size,
2251 			       DRM_XE_GEM_CPU_CACHING_WC,
2252 			       ttm_bo_type_device,
2253 			       XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
2254 			       XE_BO_CREATE_USER_BIT | XE_BO_SCANOUT_BIT |
2255 			       XE_BO_NEEDS_CPU_ACCESS);
2256 	if (IS_ERR(bo))
2257 		return PTR_ERR(bo);
2258 
2259 	err = drm_gem_handle_create(file_priv, &bo->ttm.base, &handle);
2260 	/* drop reference from allocate - handle holds it now */
2261 	drm_gem_object_put(&bo->ttm.base);
2262 	if (!err)
2263 		args->handle = handle;
2264 	return err;
2265 }
2266 
2267 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
2268 #include "tests/xe_bo.c"
2269 #endif
2270