/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_BO_H_
#define _XE_BO_H_

#include <drm/ttm/ttm_tt.h>

#include "xe_bo_types.h"
#include "xe_ggtt.h"
#include "xe_macros.h"
#include "xe_validation.h"
#include "xe_vm_types.h"
#include "xe_vm.h"
#include "xe_vram_types.h"

#define XE_DEFAULT_GTT_SIZE_MB          3072ULL /* 3GB by default */

#define XE_BO_FLAG_USER		BIT(0)
/* The bits below need to be contiguous, or things break */
#define XE_BO_FLAG_SYSTEM		BIT(1)
#define XE_BO_FLAG_VRAM0		BIT(2)
#define XE_BO_FLAG_VRAM1		BIT(3)
#define XE_BO_FLAG_VRAM_MASK		(XE_BO_FLAG_VRAM0 | XE_BO_FLAG_VRAM1)
/* -- */
#define XE_BO_FLAG_STOLEN		BIT(4)
#define XE_BO_FLAG_VRAM(vram)		(XE_BO_FLAG_VRAM0 << ((vram)->id))
#define XE_BO_FLAG_VRAM_IF_DGFX(tile)	(IS_DGFX(tile_to_xe(tile)) ? \
					 XE_BO_FLAG_VRAM((tile)->mem.vram) : \
					 XE_BO_FLAG_SYSTEM)
#define XE_BO_FLAG_GGTT			BIT(5)
#define XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE BIT(6)
#define XE_BO_FLAG_PINNED		BIT(7)
#define XE_BO_FLAG_NO_RESV_EVICT	BIT(8)
#define XE_BO_FLAG_DEFER_BACKING	BIT(9)
#define XE_BO_FLAG_FORCE_WC		BIT(10)
#define XE_BO_FLAG_FIXED_PLACEMENT	BIT(11)
#define XE_BO_FLAG_PAGETABLE		BIT(12)
#define XE_BO_FLAG_NEEDS_CPU_ACCESS	BIT(13)
#define XE_BO_FLAG_NEEDS_UC		BIT(14)
#define XE_BO_FLAG_NEEDS_64K		BIT(15)
#define XE_BO_FLAG_NEEDS_2M		BIT(16)
#define XE_BO_FLAG_GGTT_INVALIDATE	BIT(17)
#define XE_BO_FLAG_PINNED_NORESTORE	BIT(18)
#define XE_BO_FLAG_PINNED_LATE_RESTORE	BIT(19)
#define XE_BO_FLAG_GGTT0		BIT(20)
#define XE_BO_FLAG_GGTT1		BIT(21)
#define XE_BO_FLAG_GGTT2		BIT(22)
#define XE_BO_FLAG_GGTT3		BIT(23)
#define XE_BO_FLAG_CPU_ADDR_MIRROR	BIT(24)
#define XE_BO_FLAG_FORCE_USER_VRAM	BIT(25)
#define XE_BO_FLAG_NO_COMPRESSION	BIT(26)

/* this one is triggered internally only */
#define XE_BO_FLAG_INTERNAL_TEST	BIT(30)
#define XE_BO_FLAG_INTERNAL_64K		BIT(31)

#define XE_BO_FLAG_GGTT_ALL		(XE_BO_FLAG_GGTT0 | \
					 XE_BO_FLAG_GGTT1 | \
					 XE_BO_FLAG_GGTT2 | \
					 XE_BO_FLAG_GGTT3)

#define XE_BO_FLAG_GGTTx(tile) \
	(XE_BO_FLAG_GGTT0 << (tile)->id)

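/*
 * Example (illustrative sketch): placement and binding flags are OR'd
 * together by callers. A kernel BO that should live in VRAM on discrete
 * parts (falling back to system memory on integrated) and be mapped into
 * the GGTT would use something like the following; the tile variable is
 * hypothetical.
 *
 *	u32 flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT;
 */
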
#define XE_PTE_SHIFT			12
#define XE_PAGE_SIZE			(1 << XE_PTE_SHIFT)
#define XE_PTE_MASK			(XE_PAGE_SIZE - 1)
#define XE_PDE_SHIFT			(XE_PTE_SHIFT - 3)
#define XE_PDES				(1 << XE_PDE_SHIFT)
#define XE_PDE_MASK			(XE_PDES - 1)

#define XE_64K_PTE_SHIFT		16
#define XE_64K_PAGE_SIZE		(1 << XE_64K_PTE_SHIFT)
#define XE_64K_PTE_MASK			(XE_64K_PAGE_SIZE - 1)
#define XE_64K_PDE_MASK			(XE_PDE_MASK >> 4)

#define XE_PL_SYSTEM		TTM_PL_SYSTEM
#define XE_PL_TT		TTM_PL_TT
#define XE_PL_VRAM0		TTM_PL_VRAM
#define XE_PL_VRAM1		(XE_PL_VRAM0 + 1)
#define XE_PL_STOLEN		(TTM_NUM_MEM_TYPES - 1)

#define XE_BO_PROPS_INVALID	(-1)

#define XE_PCI_BARRIER_MMAP_OFFSET	(0x50 << XE_PTE_SHIFT)

/**
 * enum xe_madv_purgeable_state - Buffer object purgeable state enumeration
 *
 * This enum defines the possible purgeable states for a buffer object,
 * allowing userspace to provide memory usage hints to the kernel for
 * better memory management under pressure.
 *
 * @XE_MADV_PURGEABLE_WILLNEED: The buffer object is needed and should not be purged.
 * This is the default state.
 * @XE_MADV_PURGEABLE_DONTNEED: The buffer object is not currently needed and can be
 * purged by the kernel under memory pressure.
 * @XE_MADV_PURGEABLE_PURGED: The buffer object has been purged by the kernel.
 *
 * Accessing a purged buffer will result in an error. Per i915 semantics,
 * once purged, a BO remains permanently invalid and must be destroyed and recreated.
 */
enum xe_madv_purgeable_state {
	XE_MADV_PURGEABLE_WILLNEED,
	XE_MADV_PURGEABLE_DONTNEED,
	XE_MADV_PURGEABLE_PURGED,
};

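/*
 * Example (illustrative sketch, not lifted from the driver): a madvise-style
 * flow around the purgeable state. Userspace marks a BO DONTNEED when its
 * contents are disposable and WILLNEED before reuse; the kernel may purge a
 * DONTNEED BO under memory pressure, after which the BO must be recreated.
 * The surrounding plumbing and error handling are hypothetical; the helpers
 * used are the ones declared further below in this header.
 *
 *	xe_bo_lock(bo, false);
 *	if (xe_bo_is_purged(bo)) {
 *		(contents are gone for good; the caller must recreate the BO)
 *	} else if (xe_bo_madv_is_dontneed(bo)) {
 *		xe_bo_set_purgeable_state(bo, XE_MADV_PURGEABLE_WILLNEED);
 *	}
 *	xe_bo_unlock(bo);
 */
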
struct sg_table;

struct xe_bo *xe_bo_alloc(void);
void xe_bo_free(struct xe_bo *bo);

struct xe_bo *xe_bo_init_locked(struct xe_device *xe, struct xe_bo *bo,
				struct xe_tile *tile, struct dma_resv *resv,
				struct ttm_lru_bulk_move *bulk, size_t size,
				u16 cpu_caching, enum ttm_bo_type type,
				u32 flags, struct drm_exec *exec);
struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
				  struct xe_vm *vm, size_t size,
				  enum ttm_bo_type type, u32 flags,
				  struct drm_exec *exec);
struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_vm *vm, size_t size,
				u16 cpu_caching, u32 flags, struct drm_exec *exec);
struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
				   struct xe_vm *vm, size_t size,
				   enum ttm_bo_type type, u32 flags,
				   struct drm_exec *exec);
struct xe_bo *xe_bo_create_pin_map_novm(struct xe_device *xe, struct xe_tile *tile,
					size_t size, enum ttm_bo_type type, u32 flags,
					bool intr);
struct xe_bo *xe_bo_create_pin_range_novm(struct xe_device *xe, struct xe_tile *tile,
					  size_t size, u64 start, u64 end,
					  enum ttm_bo_type type, u32 flags);
struct xe_bo *
xe_bo_create_pin_map_at_novm(struct xe_device *xe, struct xe_tile *tile,
			     size_t size, u64 offset, enum ttm_bo_type type,
			     u32 flags, u64 alignment, bool intr);
struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
					   size_t size, u32 flags);
void xe_managed_bo_unpin_map_no_vm(struct xe_bo *bo);
struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
					     const void *data, size_t size, u32 flags);
int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src);

int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
			      u32 bo_flags, enum ttm_bo_type type);

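/*
 * Example (sketch, assuming ERR_PTR-style error returns as is usual for
 * these constructors): a kernel-internal BO created, pinned and mapped in
 * one call, and later torn down with xe_bo_unpin_map_no_vm() (defined
 * further below). The size and flag choices are hypothetical.
 *
 *	bo = xe_bo_create_pin_map_novm(xe, tile, SZ_64K, ttm_bo_type_kernel,
 *				       XE_BO_FLAG_VRAM_IF_DGFX(tile) |
 *				       XE_BO_FLAG_GGTT, false);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 *	(... use the BO ...)
 *
 *	xe_bo_unpin_map_no_vm(bo);
 */
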
static inline struct xe_bo *ttm_to_xe_bo(const struct ttm_buffer_object *bo)
{
	return container_of(bo, struct xe_bo, ttm);
}

static inline struct xe_bo *gem_to_xe_bo(const struct drm_gem_object *obj)
{
	return container_of(obj, struct xe_bo, ttm.base);
}

#define xe_bo_device(bo) ttm_to_xe_device((bo)->ttm.bdev)

static inline struct xe_bo *xe_bo_get(struct xe_bo *bo)
{
	if (bo)
		drm_gem_object_get(&bo->ttm.base);

	return bo;
}

void xe_bo_put(struct xe_bo *bo);

/*
 * xe_bo_get_unless_zero() - Conditionally obtain a GEM object refcount on an
 * xe bo
 * @bo: The bo for which we want to obtain a refcount.
 *
 * There is a short window between where the bo's GEM object refcount reaches
 * zero and where we put the final ttm_bo reference. Code in the eviction- and
 * shrinking path should therefore attempt to grab a gem object reference before
 * trying to use members outside of the base class ttm object. This function is
 * intended for that purpose. On successful return, this function must be paired
 * with an xe_bo_put().
 *
 * Return: @bo on success, NULL on failure.
 */
static inline __must_check struct xe_bo *xe_bo_get_unless_zero(struct xe_bo *bo)
{
	if (!bo || !kref_get_unless_zero(&bo->ttm.base.refcount))
		return NULL;

	return bo;
}

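/*
 * Example (sketch under assumed context): a shrinker or eviction path that
 * starts from a TTM object takes a GEM reference before touching xe_bo
 * members outside the embedded ttm object. The surrounding callback is
 * hypothetical; only the helpers from this header are assumed to exist.
 *
 *	struct xe_bo *xe_bo = ttm_to_xe_bo(ttm_bo);
 *
 *	xe_bo = xe_bo_get_unless_zero(xe_bo);
 *	if (!xe_bo)
 *		return;
 *
 *	(... safely use xe_bo members here ...)
 *
 *	xe_bo_put(xe_bo);
 */
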
static inline void __xe_bo_unset_bulk_move(struct xe_bo *bo)
{
	if (bo)
		ttm_bo_set_bulk_move(&bo->ttm, NULL);
}

static inline void xe_bo_assert_held(struct xe_bo *bo)
{
	if (bo)
		dma_resv_assert_held((bo)->ttm.base.resv);
}

int xe_bo_lock(struct xe_bo *bo, bool intr);

void xe_bo_unlock(struct xe_bo *bo);

static inline void xe_bo_unlock_vm_held(struct xe_bo *bo)
{
	if (bo) {
		XE_WARN_ON(bo->vm && bo->ttm.base.resv != xe_vm_resv(bo->vm));
		if (bo->vm)
			xe_vm_assert_held(bo->vm);
		else
			dma_resv_unlock(bo->ttm.base.resv);
	}
}

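/*
 * Example (illustrative sketch): the common lock/unlock pattern around the
 * BO's dma-resv. With @intr set the lock may be interrupted and the error
 * must be handled; the body between the calls is a placeholder.
 *
 *	int err;
 *
 *	err = xe_bo_lock(bo, true);
 *	if (err)
 *		return err;
 *
 *	(... operate on the BO, e.g. xe_bo_is_purged(bo) ...)
 *
 *	xe_bo_unlock(bo);
 */
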
int xe_bo_pin_external(struct xe_bo *bo, bool in_place, struct drm_exec *exec);
int xe_bo_pin(struct xe_bo *bo, struct drm_exec *exec);
void xe_bo_unpin_external(struct xe_bo *bo);
void xe_bo_unpin(struct xe_bo *bo);
int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict,
		   struct drm_exec *exec);

static inline bool xe_bo_is_pinned(struct xe_bo *bo)
{
	return bo->ttm.pin_count;
}

static inline bool xe_bo_is_protected(const struct xe_bo *bo)
{
	return bo->pxp_key_instance;
}

/**
 * xe_bo_is_purged() - Check if buffer object has been purged
 * @bo: The buffer object to check
 *
 * Checks if the buffer object's backing store has been discarded by the
 * kernel due to memory pressure after being marked as purgeable (DONTNEED).
 * Once purged, the BO cannot be restored and any attempt to use it will fail.
 *
 * Context: Caller must hold the BO's dma-resv lock
 * Return: true if the BO has been purged, false otherwise
 */
static inline bool xe_bo_is_purged(struct xe_bo *bo)
{
	xe_bo_assert_held(bo);
	return bo->purgeable.state == XE_MADV_PURGEABLE_PURGED;
}

/**
 * xe_bo_madv_is_dontneed() - Check if BO is marked as DONTNEED
 * @bo: The buffer object to check
 *
 * Checks if userspace has marked this BO as DONTNEED (i.e., its contents
 * are not currently needed and can be discarded under memory pressure).
 * This is used internally to decide whether a BO is eligible for purging.
 *
 * Context: Caller must hold the BO's dma-resv lock
 * Return: true if the BO is marked DONTNEED, false otherwise
 */
static inline bool xe_bo_madv_is_dontneed(struct xe_bo *bo)
{
	xe_bo_assert_held(bo);
	return bo->purgeable.state == XE_MADV_PURGEABLE_DONTNEED;
}

void xe_bo_set_purgeable_state(struct xe_bo *bo, enum xe_madv_purgeable_state new_state);

/**
 * xe_bo_willneed_get_locked() - Acquire a WILLNEED holder on a BO
 * @bo: Buffer object
 *
 * Increments willneed_count and, on a 0->1 transition, promotes the BO
 * from DONTNEED to WILLNEED. PURGED is terminal and is never modified.
 *
 * Caller must hold the BO's dma-resv lock.
 */
static inline void xe_bo_willneed_get_locked(struct xe_bo *bo)
{
	xe_bo_assert_held(bo);

	/* Imported BOs are owned externally; do not track purgeability. */
	if (drm_gem_is_imported(&bo->ttm.base))
		return;

	if (bo->purgeable.willneed_count++ == 0 && xe_bo_madv_is_dontneed(bo))
		xe_bo_set_purgeable_state(bo, XE_MADV_PURGEABLE_WILLNEED);
}

/**
 * xe_bo_willneed_put_locked() - Release a WILLNEED holder on a BO
 * @bo: Buffer object
 *
 * Decrements willneed_count and, on a 1->0 transition, marks the BO
 * DONTNEED only if it still has VMAs (implying all active VMAs are
 * DONTNEED). If the last VMA is being removed, preserve the current BO
 * state to match the previous VMA-walk semantics.
 *
 * PURGED is terminal and the BO state is never modified.
 *
 * Caller must hold the BO's dma-resv lock.
 */
static inline void xe_bo_willneed_put_locked(struct xe_bo *bo)
{
	xe_bo_assert_held(bo);

	if (drm_gem_is_imported(&bo->ttm.base))
		return;

	xe_assert(xe_bo_device(bo), bo->purgeable.willneed_count > 0);
	if (--bo->purgeable.willneed_count == 0 && bo->purgeable.vma_count > 0 &&
	    !xe_bo_is_purged(bo))
		xe_bo_set_purgeable_state(bo, XE_MADV_PURGEABLE_DONTNEED);
}

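/*
 * Example (sketch, hypothetical call sites): a caller that temporarily needs
 * the BO contents takes a WILLNEED holder while it uses the BO, which keeps
 * a DONTNEED BO from being purged in the meantime, then drops the holder.
 * Both calls must be made under the BO's dma-resv lock.
 *
 *	xe_bo_lock(bo, false);
 *	xe_bo_willneed_get_locked(bo);
 *	xe_bo_unlock(bo);
 *
 *	(... use the BO contents ...)
 *
 *	xe_bo_lock(bo, false);
 *	xe_bo_willneed_put_locked(bo);
 *	xe_bo_unlock(bo);
 */
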
/**
 * xe_bo_vma_count_inc_locked() - Account a new VMA on a BO
 * @bo: Buffer object
 *
 * Increments vma_count.
 *
 * Caller must hold the BO's dma-resv lock.
 */
static inline void xe_bo_vma_count_inc_locked(struct xe_bo *bo)
{
	xe_bo_assert_held(bo);

	if (drm_gem_is_imported(&bo->ttm.base))
		return;

	bo->purgeable.vma_count++;
}

/**
 * xe_bo_vma_count_dec_locked() - Account a VMA removal on a BO
 * @bo: Buffer object
 *
 * Decrements vma_count.
 *
 * Caller must hold the BO's dma-resv lock.
 */
static inline void xe_bo_vma_count_dec_locked(struct xe_bo *bo)
{
	xe_bo_assert_held(bo);

	if (drm_gem_is_imported(&bo->ttm.base))
		return;

	xe_assert(xe_bo_device(bo), bo->purgeable.vma_count > 0);
	bo->purgeable.vma_count--;
}

static inline void xe_bo_unpin_map_no_vm(struct xe_bo *bo)
{
	if (likely(bo)) {
		xe_bo_lock(bo, false);
		xe_bo_unpin(bo);
		xe_bo_unlock(bo);

		xe_bo_put(bo);
	}
}

bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo);
dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size);
dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size);

static inline dma_addr_t
xe_bo_main_addr(struct xe_bo *bo, size_t page_size)
{
	return xe_bo_addr(bo, 0, page_size);
}

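/*
 * Example (illustrative, hypothetical variable names): page-table style
 * users typically query the address of the first page of a BO at the
 * standard GPU page size.
 *
 *	dma_addr_t pt_addr = xe_bo_main_addr(pt_bo, XE_PAGE_SIZE);
 */
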
/**
 * xe_bo_size() - Xe BO size
 * @bo: The bo object.
 *
 * Simple helper to return Xe BO's size.
 *
 * Return: Xe BO's size
 */
static inline size_t xe_bo_size(struct xe_bo *bo)
{
	return bo->ttm.base.size;
}

static inline u32
__xe_bo_ggtt_addr(struct xe_bo *bo, u8 tile_id)
{
	struct xe_ggtt_node *ggtt_node = bo->ggtt_node[tile_id];
	u64 offset;

	if (XE_WARN_ON(!ggtt_node))
		return 0;

	offset = xe_ggtt_node_addr(ggtt_node);
	XE_WARN_ON(offset + xe_bo_size(bo) > (1ull << 32));
	return offset;
}

static inline u32
xe_bo_ggtt_addr(struct xe_bo *bo)
{
	xe_assert(xe_bo_device(bo), bo->tile);

	return __xe_bo_ggtt_addr(bo, bo->tile->id);
}

int xe_bo_vmap(struct xe_bo *bo);
void xe_bo_vunmap(struct xe_bo *bo);
int xe_bo_read(struct xe_bo *bo, u64 offset, void *dst, int size);

bool mem_type_is_vram(u32 mem_type);
bool xe_bo_is_vram(struct xe_bo *bo);
bool xe_bo_is_visible_vram(struct xe_bo *bo);
bool xe_bo_is_stolen(struct xe_bo *bo);
bool xe_bo_is_stolen_devmem(struct xe_bo *bo);
bool xe_bo_is_vm_bound(struct xe_bo *bo);
bool xe_bo_has_single_placement(struct xe_bo *bo);
uint64_t vram_region_gpu_offset(struct ttm_resource *res);

bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type);

int xe_bo_migrate(struct xe_bo *bo, u32 mem_type, struct ttm_operation_ctx *ctx,
		  struct drm_exec *exec);
int xe_bo_evict(struct xe_bo *bo, struct drm_exec *exec);

int xe_bo_evict_pinned(struct xe_bo *bo);
int xe_bo_notifier_prepare_pinned(struct xe_bo *bo);
int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo);
int xe_bo_restore_pinned(struct xe_bo *bo);

int xe_bo_dma_unmap_pinned(struct xe_bo *bo);

extern const struct ttm_device_funcs xe_ttm_funcs;
extern const char *const xe_mem_type_to_name[];

int xe_gem_create_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file);
void xe_bo_runtime_pm_release_mmap_offset(struct xe_bo *bo);

int xe_bo_dumb_create(struct drm_file *file_priv,
		      struct drm_device *dev,
		      struct drm_mode_create_dumb *args);

bool xe_bo_needs_ccs_pages(struct xe_bo *bo);

int xe_bo_decompress(struct xe_bo *bo);

static inline size_t xe_bo_ccs_pages_start(struct xe_bo *bo)
{
	return PAGE_ALIGN(xe_bo_size(bo));
}

/**
 * xe_bo_has_valid_ccs_bb - Check if CCS's BBs were setup for the BO.
 * @bo: the &xe_bo to check
 *
 * The CCS's BBs should only be set up by the VF driver, but it is safe
 * to call this function from a non-VF driver as well.
 *
 * Return: true iff the CCS's BBs are setup, false otherwise.
 */
static inline bool xe_bo_has_valid_ccs_bb(struct xe_bo *bo)
{
	return bo->bb_ccs[XE_SRIOV_VF_CCS_READ_CTX] &&
	       bo->bb_ccs[XE_SRIOV_VF_CCS_WRITE_CTX];
}

static inline bool xe_bo_has_pages(struct xe_bo *bo)
{
	if ((bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) ||
	    xe_bo_is_vram(bo))
		return true;

	return false;
}

void __xe_bo_release_dummy(struct kref *kref);

/**
 * xe_bo_put_deferred() - Put a buffer object with delayed final freeing
 * @bo: The bo to put.
 * @deferred: List to which to add the buffer object if we cannot put, or
 * NULL if the function is to put unconditionally.
 *
 * Since the final freeing of an object includes both sleeping and (!)
 * memory allocation in the dma_resv individualization, it's not ok
 * to put an object from atomic context nor while holding a lock
 * tainted by reclaim. In such situations we want to defer the final
 * freeing until we've exited the restricting context, or in the worst
 * case to a workqueue.
 * This function either puts the object if possible without the refcount
 * reaching zero, or adds it to the @deferred list if that was not possible.
 * The caller needs to follow up with a call to xe_bo_put_commit() to actually
 * put the bo iff this function returns true. It's safe to always
 * follow up with a call to xe_bo_put_commit().
 * TODO: It's TTM that is the villain here. Perhaps TTM should add an
 * interface like this.
 *
 * Return: true if @bo was the first object put on the @deferred list,
 * false otherwise.
 */
static inline bool
xe_bo_put_deferred(struct xe_bo *bo, struct llist_head *deferred)
{
	if (!deferred) {
		xe_bo_put(bo);
		return false;
	}

	if (!kref_put(&bo->ttm.base.refcount, __xe_bo_release_dummy))
		return false;

	return llist_add(&bo->freed, deferred);
}

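/*
 * Example (sketch, assuming a caller in atomic or reclaim-tainted context):
 * collect puts on a local llist and commit them once the restricting
 * context has been left. Only the helpers from this header are real; the
 * surrounding lock and list are hypothetical.
 *
 *	LLIST_HEAD(deferred);
 *
 *	spin_lock(&lock);
 *	list_for_each_entry_safe(obj, next, &list, link)
 *		xe_bo_put_deferred(obj->bo, &deferred);
 *	spin_unlock(&lock);
 *
 *	xe_bo_put_commit(&deferred);
 */
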
void xe_bo_put_commit(struct llist_head *deferred);

/**
 * xe_bo_put_async() - Put BO async
 * @bo: The bo to put.
 *
 * Put a BO asynchronously: the final put is deferred to a worker so that
 * this function may be called from IRQ context.
 */
static inline void
xe_bo_put_async(struct xe_bo *bo)
{
	struct xe_bo_dev *bo_device = &xe_bo_device(bo)->bo_device;

	if (xe_bo_put_deferred(bo, &bo_device->async_list))
		schedule_work(&bo_device->async_free);
}

void xe_bo_dev_init(struct xe_bo_dev *bo_device);

void xe_bo_dev_fini(struct xe_bo_dev *bo_device);

struct sg_table *xe_bo_sg(struct xe_bo *bo);

/*
 * xe_sg_segment_size() - Provides upper limit for sg segment size.
 * @dev: device pointer
 *
 * Returns the maximum segment size for the 'struct scatterlist'
 * elements.
 */
static inline unsigned int xe_sg_segment_size(struct device *dev)
{
	struct scatterlist __maybe_unused sg;
	size_t max = BIT_ULL(sizeof(sg.length) * 8) - 1;

	max = min_t(size_t, max, dma_max_mapping_size(dev));

	/*
	 * The iommu_dma_map_sg() function ensures iova allocation doesn't
	 * cross dma segment boundary. It does so by padding some sg elements.
	 * This can cause overflow, ending up with sg->length being set to 0.
	 * Avoid this by ensuring maximum segment size is half of 'max'
	 * rounded down to PAGE_SIZE.
	 */
	return round_down(max / 2, PAGE_SIZE);
}

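/*
 * Example (sketch, hypothetical variables): the value returned above is
 * typically passed as the maximum segment size when building an sg table
 * from a page array, e.g. via sg_alloc_table_from_pages_segment().
 *
 *	unsigned int max_seg = xe_sg_segment_size(xe->drm.dev);
 *	int err;
 *
 *	err = sg_alloc_table_from_pages_segment(sgt, pages, num_pages, 0,
 *						(u64)num_pages << PAGE_SHIFT,
 *						max_seg, GFP_KERNEL);
 */
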
/**
 * struct xe_bo_shrink_flags - flags governing the shrink behaviour.
 * @purge: Only purging allowed. Don't shrink if bo not purgeable.
 * @writeback: Attempt to immediately move content to swap.
 */
struct xe_bo_shrink_flags {
	u32 purge : 1;
	u32 writeback : 1;
};

long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
		  const struct xe_bo_shrink_flags flags,
		  unsigned long *scanned);

/**
 * xe_bo_is_mem_type - Whether the bo currently resides in the given
 * TTM memory type
 * @bo: The bo to check.
 * @mem_type: The TTM memory type.
 *
 * Return: true iff the bo resides in @mem_type, false otherwise.
 */
static inline bool xe_bo_is_mem_type(struct xe_bo *bo, u32 mem_type)
{
	xe_bo_assert_held(bo);
	return bo->ttm.resource->mem_type == mem_type;
}
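
/*
 * Example (illustrative): combined with the XE_PL_* placement aliases near
 * the top of this header, the helper above answers placement questions
 * under the BO's dma-resv lock. The branch bodies are placeholders.
 *
 *	xe_bo_lock(bo, false);
 *	if (xe_bo_is_mem_type(bo, XE_PL_VRAM0) ||
 *	    xe_bo_is_mem_type(bo, XE_PL_VRAM1))
 *		(... BO currently resides in local memory ...)
 *	else if (xe_bo_is_mem_type(bo, XE_PL_TT))
 *		(... BO currently resides in GPU-bindable system memory ...)
 *	xe_bo_unlock(bo);
 */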
#endif