/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_BO_H_
#define _XE_BO_H_

#include <drm/ttm/ttm_tt.h>

#include "xe_bo_types.h"
#include "xe_macros.h"
#include "xe_vm_types.h"
#include "xe_vm.h"

/**
 * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
 * @vm: The vm
 */
#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))

#define XE_DEFAULT_GTT_SIZE_MB          3072ULL /* 3GB by default */

#define XE_BO_CREATE_USER_BIT		BIT(0)
/* The bits below need to be contiguous, or things break */
#define XE_BO_CREATE_SYSTEM_BIT		BIT(1)
#define XE_BO_CREATE_VRAM0_BIT		BIT(2)
#define XE_BO_CREATE_VRAM1_BIT		BIT(3)
#define XE_BO_CREATE_VRAM_MASK		(XE_BO_CREATE_VRAM0_BIT | \
					 XE_BO_CREATE_VRAM1_BIT)
/* -- */
#define XE_BO_CREATE_STOLEN_BIT		BIT(4)
#define XE_BO_CREATE_VRAM_IF_DGFX(tile) \
	(IS_DGFX(tile_to_xe(tile)) ? XE_BO_CREATE_VRAM0_BIT << (tile)->id : \
	 XE_BO_CREATE_SYSTEM_BIT)
#define XE_BO_CREATE_GGTT_BIT		BIT(5)
#define XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT BIT(6)
#define XE_BO_CREATE_PINNED_BIT		BIT(7)
#define XE_BO_CREATE_NO_RESV_EVICT	BIT(8)
#define XE_BO_DEFER_BACKING		BIT(9)
#define XE_BO_SCANOUT_BIT		BIT(10)
#define XE_BO_FIXED_PLACEMENT_BIT	BIT(11)
#define XE_BO_PAGETABLE			BIT(12)
#define XE_BO_NEEDS_CPU_ACCESS		BIT(13)
/* this one is triggered internally only */
#define XE_BO_INTERNAL_TEST		BIT(30)
#define XE_BO_INTERNAL_64K		BIT(31)

#define XELPG_PPGTT_PTE_PAT3		BIT_ULL(62)
#define XE2_PPGTT_PTE_PAT4		BIT_ULL(61)
#define XE_PPGTT_PDE_PDPE_PAT2		BIT_ULL(12)
#define XE_PPGTT_PTE_PAT2		BIT_ULL(7)
#define XE_PPGTT_PTE_PAT1		BIT_ULL(4)
#define XE_PPGTT_PTE_PAT0		BIT_ULL(3)

#define XE_PTE_SHIFT			12
#define XE_PAGE_SIZE			(1 << XE_PTE_SHIFT)
#define XE_PTE_MASK			(XE_PAGE_SIZE - 1)
#define XE_PDE_SHIFT			(XE_PTE_SHIFT - 3)
#define XE_PDES				(1 << XE_PDE_SHIFT)
#define XE_PDE_MASK			(XE_PDES - 1)
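
/*
 * Note: with XE_PTE_SHIFT = 12 a GPU page is 1 << 12 = 4 KiB; assuming
 * 8-byte (u64) page-table entries, one such page holds
 * 1 << (12 - 3) = 512 entries, which is where XE_PDE_SHIFT and XE_PDES
 * come from.
 */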

#define XE_64K_PTE_SHIFT		16
#define XE_64K_PAGE_SIZE		(1 << XE_64K_PTE_SHIFT)
#define XE_64K_PTE_MASK			(XE_64K_PAGE_SIZE - 1)
#define XE_64K_PDE_MASK			(XE_PDE_MASK >> 4)

#define XE_PDE_PS_2M			BIT_ULL(7)
#define XE_PDPE_PS_1G			BIT_ULL(7)
#define XE_PDE_IPS_64K			BIT_ULL(11)

#define XE_GGTT_PTE_DM			BIT_ULL(1)
#define XE_USM_PPGTT_PTE_AE		BIT_ULL(10)
#define XE_PPGTT_PTE_DM			BIT_ULL(11)
#define XE_PDE_64K			BIT_ULL(6)
#define XE_PTE_PS64			BIT_ULL(8)
#define XE_PTE_NULL			BIT_ULL(9)

#define XE_PAGE_PRESENT			BIT_ULL(0)
#define XE_PAGE_RW			BIT_ULL(1)

#define XE_PL_SYSTEM		TTM_PL_SYSTEM
#define XE_PL_TT		TTM_PL_TT
#define XE_PL_VRAM0		TTM_PL_VRAM
#define XE_PL_VRAM1		(XE_PL_VRAM0 + 1)
#define XE_PL_STOLEN		(TTM_NUM_MEM_TYPES - 1)

#define XE_BO_PROPS_INVALID	(-1)

struct sg_table;

struct xe_bo *xe_bo_alloc(void);
void xe_bo_free(struct xe_bo *bo);

struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
				     struct xe_tile *tile, struct dma_resv *resv,
				     struct ttm_lru_bulk_move *bulk, size_t size,
				     u16 cpu_caching, enum ttm_bo_type type,
				     u32 flags);
struct xe_bo *
xe_bo_create_locked_range(struct xe_device *xe,
			  struct xe_tile *tile, struct xe_vm *vm,
			  size_t size, u64 start, u64 end,
			  enum ttm_bo_type type, u32 flags);
struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
				  struct xe_vm *vm, size_t size,
				  enum ttm_bo_type type, u32 flags);
struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile,
			   struct xe_vm *vm, size_t size,
			   enum ttm_bo_type type, u32 flags);
struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
				struct xe_vm *vm, size_t size,
				u16 cpu_caching,
				enum ttm_bo_type type,
				u32 flags);
struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
				   struct xe_vm *vm, size_t size,
				   enum ttm_bo_type type, u32 flags);
struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
				      struct xe_vm *vm, size_t size, u64 offset,
				      enum ttm_bo_type type, u32 flags);
struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
				     const void *data, size_t size,
				     enum ttm_bo_type type, u32 flags);
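
/*
 * Example (illustrative sketch, not taken verbatim from the driver): a
 * kernel-internal buffer placed in VRAM on discrete parts (or in system
 * memory otherwise), mapped into the GGTT and CPU-mapped in one call:
 *
 *	bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K,
 *				  ttm_bo_type_kernel,
 *				  XE_BO_CREATE_VRAM_IF_DGFX(tile) |
 *				  XE_BO_CREATE_GGTT_BIT);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 * Kernel-internal callers typically pass a NULL vm, in which case the bo
 * gets its own reservation object.
 */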

int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
			      u32 bo_flags);

static inline struct xe_bo *ttm_to_xe_bo(const struct ttm_buffer_object *bo)
{
	return container_of(bo, struct xe_bo, ttm);
}

static inline struct xe_bo *gem_to_xe_bo(const struct drm_gem_object *obj)
{
	return container_of(obj, struct xe_bo, ttm.base);
}

#define xe_bo_device(bo) ttm_to_xe_device((bo)->ttm.bdev)

static inline struct xe_bo *xe_bo_get(struct xe_bo *bo)
{
	if (bo)
		drm_gem_object_get(&bo->ttm.base);

	return bo;
}

static inline void xe_bo_put(struct xe_bo *bo)
{
	if (bo)
		drm_gem_object_put(&bo->ttm.base);
}

static inline void __xe_bo_unset_bulk_move(struct xe_bo *bo)
{
	if (bo)
		ttm_bo_set_bulk_move(&bo->ttm, NULL);
}

static inline void xe_bo_assert_held(struct xe_bo *bo)
{
	if (bo)
		dma_resv_assert_held(bo->ttm.base.resv);
}

int xe_bo_lock(struct xe_bo *bo, bool intr);

void xe_bo_unlock(struct xe_bo *bo);

static inline void xe_bo_unlock_vm_held(struct xe_bo *bo)
{
	if (bo) {
		XE_WARN_ON(bo->vm && bo->ttm.base.resv != xe_vm_resv(bo->vm));
		if (bo->vm)
			xe_vm_assert_held(bo->vm);
		else
			dma_resv_unlock(bo->ttm.base.resv);
	}
}

int xe_bo_pin_external(struct xe_bo *bo);
int xe_bo_pin(struct xe_bo *bo);
void xe_bo_unpin_external(struct xe_bo *bo);
void xe_bo_unpin(struct xe_bo *bo);
int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict);

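/*
 * Example (illustrative sketch, not lifted from the driver): validating a
 * VM-less bo's backing store under its own reservation lock. xe_bo_lock()
 * takes the bo's dma-resv, optionally interruptibly, and xe_bo_validate()
 * (re)creates the backing store in one of the bo's allowed placements:
 *
 *	err = xe_bo_lock(bo, true);
 *	if (err)
 *		return err;
 *	err = xe_bo_validate(bo, NULL, false);
 *	xe_bo_unlock(bo);
 *	return err;
 */
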
static inline bool xe_bo_is_pinned(struct xe_bo *bo)
{
	return bo->ttm.pin_count;
}

static inline void xe_bo_unpin_map_no_vm(struct xe_bo *bo)
{
	if (likely(bo)) {
		xe_bo_lock(bo, false);
		xe_bo_unpin(bo);
		xe_bo_unlock(bo);

		xe_bo_put(bo);
	}
}

bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo);
dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size);
dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size);

static inline dma_addr_t
xe_bo_main_addr(struct xe_bo *bo, size_t page_size)
{
	return xe_bo_addr(bo, 0, page_size);
}

static inline u32
xe_bo_ggtt_addr(struct xe_bo *bo)
{
	XE_WARN_ON(bo->ggtt_node.size > bo->size);
	XE_WARN_ON(bo->ggtt_node.start + bo->ggtt_node.size > (1ull << 32));
	return bo->ggtt_node.start;
}

int xe_bo_vmap(struct xe_bo *bo);
void xe_bo_vunmap(struct xe_bo *bo);

bool mem_type_is_vram(u32 mem_type);
bool xe_bo_is_vram(struct xe_bo *bo);
bool xe_bo_is_stolen(struct xe_bo *bo);
bool xe_bo_is_stolen_devmem(struct xe_bo *bo);
uint64_t vram_region_gpu_offset(struct ttm_resource *res);

bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type);

int xe_bo_migrate(struct xe_bo *bo, u32 mem_type);
int xe_bo_evict(struct xe_bo *bo, bool force_alloc);

int xe_bo_evict_pinned(struct xe_bo *bo);
int xe_bo_restore_pinned(struct xe_bo *bo);

extern struct ttm_device_funcs xe_ttm_funcs;

int xe_gem_create_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file);
int xe_bo_dumb_create(struct drm_file *file_priv,
		      struct drm_device *dev,
		      struct drm_mode_create_dumb *args);

bool xe_bo_needs_ccs_pages(struct xe_bo *bo);

static inline size_t xe_bo_ccs_pages_start(struct xe_bo *bo)
{
	return PAGE_ALIGN(bo->ttm.base.size);
}

static inline bool xe_bo_has_pages(struct xe_bo *bo)
{
	if ((bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) ||
	    xe_bo_is_vram(bo))
		return true;

	return false;
}

void __xe_bo_release_dummy(struct kref *kref);

/**
 * xe_bo_put_deferred() - Put a buffer object with delayed final freeing
 * @bo: The bo to put.
 * @deferred: List to which to add the buffer object if we cannot put, or
 * NULL if the function is to put unconditionally.
 *
 * Since the final freeing of an object includes both sleeping and (!)
 * memory allocation in the dma_resv individualization, it's not ok
 * to put an object from atomic context nor from within a held lock
 * tainted by reclaim. In such situations we want to defer the final
 * freeing until we've exited the restricting context, or in the worst
 * case to a workqueue.
 * This function either puts the object if possible without the refcount
 * reaching zero, or adds it to the @deferred list if that was not possible.
 * The caller needs to follow up with a call to xe_bo_put_commit() to actually
 * put the bo iff this function returns true. It's safe to always
 * follow up with a call to xe_bo_put_commit().
 * TODO: It's TTM that is the villain here. Perhaps TTM should add an
 * interface like this.
 *
 * Return: true if @bo was the first object put on the @deferred list,
 * false otherwise.
 */
static inline bool
xe_bo_put_deferred(struct xe_bo *bo, struct llist_head *deferred)
{
	if (!deferred) {
		xe_bo_put(bo);
		return false;
	}

	if (!kref_put(&bo->ttm.base.refcount, __xe_bo_release_dummy))
		return false;

	return llist_add(&bo->freed, deferred);
}

void xe_bo_put_commit(struct llist_head *deferred);

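/*
 * Example (illustrative sketch): dropping bo references from a context
 * that must not sleep or allocate, then finishing the frees once the
 * restricting context has been left:
 *
 *	LLIST_HEAD(deferred);
 *
 *	...
 *	xe_bo_put_deferred(bo, &deferred);	// inside the critical section
 *	...
 *	xe_bo_put_commit(&deferred);		// after dropping the lock
 */
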
struct sg_table *xe_bo_sg(struct xe_bo *bo);

/**
 * xe_sg_segment_size() - Provides upper limit for sg segment size.
 * @dev: device pointer
 *
 * Returns the maximum segment size for the 'struct scatterlist'
 * elements.
 */
static inline unsigned int xe_sg_segment_size(struct device *dev)
{
	struct scatterlist __maybe_unused sg;
	size_t max = BIT_ULL(sizeof(sg.length) * 8) - 1;

	max = min_t(size_t, max, dma_max_mapping_size(dev));

	/*
	 * The iommu_dma_map_sg() function ensures iova allocation doesn't
	 * cross dma segment boundary. It does so by padding some sg elements.
	 * This can cause overflow, ending up with sg->length being set to 0.
	 * Avoid this by ensuring maximum segment size is half of 'max'
	 * rounded down to PAGE_SIZE.
	 */
	return round_down(max / 2, PAGE_SIZE);
}

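/*
 * Example (illustrative sketch, assuming the usual scatterlist helpers):
 * the returned value is meant to be passed as the max_segment argument
 * when building an sg table for a bo's backing pages, e.g.:
 *
 *	err = sg_alloc_table_from_pages_segment(&sgt, pages, num_pages, 0,
 *						(u64)num_pages << PAGE_SHIFT,
 *						xe_sg_segment_size(dev),
 *						GFP_KERNEL);
 */
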
#define i915_gem_object_flush_if_display(obj)		((void)(obj))

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
/**
 * xe_bo_is_mem_type() - Whether the bo currently resides in the given
 * TTM memory type
 * @bo: The bo to check.
 * @mem_type: The TTM memory type.
 *
 * Return: true iff the bo resides in @mem_type, false otherwise.
 */
static inline bool xe_bo_is_mem_type(struct xe_bo *bo, u32 mem_type)
{
	xe_bo_assert_held(bo);
	return bo->ttm.resource->mem_type == mem_type;
}
#endif
#endif