Lines Matching defs:bo
void xe_bo_free(struct xe_bo *bo);

struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
                                     struct xe_tile *tile, struct dma_resv *resv,
                                     struct ttm_lru_bulk_move *bulk, size_t size,
                                     u16 cpu_caching, enum ttm_bo_type type,
                                     u32 flags);

int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
                              u32 bo_flags);
static inline struct xe_bo *ttm_to_xe_bo(const struct ttm_buffer_object *bo)
{
        return container_of(bo, struct xe_bo, ttm);
}

#define xe_bo_device(bo) ttm_to_xe_device((bo)->ttm.bdev)

static inline struct xe_bo *xe_bo_get(struct xe_bo *bo)
{
        if (bo)
                drm_gem_object_get(&bo->ttm.base);

        return bo;
}

void xe_bo_put(struct xe_bo *bo);
/**
 * xe_bo_get_unless_zero() - Conditionally obtain a GEM object refcount on an
 * xe bo
 * @bo: The bo for which we want to obtain a refcount.
 *
 * There is a short window between where the bo's GEM object refcount reaches
 * zero and where we put the final ttm_bo reference. Code in the eviction and
 * shrinking paths should therefore grab a GEM object reference before using
 * members outside of the TTM base class; a non-NULL return means the TTM
 * reference is still nonzero as well.
 *
 * Return: @bo on success, NULL on failure.
 */
static inline __must_check struct xe_bo *xe_bo_get_unless_zero(struct xe_bo *bo)
{
        if (!bo || !kref_get_unless_zero(&bo->ttm.base.refcount))
                return NULL;

        return bo;
}
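/*
 * Usage sketch (illustrative, not part of this header): an eviction- or
 * shrinker-style path that starts from the TTM object and must not
 * resurrect a dying bo. Only the helpers called here are real; the
 * function itself is hypothetical.
 */
static bool example_try_use_bo(struct ttm_buffer_object *ttm_bo)
{
        struct xe_bo *bo = xe_bo_get_unless_zero(ttm_to_xe_bo(ttm_bo));

        if (!bo)
                return false;   /* Final reference already gone. */

        /* Safe to use xe_bo members outside the TTM base class here. */
        xe_bo_put(bo);
        return true;
}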
static inline void __xe_bo_unset_bulk_move(struct xe_bo *bo)
{
        if (bo)
                ttm_bo_set_bulk_move(&bo->ttm, NULL);
}

static inline void xe_bo_assert_held(struct xe_bo *bo)
{
        if (bo)
                dma_resv_assert_held((bo)->ttm.base.resv);
}

int xe_bo_lock(struct xe_bo *bo, bool intr);

void xe_bo_unlock(struct xe_bo *bo);
static inline void xe_bo_unlock_vm_held(struct xe_bo *bo)
{
        if (bo) {
                XE_WARN_ON(bo->vm && bo->ttm.base.resv != xe_vm_resv(bo->vm));
                if (bo->vm)
                        xe_vm_assert_held(bo->vm);
                else
                        dma_resv_unlock(bo->ttm.base.resv);
        }
}
int xe_bo_pin_external(struct xe_bo *bo);
int xe_bo_pin(struct xe_bo *bo);
void xe_bo_unpin_external(struct xe_bo *bo);
void xe_bo_unpin(struct xe_bo *bo);
int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict);
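/*
 * Usage sketch (illustrative, not part of this header): validate a bo
 * under its dma-resv lock. Passing a NULL vm and disallowing eviction of
 * shared resources is just one plausible parameter combination.
 */
static int example_validate_bo(struct xe_bo *bo)
{
        int err;

        err = xe_bo_lock(bo, true);     /* interruptible wait */
        if (err)
                return err;

        err = xe_bo_validate(bo, NULL, false);
        xe_bo_unlock(bo);

        return err;
}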
static inline bool xe_bo_is_pinned(struct xe_bo *bo)
{
        return bo->ttm.pin_count;
}

static inline bool xe_bo_is_protected(const struct xe_bo *bo)
{
        return bo->pxp_key_instance;
}
static inline void xe_bo_unpin_map_no_vm(struct xe_bo *bo)
{
        if (likely(bo)) {
                xe_bo_lock(bo, false);
                xe_bo_unpin(bo);
                xe_bo_unlock(bo);

                xe_bo_put(bo);
        }
}
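/*
 * Usage sketch (illustrative): xe_bo_unpin_map_no_vm() is the teardown
 * counterpart of the pinned-and-mapped kernel bo creators. Here
 * xe_bo_create_pin_map() and XE_BO_FLAG_SYSTEM are assumed to come from
 * elsewhere in this header; the flag choice is hypothetical.
 */
static int example_kernel_bo(struct xe_device *xe, struct xe_tile *tile)
{
        struct xe_bo *bo;

        bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K,
                                  ttm_bo_type_kernel, XE_BO_FLAG_SYSTEM);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        /* ... use the bo, e.g. through its kernel mapping ... */

        xe_bo_unpin_map_no_vm(bo);
        return 0;
}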
bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo);
dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size);
dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size);
static inline dma_addr_t
xe_bo_main_addr(struct xe_bo *bo, size_t page_size)
{
        return xe_bo_addr(bo, 0, page_size);
}
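/*
 * Example (illustrative, hypothetical helper): DMA address of the first
 * page of a bo at 4 KiB granularity, the sort of value a page-table entry
 * would encode.
 */
static dma_addr_t example_first_page_addr(struct xe_bo *bo)
{
        return xe_bo_main_addr(bo, SZ_4K);
}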
/**
 * xe_bo_size() - Xe BO size
 * @bo: The bo object.
 *
 * Simple helper returning the size of the bo's underlying GEM object.
 *
 * Return: The bo size in bytes.
 */
static inline size_t xe_bo_size(struct xe_bo *bo)
{
        return bo->ttm.base.size;
}
static inline u32
__xe_bo_ggtt_addr(struct xe_bo *bo, u8 tile_id)
{
        struct xe_ggtt_node *ggtt_node = bo->ggtt_node[tile_id];

        if (XE_WARN_ON(!ggtt_node))
                return 0;

        XE_WARN_ON(ggtt_node->base.size > xe_bo_size(bo));
        XE_WARN_ON(ggtt_node->base.start + ggtt_node->base.size > (1ull << 32));
        return ggtt_node->base.start;
}

static inline u32
xe_bo_ggtt_addr(struct xe_bo *bo)
{
        xe_assert(xe_bo_device(bo), bo->tile);

        return __xe_bo_ggtt_addr(bo, bo->tile->id);
}
int xe_bo_vmap(struct xe_bo *bo);
void xe_bo_vunmap(struct xe_bo *bo);
int xe_bo_read(struct xe_bo *bo, u64 offset, void *dst, int size);
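/*
 * Usage sketch (illustrative): CPU writes through the kernel mapping. That
 * xe_bo_vmap() wants the reservation held and publishes the mapping in
 * bo->vmap (a struct iosys_map) is an assumption based on how the driver
 * uses these helpers elsewhere; the function itself is hypothetical.
 */
static int example_fill_bo(struct xe_bo *bo, const void *src, size_t len)
{
        int err;

        xe_bo_lock(bo, false);
        err = xe_bo_vmap(bo);
        if (!err) {
                iosys_map_memcpy_to(&bo->vmap, 0, src, len);
                xe_bo_vunmap(bo);
        }
        xe_bo_unlock(bo);

        return err;
}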
bool xe_bo_is_vram(struct xe_bo *bo);
bool xe_bo_is_stolen(struct xe_bo *bo);
bool xe_bo_is_stolen_devmem(struct xe_bo *bo);
bool xe_bo_is_vm_bound(struct xe_bo *bo);
bool xe_bo_has_single_placement(struct xe_bo *bo);

bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type);

int xe_bo_migrate(struct xe_bo *bo, u32 mem_type);
int xe_bo_evict(struct xe_bo *bo);

int xe_bo_evict_pinned(struct xe_bo *bo);
int xe_bo_notifier_prepare_pinned(struct xe_bo *bo);
int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo);
int xe_bo_restore_pinned(struct xe_bo *bo);

int xe_bo_dma_unmap_pinned(struct xe_bo *bo);

void xe_bo_runtime_pm_release_mmap_offset(struct xe_bo *bo);
bool xe_bo_needs_ccs_pages(struct xe_bo *bo);

static inline size_t xe_bo_ccs_pages_start(struct xe_bo *bo)
{
        return PAGE_ALIGN(xe_bo_size(bo));
}

static inline bool xe_bo_has_pages(struct xe_bo *bo)
{
        if ((bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) ||
            xe_bo_is_vram(bo))
                return true;

        return false;
}
/**
 * xe_bo_put_deferred() - Put a buffer object with delayed final freeing
 * @bo: The bo to put.
 * @deferred: List to add the bo to if the final put must be deferred, or
 * NULL to put unconditionally.
 *
 * The final freeing of a bo may sleep, so it must not happen from atomic
 * context. The caller needs to follow up with a call to xe_bo_put_commit()
 * to actually put the bo iff this function returns true. It's safe to
 * always follow up with a call to xe_bo_put_commit().
 *
 * Return: true if @bo was the first object put on the @freed list,
 * false otherwise.
 */
static inline bool
xe_bo_put_deferred(struct xe_bo *bo, struct llist_head *deferred)
{
        if (!deferred) {
                xe_bo_put(bo);
                return false;
        }

        if (!kref_put(&bo->ttm.base.refcount, __xe_bo_release_dummy))
                return false;

        return llist_add(&bo->freed, deferred);
}
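/*
 * Usage sketch (illustrative): drop references under a spinlock, where the
 * final free must not run, and commit afterwards. The lock and the bo
 * array are hypothetical; xe_bo_put_commit() is assumed to be declared
 * elsewhere in this header.
 */
static void example_put_locked(struct xe_bo **bos, int count, spinlock_t *lock)
{
        LLIST_HEAD(deferred);
        int i;

        spin_lock(lock);
        for (i = 0; i < count; i++)
                xe_bo_put_deferred(bos[i], &deferred);
        spin_unlock(lock);

        /* Performs the final puts outside the atomic context; always safe. */
        xe_bo_put_commit(&deferred);
}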
/**
 * xe_bo_put_async() - Put BO async
 * @bo: The bo to put.
 *
 * The final put is deferred to a worker, so this is safe to call from
 * contexts that must not free the bo directly, such as IRQ context.
 */
static inline void
xe_bo_put_async(struct xe_bo *bo)
{
        struct xe_bo_dev *bo_device = &xe_bo_device(bo)->bo_device;

        if (xe_bo_put_deferred(bo, &bo_device->async_list))
                schedule_work(&bo_device->async_free);
}
struct sg_table *xe_bo_sg(struct xe_bo *bo);
/**
 * struct xe_bo_shrink_flags - flags governing the shrink behaviour.
 * @purge: Only purging allowed. Don't shrink if bo not purgeable.
 */

long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
                  const struct xe_bo_shrink_flags flags,
                  unsigned long *scanned);
/**
 * xe_bo_is_mem_type - Whether the bo currently resides in the given
 * TTM memory type
 * @bo: The bo to check.
 * @mem_type: The TTM memory type.
 *
 * Return: true iff the bo resides in @mem_type, false otherwise.
 */
static inline bool xe_bo_is_mem_type(struct xe_bo *bo, u32 mem_type)
{
        xe_bo_assert_held(bo);
        return bo->ttm.resource->mem_type == mem_type;
}
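/*
 * Usage sketch (illustrative): the helper asserts the reservation is held,
 * so take the bo lock around the check. XE_PL_VRAM0 is assumed to be one
 * of the TTM placement defines from elsewhere in this header.
 */
static bool example_bo_in_vram0(struct xe_bo *bo)
{
        bool ret;

        xe_bo_lock(bo, false);
        ret = xe_bo_is_mem_type(bo, XE_PL_VRAM0);
        xe_bo_unlock(bo);

        return ret;
}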