/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020-2025 Intel Corporation
 */
#ifndef __IVPU_GEM_H__
#define __IVPU_GEM_H__

#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_mm.h>

struct ivpu_file_priv;

struct ivpu_bo {
	struct drm_gem_shmem_object base;
	struct ivpu_mmu_context *ctx;
	struct list_head bo_list_node;
	struct drm_mm_node mm_node;

	u64 vpu_addr;
	u32 flags;
	u32 job_status; /* Valid only for command buffer */
	u32 ctx_id;
	bool mmu_mapped;
};

int ivpu_bo_bind(struct ivpu_bo *bo);
void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);

struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size);
struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
struct ivpu_bo *ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			       struct ivpu_addr_range *range, u64 size, u32 flags);
struct ivpu_bo *ivpu_bo_create_runtime(struct ivpu_device *vdev, u64 addr, u64 size, u32 flags);
struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags);
void ivpu_bo_free(struct ivpu_bo *bo);

int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file);

void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p);
void ivpu_bo_list_print(struct drm_device *dev);

static inline struct ivpu_bo *to_ivpu_bo(struct drm_gem_object *obj)
{
	return container_of(obj, struct ivpu_bo, base.base);
}

static inline void *ivpu_bo_vaddr(struct ivpu_bo *bo)
{
	return bo->base.vaddr;
}

static inline size_t ivpu_bo_size(struct ivpu_bo *bo)
{
	return bo->base.base.size;
}

static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo)
{
	return bo->flags & DRM_IVPU_BO_CACHE_MASK;
}

static inline struct ivpu_device *ivpu_bo_to_vdev(struct ivpu_bo *bo)
{
	return to_ivpu_device(bo->base.base.dev);
}

static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
{
	if (ivpu_is_force_snoop_enabled(ivpu_bo_to_vdev(bo)))
		return true;

	return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
}

static inline void *ivpu_to_cpu_addr(struct ivpu_bo *bo, u32 vpu_addr)
{
	if (vpu_addr < bo->vpu_addr)
		return NULL;

	if (vpu_addr >= (bo->vpu_addr + ivpu_bo_size(bo)))
		return NULL;

	return ivpu_bo_vaddr(bo) + (vpu_addr - bo->vpu_addr);
}

static inline u32 cpu_to_vpu_addr(struct ivpu_bo *bo, void *cpu_addr)
{
	if (cpu_addr < ivpu_bo_vaddr(bo))
		return 0;

	if (cpu_addr >= (ivpu_bo_vaddr(bo) + ivpu_bo_size(bo)))
		return 0;

	return bo->vpu_addr + (cpu_addr - ivpu_bo_vaddr(bo));
}

static inline bool ivpu_bo_is_mappable(struct ivpu_bo *bo)
{
	return bo->flags & DRM_IVPU_BO_MAPPABLE;
}

#endif /* __IVPU_GEM_H__ */
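
/*
 * Usage sketch (illustrative only, not part of this header): the
 * ivpu_to_cpu_addr()/cpu_to_vpu_addr() helpers translate an address between
 * the buffer's VPU mapping and its kernel vmap, returning NULL/0 when the
 * address does not fall inside the object. A hypothetical debug helper that
 * patches a word inside a mapped BO could look like this; example_poke_word()
 * is an assumption made for illustration, and it presumes the BO has a kernel
 * mapping (bo->base.vaddr is set):
 *
 *	static int example_poke_word(struct ivpu_bo *bo, u32 vpu_addr, u32 val)
 *	{
 *		u32 *cpu_ptr = ivpu_to_cpu_addr(bo, vpu_addr);
 *
 *		if (!cpu_ptr)
 *			return -EINVAL; // vpu_addr not backed by this BO
 *
 *		*cpu_ptr = val;
 *		return 0;
 *	}
 */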