/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020-2025 Intel Corporation
 */
#ifndef __IVPU_GEM_H__
#define __IVPU_GEM_H__

#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_mm.h>

struct ivpu_file_priv;

struct ivpu_bo {
	struct drm_gem_shmem_object base;
	struct ivpu_mmu_context *ctx;
	struct list_head bo_list_node;
	struct drm_mm_node mm_node;

	u64 vpu_addr;
	u32 flags;
	u32 job_status; /* Valid only for command buffer */
	u32 ctx_id;
	bool mmu_mapped;
};

int ivpu_bo_bind(struct ivpu_bo *bo);
void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);

struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size);
struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
struct ivpu_bo *ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			       struct ivpu_addr_range *range, u64 size, u32 flags);
struct ivpu_bo *ivpu_bo_create_runtime(struct ivpu_device *vdev, u64 addr, u64 size, u32 flags);
struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags);
void ivpu_bo_free(struct ivpu_bo *bo);

int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_bo_create_from_userptr_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file);

void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p);
void ivpu_bo_list_print(struct drm_device *dev);

static inline struct ivpu_bo *to_ivpu_bo(struct drm_gem_object *obj)
{
	return container_of(obj, struct ivpu_bo, base.base);
}

static inline void *ivpu_bo_vaddr(struct ivpu_bo *bo)
{
	return bo->base.vaddr;
}

static inline size_t ivpu_bo_size(struct ivpu_bo *bo)
{
	return bo->base.base.size;
}

static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo)
{
	return bo->flags & DRM_IVPU_BO_CACHE_MASK;
}

static inline struct ivpu_device *ivpu_bo_to_vdev(struct ivpu_bo *bo)
{
	return to_ivpu_device(bo->base.base.dev);
}

static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
{
	if (ivpu_is_force_snoop_enabled(ivpu_bo_to_vdev(bo)))
		return true;

	return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
}

static inline bool ivpu_bo_is_read_only(struct ivpu_bo *bo)
{
	return bo->flags & DRM_IVPU_BO_READ_ONLY;
}

static inline void *ivpu_to_cpu_addr(struct ivpu_bo *bo, u32 vpu_addr)
{
	if (vpu_addr < bo->vpu_addr)
		return NULL;

	if (vpu_addr >= (bo->vpu_addr + ivpu_bo_size(bo)))
		return NULL;

	return ivpu_bo_vaddr(bo) + (vpu_addr - bo->vpu_addr);
}

static inline u32 cpu_to_vpu_addr(struct ivpu_bo *bo, void *cpu_addr)
{
	if (cpu_addr < ivpu_bo_vaddr(bo))
		return 0;

	if (cpu_addr >= (ivpu_bo_vaddr(bo) + ivpu_bo_size(bo)))
		return 0;

	return bo->vpu_addr + (cpu_addr - ivpu_bo_vaddr(bo));
}

static inline bool ivpu_bo_is_mappable(struct ivpu_bo *bo)
{
	return bo->flags & DRM_IVPU_BO_MAPPABLE;
}

#endif /* __IVPU_GEM_H__ */
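
/*
 * Illustrative sketch only, not part of the upstream header: one way driver
 * code might use the ivpu_to_cpu_addr() helper above to read a value located
 * at a known VPU address inside a buffer object. The function name
 * ivpu_example_read_word() is hypothetical and exists purely for this
 * example; it assumes the BO has a valid CPU mapping (bo->base.vaddr set)
 * and that the word lies entirely within the BO.
 */
#if 0	/* example only -- kept out of the build */
static int ivpu_example_read_word(struct ivpu_bo *bo, u32 vpu_addr, u32 *out)
{
	u32 *cpu_ptr;

	/* ivpu_to_cpu_addr() returns NULL when vpu_addr falls outside the BO */
	cpu_ptr = ivpu_to_cpu_addr(bo, vpu_addr);
	if (!cpu_ptr)
		return -EINVAL;

	*out = *cpu_ptr;
	return 0;
}
#endif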