/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__

#include <linux/kref.h>
#include <linux/dma-resv.h>
#include "drm/gpu_scheduler.h"
#include "msm_drv.h"

/* Make all GEM related WARN_ON()s ratelimited.. when things go wrong they
 * tend to go wrong 1000s of times in a short timespan.
 */
#define GEM_WARN_ON(x)  WARN_RATELIMIT(x, "%s", __stringify(x))

/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
#define MSM_BO_MAP_PRIV      0x20000000    /* use IOMMU_PRIV when mapping */

struct msm_gem_address_space {
	const char *name;
	/* NOTE: mm managed at the page level, size is in # of pages
	 * and position mm_node->start is in # of pages:
	 */
	struct drm_mm mm;
	spinlock_t lock; /* Protects drm_mm node allocation/removal */
	struct msm_mmu *mmu;
	struct kref kref;

	/* For address spaces associated with a specific process, this
	 * will be non-NULL:
	 */
	struct pid *pid;
};

struct msm_gem_vma {
	struct drm_mm_node node;
	uint64_t iova;
	struct msm_gem_address_space *aspace;
	struct list_head list;    /* node in msm_gem_object::vmas */
	bool mapped;
	int inuse;
};
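
/*
 * Illustrative sketch (not part of this header): an object keeps one
 * msm_gem_vma per address space it is mapped into, on the ::vmas list of
 * struct msm_gem_object (defined below).  A per-aspace lookup would walk
 * that list roughly as follows.  The helper name here is hypothetical;
 * the driver's actual lookup helpers live in msm_gem.c:
 *
 *	static struct msm_gem_vma *example_lookup_vma(struct drm_gem_object *obj,
 *			struct msm_gem_address_space *aspace)
 *	{
 *		struct msm_gem_object *msm_obj = to_msm_bo(obj);
 *		struct msm_gem_vma *vma;
 *
 *		list_for_each_entry(vma, &msm_obj->vmas, list) {
 *			if (vma->aspace == aspace)
 *				return vma;
 *		}
 *
 *		return NULL;
 *	}
 */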

struct msm_gem_object {
	struct drm_gem_object base;

	uint32_t flags;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	uint8_t madv;

	/**
	 * Is object on inactive_dontneed list (ie. counted in priv->shrinkable_count)?
	 */
	bool dontneed : 1;

	/**
	 * Is object evictable (ie. counted in priv->evictable_count)?
	 */
	bool evictable : 1;

	/**
	 * count of active vmap'ing
	 */
	uint8_t vmap_count;

	/**
	 * Node in list of all objects (mainly for debugfs, protected by
	 * priv->obj_lock)
	 */
	struct list_head node;

	/**
	 * An object is either:
	 *  inactive - on priv->inactive_dontneed or priv->inactive_willneed
	 *     (depending on purgeability status)
	 *  active   - on one of the gpu's active_list..  well, at least for
	 *     now we don't have (I don't think) hw sync between the 2d and 3d
	 *     pipes on devices which have both, meaning we need to block on
	 *     submit if a bo is already on another ring
	 */
	struct list_head mm_list;

	struct page **pages;
	struct sg_table *sgt;
	void *vaddr;

	struct list_head vmas;    /* list of msm_gem_vma */

	/* For physically contiguous buffers.  Used when we don't have
	 * an IOMMU.  Also used for stolen/splashscreen buffer.
	 */
	struct drm_mm_node *vram_node;

	char name[32]; /* Identifier to print for the debugfs files */

	int active_count;
	int pin_count;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)

int msm_gem_mmap_obj(struct drm_gem_object *obj,
			struct vm_area_struct *vma);
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova);
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end);
int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova);
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova);
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace);
void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace);
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset);
void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj);
void *msm_gem_get_vaddr(struct drm_gem_object *obj);
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
void msm_gem_put_vaddr(struct drm_gem_object *obj);
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu);
void msm_gem_active_put(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
void msm_gem_free_object(struct drm_gem_object *obj);
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle, char *name);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags);
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova);
void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt);
__printf(2, 3)
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);

#ifdef CONFIG_DEBUG_FS
struct msm_gem_stats {
	struct {
		unsigned count;
		size_t size;
	} all, active, resident, purgeable, purged;
};

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats);
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
#endif

static inline void
msm_gem_lock(struct drm_gem_object *obj)
{
	dma_resv_lock(obj->resv, NULL);
}
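
/*
 * Usage sketch (illustrative only): the *_locked variants of the API above
 * expect the caller to already hold the object lock, e.g.:
 *
 *	msm_gem_lock(obj);
 *	vaddr = msm_gem_get_vaddr_locked(obj);
 *	if (!IS_ERR(vaddr)) {
 *		... access the buffer contents through vaddr ...
 *		msm_gem_put_vaddr_locked(obj);
 *	}
 *	msm_gem_unlock(obj);
 */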

static inline bool __must_check
msm_gem_trylock(struct drm_gem_object *obj)
{
	return dma_resv_trylock(obj->resv);
}

static inline int
msm_gem_lock_interruptible(struct drm_gem_object *obj)
{
	return dma_resv_lock_interruptible(obj->resv, NULL);
}

static inline void
msm_gem_unlock(struct drm_gem_object *obj)
{
	dma_resv_unlock(obj->resv);
}

static inline bool
msm_gem_is_locked(struct drm_gem_object *obj)
{
	return dma_resv_is_locked(obj->resv);
}

static inline bool is_active(struct msm_gem_object *msm_obj)
{
	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
	return msm_obj->active_count;
}

/* imported/exported objects are not purgeable: */
static inline bool is_unpurgeable(struct msm_gem_object *msm_obj)
{
	return msm_obj->base.import_attach || msm_obj->pin_count;
}

static inline bool is_purgeable(struct msm_gem_object *msm_obj)
{
	return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
			!is_unpurgeable(msm_obj);
}

static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
{
	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
}

static inline void mark_purgeable(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));

	if (is_unpurgeable(msm_obj))
		return;

	if (GEM_WARN_ON(msm_obj->dontneed))
		return;

	priv->shrinkable_count += msm_obj->base.size >> PAGE_SHIFT;
	msm_obj->dontneed = true;
}

static inline void mark_unpurgeable(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));

	if (is_unpurgeable(msm_obj))
		return;

	if (GEM_WARN_ON(!msm_obj->dontneed))
		return;

	priv->shrinkable_count -= msm_obj->base.size >> PAGE_SHIFT;
	GEM_WARN_ON(priv->shrinkable_count < 0);
	msm_obj->dontneed = false;
}

static inline bool is_unevictable(struct msm_gem_object *msm_obj)
{
	return is_unpurgeable(msm_obj) || msm_obj->vaddr;
}

static inline void mark_evictable(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	WARN_ON(!mutex_is_locked(&priv->mm_lock));

	if (is_unevictable(msm_obj))
		return;

	if (WARN_ON(msm_obj->evictable))
		return;

	priv->evictable_count += msm_obj->base.size >> PAGE_SHIFT;
	msm_obj->evictable = true;
}

static inline void mark_unevictable(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	WARN_ON(!mutex_is_locked(&priv->mm_lock));

	if (is_unevictable(msm_obj))
		return;

	if (WARN_ON(!msm_obj->evictable))
		return;

	priv->evictable_count -= msm_obj->base.size >> PAGE_SHIFT;
	WARN_ON(priv->evictable_count < 0);
	msm_obj->evictable = false;
}

void msm_gem_purge(struct drm_gem_object *obj);
void msm_gem_evict(struct drm_gem_object *obj);
void msm_gem_vunmap(struct drm_gem_object *obj);
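
/*
 * Illustrative sketch (an assumption, not the actual shrinker code in
 * msm_gem_shrinker.c): a purge pass over an object would take the object
 * lock, check purgeability with the helper above, and then purge:
 *
 *	if (msm_gem_trylock(obj)) {
 *		if (is_purgeable(to_msm_bo(obj)))
 *			msm_gem_purge(obj);
 *		msm_gem_unlock(obj);
 *	}
 */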

/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
 * associated with the cmdstream submission for synchronization (and
 * make it easier to unwind when things go wrong, etc).
 */
struct msm_gem_submit {
	struct drm_sched_job base;
	struct kref ref;
	struct drm_device *dev;
	struct msm_gpu *gpu;
	struct msm_gem_address_space *aspace;
	struct list_head node;   /* node in ring submit list */
	struct ww_acquire_ctx ticket;
	uint32_t seqno;          /* Sequence number of the submit on the ring */

	/* Array of struct dma_fence * to block on before submitting this job.
	 */
	struct xarray deps;
	unsigned long last_dep;

	/* Hw fence, which is created when the scheduler executes the job, and
	 * is signaled when the hw finishes (via seqno write from cmdstream)
	 */
	struct dma_fence *hw_fence;

	/* Userspace visible fence, which is signaled by the scheduler after
	 * the hw_fence is signaled.
	 */
	struct dma_fence *user_fence;

	int fence_id;       /* key into queue->fence_idr */
	struct msm_gpu_submitqueue *queue;
	struct pid *pid;    /* submitting process */
	bool fault_dumped;  /* Limit devcoredump dumping to one per submit */
	bool valid;         /* true if no cmdstream patching needed */
	bool in_rb;         /* "sudo" mode, copy cmds into RB */
	struct msm_ringbuffer *ring;
	struct msm_file_private *ctx;
	unsigned int nr_cmds;
	unsigned int nr_bos;
	u32 ident;          /* An "identifier" for the submit for logging */
	struct {
		uint32_t type;
		uint32_t size;   /* in dwords */
		uint64_t iova;
		uint32_t offset; /* in dwords */
		uint32_t idx;    /* cmdstream buffer idx in bos[] */
		uint32_t nr_relocs;
		struct drm_msm_gem_submit_reloc *relocs;
	} *cmd;  /* array of size nr_cmds */
	struct {
		uint32_t flags;
		union {
			struct msm_gem_object *obj;
			uint32_t handle;
		};
		uint64_t iova;
	} bos[];
};

static inline struct msm_gem_submit *to_msm_submit(struct drm_sched_job *job)
{
	return container_of(job, struct msm_gem_submit, base);
}

void __msm_gem_submit_destroy(struct kref *kref);

static inline void msm_gem_submit_get(struct msm_gem_submit *submit)
{
	kref_get(&submit->ref);
}

static inline void msm_gem_submit_put(struct msm_gem_submit *submit)
{
	kref_put(&submit->ref, __msm_gem_submit_destroy);
}

void msm_submit_retire(struct msm_gem_submit *submit);

/* helper to determine if a buffer in submit should be dumped, used for both
 * devcoredump and debugfs cmdstream dumping:
 */
static inline bool
should_dump(struct msm_gem_submit *submit, int idx)
{
	extern bool rd_full;
	return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
}

#endif /* __MSM_GEM_H__ */
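
/*
 * Usage sketch (illustrative only, not part of the header proper): a submit
 * is reference counted, so any path that keeps a pointer to it beyond the
 * submit ioctl (retirement, fault/devcoredump handling, etc) takes its own
 * reference with the helpers above:
 *
 *	msm_gem_submit_get(submit);
 *	... use submit from another context ...
 *	msm_gem_submit_put(submit);
 */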