/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include <drm/i915_drm.h>

#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_vma_types.h"

void i915_gem_init__objects(struct drm_i915_private *i915);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_object_ops *ops,
                          struct lock_class_key *key);
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
                             resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
                                       const void *data, resource_size_t size);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
                                     struct sg_table *pages,
                                     bool needs_clflush);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);

void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
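
/*
 * Example (illustrative sketch, not taken verbatim from the driver): creating
 * a shmem-backed object and dropping the reference again. The size and the
 * i915 pointer are placeholders. i915_gem_object_create_shmem() returns an
 * ERR_PTR() on failure, so check with IS_ERR() before use.
 *
 *        struct drm_i915_gem_object *obj;
 *
 *        obj = i915_gem_object_create_shmem(i915, SZ_64K);
 *        if (IS_ERR(obj))
 *                return PTR_ERR(obj);
 *
 *        ... use the object ...
 *
 *        i915_gem_object_put(obj);
 */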

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully that the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
        WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
        return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
        if (obj && !kref_get_unless_zero(&obj->base.refcount))
                obj = NULL;

        return obj;
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
        struct drm_i915_gem_object *obj;

        rcu_read_lock();
        obj = i915_gem_object_lookup_rcu(file, handle);
        obj = i915_gem_object_get_rcu(obj);
        rcu_read_unlock();

        return obj;
}

__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
        drm_gem_object_get(&obj->base);
        return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
        __drm_gem_object_put(&obj->base);
}
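
/*
 * Example (illustrative sketch): resolving a userspace handle in an ioctl
 * handler. i915_gem_object_lookup() returns a new reference (or NULL), which
 * must be balanced with i915_gem_object_put(); the args->handle and file
 * names here are placeholders.
 *
 *        struct drm_i915_gem_object *obj;
 *
 *        obj = i915_gem_object_lookup(file, args->handle);
 *        if (!obj)
 *                return -ENOENT;
 *
 *        ... operate on the object ...
 *
 *        i915_gem_object_put(obj);
 */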

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
        dma_resv_lock(obj->base.resv, NULL);
}

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
{
        return dma_resv_trylock(obj->base.resv);
}

static inline int
i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
{
        return dma_resv_lock_interruptible(obj->base.resv, NULL);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
        dma_resv_unlock(obj->base.resv);
}

struct dma_fence *
i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
                                  struct dma_fence *fence);

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
        obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
        return obj->flags & I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
        return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
        return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
        obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
                         unsigned long flags)
{
        return obj->ops->flags & flags;
}

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}

static inline bool
i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_GGTT);
}

static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_type_has(obj, I915_GEM_OBJECT_ASYNC_CANCEL);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
        return READ_ONCE(obj->frontbuffer);
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
        return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
        return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
        GEM_BUG_ON(!tiling);
        return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
        return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
        return (i915_gem_object_get_stride(obj) *
                i915_gem_object_get_tile_height(obj));
}

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
                               unsigned int tiling, unsigned int stride);

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
                       unsigned int n, unsigned int *offset);

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
                         unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
                               unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
                                    unsigned long n,
                                    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
                                unsigned long n);

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                                 struct sg_table *pages,
                                 unsigned int sg_page_sizes);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
        I915_MM_NORMAL = 0,
        /*
         * Only used by struct_mutex, when called "recursively" from
         * direct-reclaim-esque contexts. Safe because there is only ever one
         * struct_mutex in the entire system.
         */
        I915_MM_SHRINKER = 1,
        /*
         * Used for obj->mm.lock when allocating pages. Safe because the object
         * isn't yet on any LRU, and therefore the shrinker can't deadlock on
         * it. As soon as the object has pages, obj->mm.lock nests within
         * fs_reclaim.
         */
        I915_MM_GET_PAGES = 1,
};

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
        might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);

        if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
                return 0;

        return __i915_gem_object_get_pages(obj);
}

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
        return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));

        atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
        return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

        atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
        __i915_gem_object_unpin_pages(obj);
}
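
/*
 * Example (illustrative sketch): holding the backing store while accessing
 * its pages. i915_gem_object_pin_pages() acquires the pages, allocating them
 * if necessary, and elevates the pin count; every successful pin must be
 * balanced by i915_gem_object_unpin_pages().
 *
 *        int err;
 *
 *        err = i915_gem_object_pin_pages(obj);
 *        if (err)
 *                return err;
 *
 *        ... the sg_table in obj->mm.pages is now safe to walk ...
 *
 *        i915_gem_object_unpin_pages(obj);
 */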

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);

enum i915_map_type {
        I915_MAP_WB = 0,
        I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
        I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
        I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
                                           enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
                                 unsigned long offset,
                                 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
        __i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * on the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
        i915_gem_object_unpin_pages(obj);
}
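
/*
 * Example (illustrative sketch): filling an object through a pinned kernel
 * mapping; data and len are placeholders. The flush performs whatever CPU
 * cache maintenance the object requires before the GPU reads the writes.
 *
 *        void *vaddr;
 *
 *        vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *        if (IS_ERR(vaddr))
 *                return PTR_ERR(vaddr);
 *
 *        memcpy(vaddr, data, len);
 *        i915_gem_object_flush_map(obj);
 *        i915_gem_object_unpin_map(obj);
 */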

void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
                                   unsigned int flush_domains);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
                                 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
                                  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE  BIT(0)
#define CLFLUSH_AFTER   BIT(1)
#define CLFLUSH_FLAGS   (CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
        i915_gem_object_unpin_pages(obj);
        i915_gem_object_unlock(obj);
}
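
/*
 * Example (illustrative sketch): a CPU write bracketed by the prepare/finish
 * helpers. i915_gem_object_prepare_write() locks the object, pins its pages
 * and reports in @needs_clflush whether the caller must flush cachelines
 * before (CLFLUSH_BEFORE) and/or after (CLFLUSH_AFTER) the access;
 * i915_gem_object_finish_access() drops the pin and the lock.
 *
 *        unsigned int needs_clflush;
 *        int err;
 *
 *        err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *        if (err)
 *                return err;
 *
 *        ... copy into the object's pages, flushing as instructed ...
 *
 *        i915_gem_object_finish_access(obj);
 */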

static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
        struct intel_engine_cs *engine = NULL;
        struct dma_fence *fence;

        rcu_read_lock();
        fence = dma_resv_get_excl_rcu(obj->base.resv);
        rcu_read_unlock();

        if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
                engine = to_request(fence)->engine;
        dma_fence_put(fence);

        return engine;
}

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
                                         unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
                                     const struct i915_ggtt_view *view,
                                     unsigned int flags);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
        if (obj->cache_dirty)
                return false;

        if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
                return true;

        /* Currently in use by HW (display engine)? Keep flushed. */
        return i915_gem_object_is_framebuffer(obj);
}

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->write_domain = I915_GEM_DOMAIN_CPU;
        if (cpu_write_needs_clflush(obj))
                obj->cache_dirty = true;
}

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
                         unsigned int flags,
                         long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
                                  unsigned int flags,
                                  const struct i915_sched_attr *attr);

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
                                         enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
                                              enum fb_op_origin origin);

static inline void
i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
                                  enum fb_op_origin origin)
{
        if (unlikely(rcu_access_pointer(obj->frontbuffer)))
                __i915_gem_object_flush_frontbuffer(obj, origin);
}

static inline void
i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
                                       enum fb_op_origin origin)
{
        if (unlikely(rcu_access_pointer(obj->frontbuffer)))
                __i915_gem_object_invalidate_frontbuffer(obj, origin);
}

#endif