/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_vma_types.h"

void i915_gem_init__objects(struct drm_i915_private *i915);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key);
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, resource_size_t size);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;

	return obj;
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	obj = i915_gem_object_get_rcu(obj);
	rcu_read_unlock();

	return obj;
}

__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
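
/*
 * Typical handle-to-object translation in an ioctl handler (an illustrative
 * sketch only; "args->handle" stands in for whatever userspace passed in):
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_lookup(file, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	... operate on obj ...
 *
 *	i915_gem_object_put(obj);
 *
 * i915_gem_object_lookup() takes its own reference via
 * i915_gem_object_get_rcu(), so every successful lookup must be balanced by
 * an i915_gem_object_put().
 */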

static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
					 struct i915_gem_ww_ctx *ww,
					 bool intr)
{
	int ret;

	if (intr)
		ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
	else
		ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);

	if (!ret && ww)
		list_add_tail(&obj->obj_link, &ww->obj_list);
	if (ret == -EALREADY)
		ret = 0;

	if (ret == -EDEADLK)
		ww->contended = obj;

	return ret;
}

static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
				       struct i915_gem_ww_ctx *ww)
{
	return __i915_gem_object_lock(obj, ww, ww && ww->intr);
}

static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
						     struct i915_gem_ww_ctx *ww)
{
	WARN_ON(ww && !ww->intr);
	return __i915_gem_object_lock(obj, ww, true);
}

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
{
	return dma_resv_trylock(obj->base.resv);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	dma_resv_unlock(obj->base.resv);
}
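
/*
 * The ww variants are meant to be used from a wait/wound backoff loop. A
 * minimal sketch, assuming the i915_gem_ww_ctx helpers
 * (i915_gem_ww_ctx_init/backoff/fini) declared elsewhere in the driver:
 *
 *	struct i915_gem_ww_ctx ww;
 *	int err;
 *
 *	i915_gem_ww_ctx_init(&ww, true);
 *retry:
 *	err = i915_gem_object_lock(obj, &ww);
 *	if (!err) {
 *		... access the object's backing store, map it, bind it ...
 *	}
 *	if (err == -EDEADLK) {
 *		err = i915_gem_ww_ctx_backoff(&ww);
 *		if (!err)
 *			goto retry;
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 *
 * On -EDEADLK the contended object is recorded in ww->contended so that the
 * backoff can relock it before the loop is retried.
 */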

struct dma_fence *
i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
				  struct dma_fence *fence);

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
{
	return test_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj)
{
	set_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
{
	clear_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	return obj->ops->flags & flags;
}

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
}

static inline bool
i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM);
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}

static inline bool
i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}

static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_ASYNC_CANCEL);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->frontbuffer);
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}
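
/*
 * For reference, with the tile heights above a 4096 byte stride gives a tile
 * row of 4096 * 8 = 32KiB for X-tiling and 4096 * 32 = 128KiB for Y-tiling
 * (purely illustrative numbers).
 */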

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
			 struct i915_gem_object_page_iter *iter,
			 unsigned int n,
			 unsigned int *offset);

static inline struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset);
}

static inline struct scatterlist *
i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
			   unsigned int n,
			   unsigned int *offset)
{
	return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset);
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
			 unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n);

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
	I915_MM_NORMAL = 0,
	/*
	 * Only used by struct_mutex, when called "recursively" from
	 * direct-reclaim-esque paths. Safe because there is only ever one
	 * struct_mutex in the entire system.
	 */
	I915_MM_SHRINKER = 1,
	/*
	 * Used for obj->mm.lock when allocating pages. Safe because the object
	 * isn't yet on any LRU, and therefore the shrinker can't deadlock on
	 * it. As soon as the object has pages, obj->mm.lock nests within
	 * fs_reclaim.
	 */
	I915_MM_GET_PAGES = 1,
};

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}
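
/*
 * A minimal sketch of the pin/use/unpin pattern (illustrative only, with
 * error handling trimmed to the essentials):
 *
 *	struct page *page;
 *	int err;
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *
 *	page = i915_gem_object_get_page(obj, 0);
 *	... read or write through page ...
 *
 *	i915_gem_object_unpin_pages(obj);
 *
 * i915_gem_object_get_page() and friends assume the pages are already
 * pinned; the pin is what keeps obj->mm.pages from being reaped underneath
 * the caller.
 */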

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);
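
/*
 * A minimal pin_map sketch for CPU access through a cacheable mapping
 * ("data" and "size" are assumed to be supplied by the caller):
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, size);
 *	__i915_gem_object_flush_map(obj, 0, size);
 *	i915_gem_object_unpin_map(obj);
 *
 * i915_gem_object_flush_map() is the whole-object convenience wrapper around
 * __i915_gem_object_flush_map().
 */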

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_resv_get_excl_rcu(obj->base.resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view,
				     unsigned int flags);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->cache_dirty)
		return false;

	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

	/* Currently in use by HW (display engine)? Keep flushed. */
	return i915_gem_object_is_framebuffer(obj);
}

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

void i915_gem_fence_wait_priority(struct dma_fence *fence,
				  const struct i915_sched_attr *attr);

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin);

static inline void
i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
				  enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_flush_frontbuffer(obj, origin);
}

static inline void
i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
				       enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_invalidate_frontbuffer(obj, origin);
}

int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);

#endif