/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include "intel_memory_region.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ww.h"
#include "i915_vma_types.h"

enum intel_region_id;

#define obj_to_i915(obj__) to_i915((obj__)->base.dev)

static inline bool i915_gem_object_size_2big(u64 size)
{
	struct drm_i915_gem_object *obj;

	if (overflows_type(size, obj->base.size))
		return true;

	return false;
}

unsigned int i915_gem_get_pat_index(struct drm_i915_private *i915,
				    enum i915_cache_level level);
bool i915_gem_object_has_cache_level(const struct drm_i915_gem_object *obj,
				     enum i915_cache_level lvl);
void i915_gem_init__objects(struct drm_i915_private *i915);

void i915_objects_module_exit(void);
int i915_objects_module_init(void);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key,
			  unsigned alloc_flags);

void __i915_gem_object_fini(struct drm_i915_gem_object *obj);

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, resource_size_t size);
struct drm_i915_gem_object *
__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
			      struct intel_memory_region **placements,
			      unsigned int n_placements);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;

void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
				const struct drm_i915_gem_pwrite *args);
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
			       const struct drm_i915_gem_pread *args);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages);
void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
				    struct sg_table *pages);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
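
/*
 * Illustrative usage sketch (not part of the header's declarations): creating
 * a shmem-backed object and dropping the reference when done. "i915" stands
 * in for a struct drm_i915_private pointer and the size is arbitrary.
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_create_shmem(i915, SZ_64K);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 *	// ... populate or map the object ...
 *
 *	i915_gem_object_put(obj);
 */
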
/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;

	return obj;
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	obj = i915_gem_object_get_rcu(obj);
	rcu_read_unlock();

	return obj;
}

__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

/*
 * If more than one potential simultaneous locker, assert held.
 */
static inline void assert_object_held_shared(const struct drm_i915_gem_object *obj)
{
	/*
	 * Note mm list lookup is protected by
	 * kref_get_unless_zero().
	 */
	if (IS_ENABLED(CONFIG_LOCKDEP) &&
	    kref_read(&obj->base.refcount) > 0)
		assert_object_held(obj);
}
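
/*
 * Illustrative usage sketch: translating a userspace handle into an object
 * reference for the duration of an ioctl. "file" and "args" are placeholders
 * for the ioctl's struct drm_file and argument struct.
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_lookup(file, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	// ... the acquired reference keeps the object alive here ...
 *
 *	i915_gem_object_put(obj);
 */
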
static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
					 struct i915_gem_ww_ctx *ww,
					 bool intr)
{
	int ret;

	if (intr)
		ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
	else
		ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);

	if (!ret && ww) {
		i915_gem_object_get(obj);
		list_add_tail(&obj->obj_link, &ww->obj_list);
	}
	if (ret == -EALREADY)
		ret = 0;

	if (ret == -EDEADLK) {
		i915_gem_object_get(obj);
		ww->contended = obj;
	}

	return ret;
}

static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
				       struct i915_gem_ww_ctx *ww)
{
	return __i915_gem_object_lock(obj, ww, ww && ww->intr);
}

static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
						     struct i915_gem_ww_ctx *ww)
{
	WARN_ON(ww && !ww->intr);
	return __i915_gem_object_lock(obj, ww, true);
}

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj,
					   struct i915_gem_ww_ctx *ww)
{
	if (!ww)
		return dma_resv_trylock(obj->base.resv);
	else
		return ww_mutex_trylock(&obj->base.resv->lock, &ww->ctx);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	if (obj->ops->adjust_lru)
		obj->ops->adjust_lru(obj);

	dma_resv_unlock(obj->base.resv);
}
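
/*
 * Illustrative locking sketch: taking the object lock under a ww context so
 * that -EDEADLK contention can be backed off and retried. The helpers used
 * here (i915_gem_ww_ctx_init/backoff/fini) come from i915_gem_ww.h; "obj" is
 * a placeholder. Note that -EALREADY is swallowed above, and objects locked
 * against a ww context stay tracked on it and are unlocked when the context
 * is backed off or finished.
 *
 *	struct i915_gem_ww_ctx ww;
 *	int err;
 *
 *	i915_gem_ww_ctx_init(&ww, true);
 * retry:
 *	err = i915_gem_object_lock(obj, &ww);
 *	if (!err) {
 *		// ... object is locked: pin pages, bind vmas, etc. ...
 *	}
 *	if (err == -EDEADLK) {
 *		err = i915_gem_ww_ctx_backoff(&ww);
 *		if (!err)
 *			goto retry;
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 */
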
static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
{
	return test_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj)
{
	set_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
{
	clear_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline bool
i915_gem_object_is_protected(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_PROTECTED;
}

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	return obj->ops->flags & flags;
}

bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj);

bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	/* TODO: make DPT shrinkable when it has no bound vmas */
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE) &&
	       !obj->is_dpt;
}

static inline bool
i915_gem_object_has_self_managed_shrink_list(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}

static inline bool
i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->frontbuffer) || obj->is_dpt;
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);
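
/*
 * Illustrative sketch: the tiling helpers above compose directly, e.g. an
 * X-tiled object (8 rows per tile) with a 4096-byte stride has a tile row of
 * 8 * 4096 = 32768 bytes. "obj" is a placeholder for a tiled object.
 *
 *	if (i915_gem_object_is_tiled(obj)) {
 *		unsigned int row_size = i915_gem_object_get_tile_row_size(obj);
 *
 *		// row_size == stride * (8 or 32), depending on X/Y tiling
 *	}
 */
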
/**
 * __i915_gem_object_page_iter_get_sg - find the scatterlist entry and page
 * position for page offset @n using an i915_gem_object_page_iter
 * @obj: i915 GEM buffer object
 * @iter: i915 GEM buffer object page iterator
 * @n: page offset
 * @offset: output argument, returns the page offset within the found
 *	    scatterlist entry
 *
 * Context: Takes and releases the mutex lock of the i915_gem_object_page_iter.
 *	    Takes and releases the RCU lock to search the radix_tree of
 *	    i915_gem_object_page_iter.
 *
 * Returns:
 * The scatterlist entry containing page @n; the page's position within that
 * entry is returned through @offset.
 *
 * Recommended to use the wrapper macro: i915_gem_object_page_iter_get_sg()
 */
struct scatterlist *
__i915_gem_object_page_iter_get_sg(struct drm_i915_gem_object *obj,
				   struct i915_gem_object_page_iter *iter,
				   pgoff_t n,
				   unsigned int *offset);

/**
 * i915_gem_object_page_iter_get_sg - wrapper macro for
 * __i915_gem_object_page_iter_get_sg()
 * @obj: i915 GEM buffer object
 * @it: i915 GEM buffer object page iterator
 * @n: page offset
 * @offset: output argument, returns the page offset within the found
 *	    scatterlist entry
 *
 * Context: Takes and releases the mutex lock of the i915_gem_object_page_iter.
 *	    Takes and releases the RCU lock to search the radix_tree of
 *	    i915_gem_object_page_iter.
 *
 * Returns:
 * The scatterlist entry containing page @n; the page's position within that
 * entry is returned through @offset.
 *
 * To avoid truncation of the page offset, the macro statically checks that @n
 * is castable to pgoff_t before calling __i915_gem_object_page_iter_get_sg().
 */
#define i915_gem_object_page_iter_get_sg(obj, it, n, offset) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_page_iter_get_sg(obj, it, n, offset); \
})

/**
 * __i915_gem_object_get_sg - find the scatterlist entry and page position for
 * page offset @n of a drm_i915_gem_object
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: output argument, returns the page offset within the found
 *	    scatterlist entry
 *
 * It uses drm_i915_gem_object's internal shmem scatterlist lookup function as
 * the i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg().
 *
 * Returns:
 * The scatterlist entry containing page @n; the page's position within that
 * entry is returned through @offset.
 *
 * Recommended to use the wrapper macro: i915_gem_object_get_sg()
 * See also __i915_gem_object_page_iter_get_sg()
 */
static inline struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj, pgoff_t n,
			 unsigned int *offset)
{
	return __i915_gem_object_page_iter_get_sg(obj, &obj->mm.get_page, n, offset);
}

/**
 * i915_gem_object_get_sg - wrapper macro for __i915_gem_object_get_sg()
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: output argument, returns the page offset within the found
 *	    scatterlist entry
 *
 * Returns:
 * The scatterlist entry containing page @n; the page's position within that
 * entry is returned through @offset.
 *
 * To avoid truncation of the page offset, the macro statically checks that @n
 * is castable to pgoff_t before calling __i915_gem_object_get_sg().
 * See also __i915_gem_object_page_iter_get_sg()
 */
#define i915_gem_object_get_sg(obj, n, offset) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_get_sg(obj, n, offset); \
})
/**
 * __i915_gem_object_get_sg_dma - find the DMA-mapped scatterlist entry and
 * page position for page offset @n of a drm_i915_gem_object
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: output argument, returns the page offset within the found
 *	    scatterlist entry
 *
 * It uses drm_i915_gem_object's internal DMA-mapped scatterlist lookup function
 * as the i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg().
 *
 * Returns:
 * The DMA-mapped scatterlist entry containing page @n; the page's position
 * within that entry is returned through @offset.
 *
 * Recommended to use the wrapper macro: i915_gem_object_get_sg_dma()
 * See also __i915_gem_object_page_iter_get_sg()
 */
static inline struct scatterlist *
__i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj, pgoff_t n,
			     unsigned int *offset)
{
	return __i915_gem_object_page_iter_get_sg(obj, &obj->mm.get_dma_page, n, offset);
}

/**
 * i915_gem_object_get_sg_dma - wrapper macro for __i915_gem_object_get_sg_dma()
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: output argument, returns the page offset within the found
 *	    scatterlist entry
 *
 * Returns:
 * The DMA-mapped scatterlist entry containing page @n; the page's position
 * within that entry is returned through @offset.
 *
 * To avoid truncation of the page offset, the macro statically checks that @n
 * is castable to pgoff_t before calling __i915_gem_object_get_sg_dma().
 * See also __i915_gem_object_page_iter_get_sg()
 */
#define i915_gem_object_get_sg_dma(obj, n, offset) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_get_sg_dma(obj, n, offset); \
})

/**
 * __i915_gem_object_get_page - helper to find the target page with a page offset
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * It uses drm_i915_gem_object's internal shmem scatterlist lookup function as
 * the i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg()
 * internally.
 *
 * Returns:
 * The target page pointer.
 *
 * Recommended to use the wrapper macro: i915_gem_object_get_page()
 * See also __i915_gem_object_page_iter_get_sg()
 */
struct page *
__i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n);

/**
 * i915_gem_object_get_page - wrapper macro for __i915_gem_object_get_page()
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * The target page pointer.
 *
 * To avoid truncation of the page offset, the macro statically checks that @n
 * is castable to pgoff_t before calling __i915_gem_object_get_page().
 * See also __i915_gem_object_page_iter_get_sg()
 */
#define i915_gem_object_get_page(obj, n) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_get_page(obj, n); \
})
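
/*
 * Illustrative usage sketch: copying a few bytes out of page @n of an object
 * whose pages are already pinned (e.g. via i915_gem_object_pin_pages_unlocked()).
 * "buf", "n" and "len" are placeholders.
 *
 *	struct page *page = i915_gem_object_get_page(obj, n);
 *	void *vaddr = kmap_local_page(page);
 *
 *	memcpy(buf, vaddr, len);
 *	kunmap_local(vaddr);
 *
 * See also the i915_gem_object_read_from_page() helper declared later in this
 * header.
 */
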
/**
 * __i915_gem_object_get_dirty_page - helper to find the target page with a page
 * offset
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * It works like i915_gem_object_get_page(), but it marks the returned page dirty.
 *
 * Returns:
 * The target page pointer.
 *
 * Recommended to use the wrapper macro: i915_gem_object_get_dirty_page()
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_page()
 */
struct page *
__i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, pgoff_t n);

/**
 * i915_gem_object_get_dirty_page - wrapper macro for __i915_gem_object_get_dirty_page()
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * The target page pointer.
 *
 * To avoid truncation of the page offset, the macro statically checks that @n
 * is castable to pgoff_t before calling __i915_gem_object_get_dirty_page().
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_page()
 */
#define i915_gem_object_get_dirty_page(obj, n) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_get_dirty_page(obj, n); \
})

/**
 * __i915_gem_object_get_dma_address_len - helper to get the bus address of
 * page @n of the DMA-mapped scatterlist of an i915 GEM buffer object, and the
 * length of the containing DMA segment
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @len: output argument, returns the remaining length of the DMA-mapped
 *	 scatterlist segment containing page @n
 *
 * Returns:
 * The bus address of page @n within the DMA-mapped scatterlist.
 *
 * Recommended to use the wrapper macro: i915_gem_object_get_dma_address_len()
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_sg_dma()
 */
dma_addr_t
__i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj, pgoff_t n,
				      unsigned int *len);

/**
 * i915_gem_object_get_dma_address_len - wrapper macro for
 * __i915_gem_object_get_dma_address_len()
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @len: output argument, returns the remaining length of the DMA-mapped
 *	 scatterlist segment containing page @n
 *
 * Returns:
 * The bus address of page @n within the DMA-mapped scatterlist.
 *
 * To avoid truncation of the page offset, the macro statically checks that @n
 * is castable to pgoff_t before calling __i915_gem_object_get_dma_address_len().
 * See also __i915_gem_object_page_iter_get_sg() and
 * __i915_gem_object_get_dma_address_len()
 */
#define i915_gem_object_get_dma_address_len(obj, n, len) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_get_dma_address_len(obj, n, len); \
})

/**
 * __i915_gem_object_get_dma_address - helper to get the bus address of page @n
 * of the DMA-mapped scatterlist of an i915 GEM buffer object
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * The bus address of page @n within the DMA-mapped scatterlist.
 *
 * Recommended to use the wrapper macro: i915_gem_object_get_dma_address()
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_sg_dma()
 */
dma_addr_t
__i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, pgoff_t n);

/**
 * i915_gem_object_get_dma_address - wrapper macro for
 * __i915_gem_object_get_dma_address()
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * The bus address of page @n within the DMA-mapped scatterlist.
 *
 * To avoid truncation of the page offset, the macro statically checks that @n
 * is castable to pgoff_t before calling __i915_gem_object_get_dma_address().
 * See also __i915_gem_object_page_iter_get_sg() and
 * __i915_gem_object_get_dma_address()
 */
#define i915_gem_object_get_dma_address(obj, n) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_get_dma_address(obj, n); \
})
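
/*
 * Illustrative usage sketch: walking the bus address of every page of an
 * object whose backing store is pinned and DMA mapped, e.g. to program it
 * into some address table. "program_entry" is a placeholder for whatever
 * consumes the addresses.
 *
 *	pgoff_t i;
 *
 *	for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
 *		dma_addr_t addr = i915_gem_object_get_dma_address(obj, i);
 *
 *		program_entry(i, addr);
 *	}
 */
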
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	assert_object_held(obj);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}

int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
int i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);

void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
						    enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
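
/*
 * Illustrative usage sketch: filling a shmem-backed object through a kernel
 * mapping. I915_MAP_WB selects a write-back mapping; "data" and "len" are
 * placeholders. The unlocked variant is used so no ww context is required.
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, len);
 *	i915_gem_object_flush_map(obj);
 *	i915_gem_object_unpin_map(obj);
 */
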
void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
				     struct dma_fence **fence);
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
				      bool intr);
bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj);

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_set_pat_index(struct drm_i915_gem_object *obj,
				   unsigned int pat_index);
bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     struct i915_gem_ww_ctx *ww,
				     u32 alignment,
				     const struct i915_gtt_view *view,
				     unsigned int flags);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (i915_gem_cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}
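
/*
 * Illustrative usage sketch: moving an object into the CPU write domain before
 * the CPU dirties its backing store. The domain helpers above must be called
 * with the object lock held; "obj" is a placeholder.
 *
 *	int err;
 *
 *	err = i915_gem_object_lock_interruptible(obj, NULL);
 *	if (err)
 *		return err;
 *
 *	err = i915_gem_object_set_to_cpu_domain(obj, true);
 *	i915_gem_object_unlock(obj);
 */
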
void i915_gem_fence_wait_priority(struct dma_fence *fence,
				  const struct i915_sched_attr *attr);

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);

int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);

void __i915_gem_free_object_rcu(struct rcu_head *head);

void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj);

void __i915_gem_free_object(struct drm_i915_gem_object *obj);

bool i915_gem_object_evictable(struct drm_i915_gem_object *obj);

bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);

int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    enum intel_region_id id);
int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			      struct i915_gem_ww_ctx *ww,
			      enum intel_region_id id,
			      unsigned int flags);

bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
				 enum intel_region_id id);

int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
				   unsigned int flags);

bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
					enum intel_memory_type type);

bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj);

int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
			 size_t size, struct intel_memory_region *mr,
			 struct address_space *mapping,
			 unsigned int max_segment);
void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
			 bool dirty, bool backup);
void __shmem_writeback(size_t size, struct address_space *mapping);

#ifdef CONFIG_MMU_NOTIFIER
static inline bool
i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
{
	return obj->userptr.notifier.mm;
}

int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj);
#else
static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) { return false; }

static inline int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }

#endif /* CONFIG_MMU_NOTIFIER */

#endif /* __I915_GEM_OBJECT_H__ */