/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include "intel_memory_region.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ww.h"
#include "i915_vma_types.h"

enum intel_region_id;

#define obj_to_i915(obj__) to_i915((obj__)->base.dev)

static inline bool i915_gem_object_size_2big(u64 size)
{
	struct drm_i915_gem_object *obj;

	if (overflows_type(size, obj->base.size))
		return true;

	return false;
}

unsigned int i915_gem_get_pat_index(struct drm_i915_private *i915,
				    enum i915_cache_level level);
bool i915_gem_object_has_cache_level(const struct drm_i915_gem_object *obj,
				     enum i915_cache_level lvl);
void i915_gem_init__objects(struct drm_i915_private *i915);

void i915_objects_module_exit(void);
int i915_objects_module_init(void);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key,
			  unsigned alloc_flags);

void __i915_gem_object_fini(struct drm_i915_gem_object *obj);

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, resource_size_t size);
struct drm_i915_gem_object *
__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
			      struct intel_memory_region **placements,
			      unsigned int n_placements);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;

void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
				const struct drm_i915_gem_pwrite *args);
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
			       const struct drm_i915_gem_pread *args);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages);
void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
				    struct sg_table *pages);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
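
/*
 * Example (editor's illustrative sketch, not part of the original header):
 * creating a shmem-backed object and dropping the creation reference once
 * done. The size, the "i915" local and the error handling are assumptions
 * for illustration only.
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_create_shmem(i915, SZ_64K);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 *	... use the object ...
 *
 *	i915_gem_object_put(obj);
 */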

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;

	return obj;
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	obj = i915_gem_object_get_rcu(obj);
	rcu_read_unlock();

	return obj;
}

__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}
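
/*
 * Example (editor's illustrative sketch, not part of the original header):
 * translating a userspace handle into a reference-counted object pointer,
 * as at the start of a typical ioctl handler. "file" and "args" are
 * hypothetical locals standing in for the ioctl's drm_file and argument
 * struct.
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_lookup(file, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	... operate on obj ...
 *
 *	i915_gem_object_put(obj);
 */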

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

/*
 * If more than one potential simultaneous locker, assert held.
 */
static inline void assert_object_held_shared(const struct drm_i915_gem_object *obj)
{
	/*
	 * Note mm list lookup is protected by
	 * kref_get_unless_zero().
	 */
	if (IS_ENABLED(CONFIG_LOCKDEP) &&
	    kref_read(&obj->base.refcount) > 0)
		assert_object_held(obj);
}

static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
					 struct i915_gem_ww_ctx *ww,
					 bool intr)
{
	int ret;

	if (intr)
		ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
	else
		ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);

	if (!ret && ww) {
		i915_gem_object_get(obj);
		list_add_tail(&obj->obj_link, &ww->obj_list);
	}
	if (ret == -EALREADY)
		ret = 0;

	if (ret == -EDEADLK) {
		i915_gem_object_get(obj);
		ww->contended = obj;
	}

	return ret;
}

static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
				       struct i915_gem_ww_ctx *ww)
{
	return __i915_gem_object_lock(obj, ww, ww && ww->intr);
}

static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
						     struct i915_gem_ww_ctx *ww)
{
	WARN_ON(ww && !ww->intr);
	return __i915_gem_object_lock(obj, ww, true);
}

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj,
					   struct i915_gem_ww_ctx *ww)
{
	if (!ww)
		return dma_resv_trylock(obj->base.resv);
	else
		return ww_mutex_trylock(&obj->base.resv->lock, &ww->ctx);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	if (obj->ops->adjust_lru)
		obj->ops->adjust_lru(obj);

	dma_resv_unlock(obj->base.resv);
}
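
/*
 * Example (editor's illustrative sketch, not part of the original header):
 * taking the object lock under a ww acquire context and backing off on
 * -EDEADLK. i915_gem_ww_ctx_init(), i915_gem_ww_ctx_backoff() and
 * i915_gem_ww_ctx_fini() come from "i915_gem_ww.h"; the fini call unlocks
 * and releases every object added to the context. The critical section
 * body is elided.
 *
 *	struct i915_gem_ww_ctx ww;
 *	int err;
 *
 *	i915_gem_ww_ctx_init(&ww, true);
 * retry:
 *	err = i915_gem_object_lock(obj, &ww);
 *	if (!err) {
 *		... touch the object while it is held ...
 *	}
 *	if (err == -EDEADLK) {
 *		err = i915_gem_ww_ctx_backoff(&ww);
 *		if (!err)
 *			goto retry;
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 */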

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
{
	return test_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj)
{
	set_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
{
	clear_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline bool
i915_gem_object_is_protected(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_PROTECTED;
}

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	return obj->ops->flags & flags;
}

bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj);

bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}

static inline bool
i915_gem_object_has_self_managed_shrink_list(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}

static inline bool
i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->frontbuffer) || obj->is_dpt;
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}
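
/*
 * Worked example (editor's note, not part of the original header): per the
 * helpers above, a Y-tiled object uses 32-row tiles and every other tiling
 * mode uses 8-row tiles, so an X-tiled object with a hypothetical 4096-byte
 * stride has a tile row size of 4096 * 8 = 32768 bytes as returned by
 * i915_gem_object_get_tile_row_size().
 */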

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

/**
 * __i915_gem_object_page_iter_get_sg - helper to find the target scatterlist
 * pointer and the target page position using pgoff_t n input argument and
 * i915_gem_object_page_iter
 * @obj: i915 GEM buffer object
 * @iter: i915 GEM buffer object page iterator
 * @n: page offset
 * @offset: searched physical offset; used to return the physical page
 *          offset value
 *
 * Context: Takes and releases the mutex lock of the i915_gem_object_page_iter.
 *          Takes and releases the RCU lock to search the radix_tree of
 *          i915_gem_object_page_iter.
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * Recommended to use wrapper macro: i915_gem_object_page_iter_get_sg()
 */
struct scatterlist *
__i915_gem_object_page_iter_get_sg(struct drm_i915_gem_object *obj,
				   struct i915_gem_object_page_iter *iter,
				   pgoff_t n,
				   unsigned int *offset);

/**
 * i915_gem_object_page_iter_get_sg - wrapper macro for
 * __i915_gem_object_page_iter_get_sg()
 * @obj: i915 GEM buffer object
 * @it: i915 GEM buffer object page iterator
 * @n: page offset
 * @offset: searched physical offset; used to return the physical page
 *          offset value
 *
 * Context: Takes and releases the mutex lock of the i915_gem_object_page_iter.
 *          Takes and releases the RCU lock to search the radix_tree of
 *          i915_gem_object_page_iter.
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * In order to avoid the truncation of the input parameter, it checks the page
 * offset n's type before calling __i915_gem_object_page_iter_get_sg().
 */
#define i915_gem_object_page_iter_get_sg(obj, it, n, offset) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_page_iter_get_sg(obj, it, n, offset); \
})

/**
 * __i915_gem_object_get_sg - helper to find the target scatterlist
 * pointer and the target page position using pgoff_t n input argument and
 * drm_i915_gem_object. It uses an internal shmem scatterlist lookup function.
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: searched physical offset; used to return the physical page
 *          offset value
 *
 * It uses drm_i915_gem_object's internal shmem scatterlist lookup function as
 * i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg().
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * Recommended to use wrapper macro: i915_gem_object_get_sg()
 * See also __i915_gem_object_page_iter_get_sg()
 */
static inline struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj, pgoff_t n,
			 unsigned int *offset)
{
	return __i915_gem_object_page_iter_get_sg(obj, &obj->mm.get_page, n, offset);
}

/**
 * i915_gem_object_get_sg - wrapper macro for __i915_gem_object_get_sg()
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: searched physical offset; used to return the physical page
 *          offset value
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * In order to avoid the truncation of the input parameter, it checks the page
 * offset n's type before calling __i915_gem_object_get_sg().
 * See also __i915_gem_object_page_iter_get_sg()
 */
#define i915_gem_object_get_sg(obj, n, offset) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_get_sg(obj, n, offset); \
})
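
/*
 * Example (editor's illustrative sketch, not part of the original header):
 * looking up the scatterlist entry that backs page @n of an object whose
 * pages are already pinned and backed by struct pages. This is roughly
 * what __i915_gem_object_get_page() does internally; "n" is a hypothetical
 * pgoff_t.
 *
 *	struct scatterlist *sg;
 *	unsigned int offset;
 *	struct page *page;
 *
 *	sg = i915_gem_object_get_sg(obj, n, &offset);
 *	page = nth_page(sg_page(sg), offset);
 */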

/**
 * __i915_gem_object_get_sg_dma - helper to find the target scatterlist
 * pointer and the target page position using pgoff_t n input argument and
 * drm_i915_gem_object. It uses an internal DMA mapped scatterlist lookup function
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: searched physical offset; used to return the physical page
 *          offset value
 *
 * It uses drm_i915_gem_object's internal DMA mapped scatterlist lookup function
 * as i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg().
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * Recommended to use wrapper macro: i915_gem_object_get_sg_dma()
 * See also __i915_gem_object_page_iter_get_sg()
 */
static inline struct scatterlist *
__i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj, pgoff_t n,
			     unsigned int *offset)
{
	return __i915_gem_object_page_iter_get_sg(obj, &obj->mm.get_dma_page, n, offset);
}

/**
 * i915_gem_object_get_sg_dma - wrapper macro for __i915_gem_object_get_sg_dma()
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: searched physical offset; used to return the physical page
 *          offset value
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * In order to avoid the truncation of the input parameter, it checks the page
 * offset n's type before calling __i915_gem_object_get_sg_dma().
 * See also __i915_gem_object_page_iter_get_sg()
 */
#define i915_gem_object_get_sg_dma(obj, n, offset) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_get_sg_dma(obj, n, offset); \
})

/**
 * __i915_gem_object_get_page - helper to find the target page with a page offset
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * It uses drm_i915_gem_object's internal shmem scatterlist lookup function as
 * i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg()
 * internally.
 *
 * Returns:
 * The target page pointer.
 *
 * Recommended to use wrapper macro: i915_gem_object_get_page()
 * See also __i915_gem_object_page_iter_get_sg()
 */
struct page *
__i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n);

/**
 * i915_gem_object_get_page - wrapper macro for __i915_gem_object_get_page
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * The target page pointer.
 *
 * In order to avoid the truncation of the input parameter, it checks the page
 * offset n's type before calling __i915_gem_object_get_page().
 * See also __i915_gem_object_page_iter_get_sg()
 */
#define i915_gem_object_get_page(obj, n) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_get_page(obj, n); \
})

/**
 * __i915_gem_object_get_dirty_page - helper to find the target page with a page
 * offset
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * It works like i915_gem_object_get_page(), but it marks the returned page dirty.
 *
 * Returns:
 * The target page pointer.
 *
 * Recommended to use wrapper macro: i915_gem_object_get_dirty_page()
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_page()
 */
struct page *
__i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, pgoff_t n);

/**
 * i915_gem_object_get_dirty_page - wrapper macro for __i915_gem_object_get_dirty_page
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * The target page pointer.
 *
 * In order to avoid the truncation of the input parameter, it checks the page
 * offset n's type before calling __i915_gem_object_get_dirty_page().
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_page()
 */
#define i915_gem_object_get_dirty_page(obj, n) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_get_dirty_page(obj, n); \
})
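
/*
 * Example (editor's illustrative sketch, not part of the original header):
 * writing to a single backing page through a temporary kernel mapping. The
 * object's pages must already be pinned and backed by struct pages; "n" and
 * the memset are assumptions for illustration.
 *
 *	struct page *page = i915_gem_object_get_dirty_page(obj, n);
 *	void *vaddr = kmap_local_page(page);
 *
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap_local(vaddr);
 */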

/**
 * __i915_gem_object_get_dma_address_len - helper to get the bus address of a
 * targeted DMA mapped scatterlist from an i915 GEM buffer object and its length
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @len: used to return the length of the DMA mapped scatterlist's DMA bus addresses
 *
 * Returns:
 * Bus address of the targeted DMA mapped scatterlist
 *
 * Recommended to use wrapper macro: i915_gem_object_get_dma_address_len()
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_sg_dma()
 */
dma_addr_t
__i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj, pgoff_t n,
				      unsigned int *len);

/**
 * i915_gem_object_get_dma_address_len - wrapper macro for
 * __i915_gem_object_get_dma_address_len
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @len: used to return the length of the DMA mapped scatterlist's DMA bus addresses
 *
 * Returns:
 * Bus address of the targeted DMA mapped scatterlist
 *
 * In order to avoid the truncation of the input parameter, it checks the page
 * offset n's type before calling __i915_gem_object_get_dma_address_len().
 * See also __i915_gem_object_page_iter_get_sg() and
 * __i915_gem_object_get_dma_address_len()
 */
#define i915_gem_object_get_dma_address_len(obj, n, len) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_get_dma_address_len(obj, n, len); \
})

/**
 * __i915_gem_object_get_dma_address - helper to get the bus address of a
 * targeted DMA mapped scatterlist from an i915 GEM buffer object
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * Bus address of the targeted DMA mapped scatterlist
 *
 * Recommended to use wrapper macro: i915_gem_object_get_dma_address()
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_sg_dma()
 */
dma_addr_t
__i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, pgoff_t n);

/**
 * i915_gem_object_get_dma_address - wrapper macro for
 * __i915_gem_object_get_dma_address
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * Bus address of the targeted DMA mapped scatterlist
 *
 * In order to avoid the truncation of the input parameter, it checks the page
 * offset n's type before calling __i915_gem_object_get_dma_address().
 * See also __i915_gem_object_page_iter_get_sg() and
 * __i915_gem_object_get_dma_address()
 */
#define i915_gem_object_get_dma_address(obj, n) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_get_dma_address(obj, n); \
})

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	assert_object_held(obj);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}

int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
int i915_gem_object_truncate(struct drm_i915_gem_object *obj);
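
/*
 * Example (editor's illustrative sketch, not part of the original header):
 * pinning the backing store and reading back the DMA address of the first
 * page, assuming the backend has DMA mapped the pages as part of get_pages.
 * Error handling beyond the pin is elided.
 *
 *	dma_addr_t addr;
 *	int err;
 *
 *	err = i915_gem_object_pin_pages_unlocked(obj);
 *	if (err)
 *		return err;
 *
 *	addr = i915_gem_object_get_dma_address(obj, 0);
 *	... program addr into hardware ...
 *
 *	i915_gem_object_unpin_pages(obj);
 */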

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);

void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
						    enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
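
/*
 * Example (editor's illustrative sketch, not part of the original header):
 * mapping the whole object into the kernel address space, filling it and
 * releasing the mapping. A WriteBack mapping and the "data"/"size" locals
 * are assumptions; a real caller must pick a map type valid for the object.
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, size);
 *	i915_gem_object_flush_map(obj);
 *	i915_gem_object_unpin_map(obj);
 */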

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
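
/*
 * Example (editor's illustrative sketch, not part of the original header):
 * a CPU write bracketed by i915_gem_object_prepare_write() and
 * i915_gem_object_finish_access(), with the object lock already held.
 * drm_clflush_virt_range() and the "vaddr"/"len" locals are used purely to
 * illustrate the CLFLUSH_* flags returned in needs_clflush.
 *
 *	unsigned int needs_clflush;
 *	int err;
 *
 *	err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *	if (err)
 *		return err;
 *
 *	if (needs_clflush & CLFLUSH_BEFORE)
 *		drm_clflush_virt_range(vaddr, len);
 *	... write through vaddr ...
 *	if (needs_clflush & CLFLUSH_AFTER)
 *		drm_clflush_virt_range(vaddr, len);
 *
 *	i915_gem_object_finish_access(obj);
 */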

int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
				     struct dma_fence **fence);
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
				      bool intr);
bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj);

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_set_pat_index(struct drm_i915_gem_object *obj,
				   unsigned int pat_index);
bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     struct i915_gem_ww_ctx *ww,
				     u32 alignment,
				     const struct i915_gtt_view *view,
				     unsigned int flags);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (i915_gem_cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

void i915_gem_fence_wait_priority(struct dma_fence *fence,
				  const struct i915_sched_attr *attr);

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);

int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);

void __i915_gem_free_object_rcu(struct rcu_head *head);

void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj);

void __i915_gem_free_object(struct drm_i915_gem_object *obj);

bool i915_gem_object_evictable(struct drm_i915_gem_object *obj);

bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);

int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    enum intel_region_id id);
int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			      struct i915_gem_ww_ctx *ww,
			      enum intel_region_id id,
			      unsigned int flags);

bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
				 enum intel_region_id id);

int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
				   unsigned int flags);

bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
					enum intel_memory_type type);

bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj);

int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
			 size_t size, struct intel_memory_region *mr,
			 struct address_space *mapping,
			 unsigned int max_segment);
void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
			 bool dirty, bool backup);
void __shmem_writeback(size_t size, struct address_space *mapping);

#ifdef CONFIG_MMU_NOTIFIER
static inline bool
i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
{
	return obj->userptr.notifier.mm;
}

int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj);
#else
static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) { return false; }

static inline int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }

#endif

#endif