/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include "intel_memory_region.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ww.h"
#include "i915_vma_types.h"

enum intel_region_id;

#define obj_to_i915(obj__) to_i915((obj__)->base.dev)

static inline bool i915_gem_object_size_2big(u64 size)
{
	struct drm_i915_gem_object *obj;

	if (overflows_type(size, obj->base.size))
		return true;

	return false;
}

unsigned int i915_gem_get_pat_index(struct drm_i915_private *i915,
				    enum i915_cache_level level);
bool i915_gem_object_has_cache_level(const struct drm_i915_gem_object *obj,
				     enum i915_cache_level lvl);
void i915_gem_init__objects(struct drm_i915_private *i915);

void i915_objects_module_exit(void);
int i915_objects_module_init(void);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key,
			  unsigned alloc_flags);

void __i915_gem_object_fini(struct drm_i915_gem_object *obj);

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, resource_size_t size);
struct drm_i915_gem_object *
__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
			      struct intel_memory_region **placements,
			      unsigned int n_placements);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;

void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
				const struct drm_i915_gem_pwrite *args);
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
			       const struct drm_i915_gem_pread *args);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages);
void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
				    struct sg_table *pages);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;

	return obj;
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	obj = i915_gem_object_get_rcu(obj);
	rcu_read_unlock();

	return obj;
}

__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}
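
/*
 * Illustrative example only (not part of the API): the usual pattern for
 * resolving a userspace handle into a referenced object and dropping that
 * reference when done. The file and handle values are assumed to come from
 * an ioctl's file private and argument struct (hypothetical here):
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_lookup(file, handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	... operate on obj; the lookup took a reference ...
 *
 *	i915_gem_object_put(obj);
 */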

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

/*
 * If more than one potential simultaneous locker, assert held.
 */
static inline void assert_object_held_shared(const struct drm_i915_gem_object *obj)
{
	/*
	 * Note mm list lookup is protected by
	 * kref_get_unless_zero().
	 */
	if (IS_ENABLED(CONFIG_LOCKDEP) &&
	    kref_read(&obj->base.refcount) > 0)
		assert_object_held(obj);
}

static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
					 struct i915_gem_ww_ctx *ww,
					 bool intr)
{
	int ret;

	if (intr)
		ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
	else
		ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);

	if (!ret && ww) {
		i915_gem_object_get(obj);
		list_add_tail(&obj->obj_link, &ww->obj_list);
	}
	if (ret == -EALREADY)
		ret = 0;

	if (ret == -EDEADLK) {
		i915_gem_object_get(obj);
		ww->contended = obj;
	}

	return ret;
}

static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
				       struct i915_gem_ww_ctx *ww)
{
	return __i915_gem_object_lock(obj, ww, ww && ww->intr);
}

static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
						     struct i915_gem_ww_ctx *ww)
{
	WARN_ON(ww && !ww->intr);
	return __i915_gem_object_lock(obj, ww, true);
}

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj,
					   struct i915_gem_ww_ctx *ww)
{
	if (!ww)
		return dma_resv_trylock(obj->base.resv);
	else
		return ww_mutex_trylock(&obj->base.resv->lock, &ww->ctx);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	if (obj->ops->adjust_lru)
		obj->ops->adjust_lru(obj);

	dma_resv_unlock(obj->base.resv);
}
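
/*
 * Illustrative example only: a minimal sketch of the ww-mutex backoff loop
 * these lock helpers are meant to be used with, assuming the i915_gem_ww_ctx
 * helpers declared in i915_gem_ww.h (i915_gem_ww_ctx_init/backoff/fini).
 * On -EDEADLK the contended object is recorded in the ww context and the
 * whole sequence is retried after backing off:
 *
 *	struct i915_gem_ww_ctx ww;
 *	int err;
 *
 *	i915_gem_ww_ctx_init(&ww, true);
 * retry:
 *	err = i915_gem_object_lock(obj, &ww);
 *	if (!err) {
 *		... obj stays locked until i915_gem_ww_ctx_fini() ...
 *	}
 *	if (err == -EDEADLK) {
 *		err = i915_gem_ww_ctx_backoff(&ww);
 *		if (!err)
 *			goto retry;
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 */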

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
{
	return test_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj)
{
	set_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
{
	clear_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline bool
i915_gem_object_is_protected(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_PROTECTED;
}

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	return obj->ops->flags & flags;
}

bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj);

bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}

static inline bool
i915_gem_object_has_self_managed_shrink_list(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}

static inline bool
i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->frontbuffer) || obj->is_dpt;
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);
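
/*
 * Worked example (illustrative only): an X-tiled object (tile height 8 rows)
 * with a 4096-byte stride has a tile row size of 4096 * 8 = 32768 bytes,
 * while the same stride with Y-tiling (tile height 32 rows) gives
 * 4096 * 32 = 131072 bytes.
 */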

/**
 * __i915_gem_object_page_iter_get_sg - helper to find the target scatterlist
 * pointer and the target page position using pgoff_t n input argument and
 * i915_gem_object_page_iter
 * @obj: i915 GEM buffer object
 * @iter: i915 GEM buffer object page iterator
 * @n: page offset
 * @offset: searched physical offset,
 *          it will be used for returning physical page offset value
 *
 * Context: Takes and releases the mutex lock of the i915_gem_object_page_iter.
 *          Takes and releases the RCU lock to search the radix_tree of
 *          i915_gem_object_page_iter.
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * Recommended to use wrapper macro: i915_gem_object_page_iter_get_sg()
 */
struct scatterlist *
__i915_gem_object_page_iter_get_sg(struct drm_i915_gem_object *obj,
				   struct i915_gem_object_page_iter *iter,
				   pgoff_t n,
				   unsigned int *offset);

/**
 * i915_gem_object_page_iter_get_sg - wrapper macro for
 * __i915_gem_object_page_iter_get_sg()
 * @obj: i915 GEM buffer object
 * @it: i915 GEM buffer object page iterator
 * @n: page offset
 * @offset: searched physical offset,
 *          it will be used for returning physical page offset value
 *
 * Context: Takes and releases the mutex lock of the i915_gem_object_page_iter.
 *          Takes and releases the RCU lock to search the radix_tree of
 *          i915_gem_object_page_iter.
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * In order to avoid the truncation of the input parameter, it checks the page
 * offset n's type from the input parameter before calling
 * __i915_gem_object_page_iter_get_sg().
 */
#define i915_gem_object_page_iter_get_sg(obj, it, n, offset) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_page_iter_get_sg(obj, it, n, offset); \
})

/**
 * __i915_gem_object_get_sg - helper to find the target scatterlist
 * pointer and the target page position using pgoff_t n input argument and
 * drm_i915_gem_object. It uses an internal shmem scatterlist lookup function.
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: searched physical offset,
 *          it will be used for returning physical page offset value
 *
 * It uses drm_i915_gem_object's internal shmem scatterlist lookup function as
 * i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg().
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * Recommended to use wrapper macro: i915_gem_object_get_sg()
 * See also __i915_gem_object_page_iter_get_sg()
 */
static inline struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj, pgoff_t n,
			 unsigned int *offset)
{
	return __i915_gem_object_page_iter_get_sg(obj, &obj->mm.get_page, n, offset);
}

/**
 * i915_gem_object_get_sg - wrapper macro for __i915_gem_object_get_sg()
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: searched physical offset,
 *          it will be used for returning physical page offset value
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * In order to avoid the truncation of the input parameter, it checks the page
 * offset n's type from the input parameter before calling
 * __i915_gem_object_get_sg().
 * See also __i915_gem_object_page_iter_get_sg()
 */
#define i915_gem_object_get_sg(obj, n, offset) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_get_sg(obj, n, offset); \
})

/**
 * __i915_gem_object_get_sg_dma - helper to find the target scatterlist
 * pointer and the target page position using pgoff_t n input argument and
 * drm_i915_gem_object. It uses an internal DMA mapped scatterlist lookup function
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: searched physical offset,
 *          it will be used for returning physical page offset value
 *
 * It uses drm_i915_gem_object's internal DMA mapped scatterlist lookup function
 * as i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg().
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * Recommended to use wrapper macro: i915_gem_object_get_sg_dma()
 * See also __i915_gem_object_page_iter_get_sg()
 */
static inline struct scatterlist *
__i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj, pgoff_t n,
			     unsigned int *offset)
{
	return __i915_gem_object_page_iter_get_sg(obj, &obj->mm.get_dma_page, n, offset);
}

/**
 * i915_gem_object_get_sg_dma - wrapper macro for __i915_gem_object_get_sg_dma()
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: searched physical offset,
 *          it will be used for returning physical page offset value
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * In order to avoid the truncation of the input parameter, it checks the page
 * offset n's type from the input parameter before calling
 * __i915_gem_object_get_sg_dma().
 * See also __i915_gem_object_page_iter_get_sg()
 */
#define i915_gem_object_get_sg_dma(obj, n, offset) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_get_sg_dma(obj, n, offset); \
})

/**
 * __i915_gem_object_get_page - helper to find the target page with a page offset
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * It uses drm_i915_gem_object's internal shmem scatterlist lookup function as
 * i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg()
 * internally.
 *
 * Returns:
 * The target page pointer.
 *
 * Recommended to use wrapper macro: i915_gem_object_get_page()
 * See also __i915_gem_object_page_iter_get_sg()
 */
struct page *
__i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n);

/**
 * i915_gem_object_get_page - wrapper macro for __i915_gem_object_get_page
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * The target page pointer.
 *
 * In order to avoid the truncation of the input parameter, it checks the page
 * offset n's type from the input parameter before calling
 * __i915_gem_object_get_page().
 * See also __i915_gem_object_page_iter_get_sg()
 */
#define i915_gem_object_get_page(obj, n) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_get_page(obj, n); \
})
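
/*
 * Illustrative example only: walking an object's backing pages, assuming the
 * caller has already pinned them (e.g. via i915_gem_object_pin_pages()):
 *
 *	pgoff_t idx;
 *
 *	for (idx = 0; idx < obj->base.size >> PAGE_SHIFT; idx++) {
 *		struct page *page = i915_gem_object_get_page(obj, idx);
 *
 *		... access the page, e.g. via kmap_local_page() ...
 *	}
 */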

/**
 * __i915_gem_object_get_dirty_page - helper to find the target page with a page
 * offset
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * It works like i915_gem_object_get_page(), but it marks the returned page dirty.
 *
 * Returns:
 * The target page pointer.
 *
 * Recommended to use wrapper macro: i915_gem_object_get_dirty_page()
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_page()
 */
struct page *
__i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, pgoff_t n);

/**
 * i915_gem_object_get_dirty_page - wrapper macro for __i915_gem_object_get_dirty_page
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * The target page pointer.
 *
 * In order to avoid the truncation of the input parameter, it checks the page
 * offset n's type from the input parameter before calling
 * __i915_gem_object_get_dirty_page().
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_page()
 */
#define i915_gem_object_get_dirty_page(obj, n) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_get_dirty_page(obj, n); \
})

/**
 * __i915_gem_object_get_dma_address_len - helper to get bus addresses of
 * targeted DMA mapped scatterlist from i915 GEM buffer object and its length
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @len: DMA mapped scatterlist's DMA bus addresses length to return
 *
 * Returns:
 * Bus addresses of targeted DMA mapped scatterlist
 *
 * Recommended to use wrapper macro: i915_gem_object_get_dma_address_len()
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_sg_dma()
 */
dma_addr_t
__i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj, pgoff_t n,
				      unsigned int *len);

/**
 * i915_gem_object_get_dma_address_len - wrapper macro for
 * __i915_gem_object_get_dma_address_len
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @len: DMA mapped scatterlist's DMA bus addresses length to return
 *
 * Returns:
 * Bus addresses of targeted DMA mapped scatterlist
 *
 * In order to avoid the truncation of the input parameter, it checks the page
 * offset n's type from the input parameter before calling
 * __i915_gem_object_get_dma_address_len().
 * See also __i915_gem_object_page_iter_get_sg() and
 * __i915_gem_object_get_dma_address_len()
 */
#define i915_gem_object_get_dma_address_len(obj, n, len) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_get_dma_address_len(obj, n, len); \
})
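
/*
 * Illustrative example only: looking up where page n of an object lives in
 * the DMA address space, assuming its pages are pinned and DMA mapped. Here
 * len is taken to be the remaining length, in bytes, of the contiguous DMA
 * chunk starting at page n (as reported by the helper above):
 *
 *	dma_addr_t addr;
 *	unsigned int len;
 *
 *	addr = i915_gem_object_get_dma_address_len(obj, n, &len);
 *	... program addr into the hardware; up to len bytes are contiguous ...
 */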

/**
 * __i915_gem_object_get_dma_address - helper to get bus addresses of
 * targeted DMA mapped scatterlist from i915 GEM buffer object
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * Bus addresses of targeted DMA mapped scatterlist
 *
 * Recommended to use wrapper macro: i915_gem_object_get_dma_address()
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_sg_dma()
 */
dma_addr_t
__i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, pgoff_t n);

/**
 * i915_gem_object_get_dma_address - wrapper macro for
 * __i915_gem_object_get_dma_address
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * Bus addresses of targeted DMA mapped scatterlist
 *
 * In order to avoid the truncation of the input parameter, it checks the page
 * offset n's type from the input parameter before calling
 * __i915_gem_object_get_dma_address().
 * See also __i915_gem_object_page_iter_get_sg() and
 * __i915_gem_object_get_dma_address()
 */
#define i915_gem_object_get_dma_address(obj, n) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_get_dma_address(obj, n); \
})

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	assert_object_held(obj);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}

int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
int i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);

void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
						    enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
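
/*
 * Illustrative example only: a minimal sketch of CPU access through a pinned
 * kernel mapping, assuming no ww context is in use (hence the _unlocked
 * variant) and a write-back mapping is acceptable for this object:
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memset(vaddr, 0, obj->base.size);
 *	i915_gem_object_flush_map(obj);
 *	i915_gem_object_unpin_map(obj);
 */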

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
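
/*
 * Illustrative example only: a minimal sketch of CPU writes bracketed by
 * i915_gem_object_prepare_write()/i915_gem_object_finish_access(), assuming
 * the object lock is already held and that src/len (hypothetical) describe
 * the data being copied into the first page:
 *
 *	unsigned int needs_clflush;
 *	struct page *page;
 *	void *vaddr;
 *	int err;
 *
 *	err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *	if (err)
 *		return err;
 *
 *	page = i915_gem_object_get_dirty_page(obj, 0);
 *	vaddr = kmap_local_page(page);
 *	if (needs_clflush & CLFLUSH_BEFORE)
 *		drm_clflush_virt_range(vaddr, PAGE_SIZE);
 *	memcpy(vaddr, src, len);
 *	if (needs_clflush & CLFLUSH_AFTER)
 *		drm_clflush_virt_range(vaddr, len);
 *	kunmap_local(vaddr);
 *
 *	i915_gem_object_finish_access(obj);
 */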

int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
				     struct dma_fence **fence);
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
				      bool intr);
bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj);

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_set_pat_index(struct drm_i915_gem_object *obj,
				   unsigned int pat_index);
bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     struct i915_gem_ww_ctx *ww,
				     u32 alignment, unsigned int guard,
				     const struct i915_gtt_view *view,
				     unsigned int flags);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (i915_gem_cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

void i915_gem_fence_wait_priority(struct dma_fence *fence,
				  const struct i915_sched_attr *attr);

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);

int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);

void __i915_gem_free_object_rcu(struct rcu_head *head);

void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj);

void __i915_gem_free_object(struct drm_i915_gem_object *obj);

bool i915_gem_object_evictable(struct drm_i915_gem_object *obj);

bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);

int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    enum intel_region_id id);
int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			      struct i915_gem_ww_ctx *ww,
			      enum intel_region_id id,
			      unsigned int flags);

bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
				 enum intel_region_id id);

int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
				   unsigned int flags);

bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
					enum intel_memory_type type);

bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj);

int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
			 size_t size, struct intel_memory_region *mr,
			 struct address_space *mapping,
			 unsigned int max_segment);
void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
			 bool dirty, bool backup);
void __shmem_writeback(size_t size, struct address_space *mapping);

#ifdef CONFIG_MMU_NOTIFIER
static inline bool
i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
{
	return obj->userptr.notifier.mm;
}

int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj);
#else
static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) { return false; }

static inline int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }

#endif

#endif