#ifndef __DRM_GEM_H__
#define __DRM_GEM_H__

/*
 * GEM Graphics Execution Manager Driver Interfaces
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright (c) 2009-2010, Code Aurora Forum.
 * All rights reserved.
 * Copyright © 2014 Intel Corporation
 *   Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/kref.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/list.h>
#include <linux/mutex.h>

#include <drm/drm_vma_manager.h>

struct iosys_map;
struct drm_gem_object;

/**
 * enum drm_gem_object_status - bitmask of object state for fdinfo reporting
 * @DRM_GEM_OBJECT_RESIDENT: object is resident in memory (i.e. not unpinned)
 * @DRM_GEM_OBJECT_PURGEABLE: object marked as purgeable by userspace
 * @DRM_GEM_OBJECT_ACTIVE: object is currently used by an active submission
 *
 * Bitmask of status used for fdinfo memory stats, see &drm_gem_object_funcs.status
 * and drm_show_fdinfo(). Note that an object can report DRM_GEM_OBJECT_PURGEABLE
 * and be active or not resident, in which case drm_show_fdinfo() will not
 * account for it as purgeable. So drivers do not need to check if the buffer
 * is idle and resident to return this bit, i.e. userspace can mark a buffer as
 * purgeable even while it is still busy on the GPU. It will not get reported in
 * the purgeable stats until it becomes idle. The status gem object func does
 * not need to consider this.
 */
enum drm_gem_object_status {
	DRM_GEM_OBJECT_RESIDENT = BIT(0),
	DRM_GEM_OBJECT_PURGEABLE = BIT(1),
	DRM_GEM_OBJECT_ACTIVE = BIT(2),
};

/**
 * struct drm_gem_object_funcs - GEM object functions
 */
struct drm_gem_object_funcs {
	/**
	 * @free:
	 *
	 * Deconstructor for drm_gem_objects.
	 *
	 * This callback is mandatory.
	 */
	void (*free)(struct drm_gem_object *obj);

	/**
	 * @open:
	 *
	 * Called upon GEM handle creation.
	 *
	 * This callback is optional.
	 */
	int (*open)(struct drm_gem_object *obj, struct drm_file *file);

	/**
	 * @close:
	 *
	 * Called upon GEM handle release.
	 *
	 * This callback is optional.
	 */
	void (*close)(struct drm_gem_object *obj, struct drm_file *file);

	/**
	 * @print_info:
	 *
	 * If driver subclasses struct &drm_gem_object, it can implement this
	 * optional hook for printing additional driver specific info.
	 *
	 * drm_printf_indent() should be used in the callback passing it the
	 * indent argument.
	 *
	 * This callback is called from drm_gem_print_info().
	 *
	 * This callback is optional.
	 */
	void (*print_info)(struct drm_printer *p, unsigned int indent,
			   const struct drm_gem_object *obj);

	/**
	 * @export:
	 *
	 * Export backing buffer as a &dma_buf.
	 * If this is not set drm_gem_prime_export() is used.
	 *
	 * This callback is optional.
	 */
	struct dma_buf *(*export)(struct drm_gem_object *obj, int flags);

	/**
	 * @pin:
	 *
	 * Pin backing buffer in memory. Used by the drm_gem_map_attach() helper.
	 *
	 * This callback is optional.
	 */
	int (*pin)(struct drm_gem_object *obj);

	/**
	 * @unpin:
	 *
	 * Unpin backing buffer. Used by the drm_gem_map_detach() helper.
	 *
	 * This callback is optional.
	 */
	void (*unpin)(struct drm_gem_object *obj);

	/**
	 * @get_sg_table:
	 *
	 * Returns a Scatter-Gather table representation of the buffer.
	 * Used when exporting a buffer by the drm_gem_map_dma_buf() helper.
	 * Releasing is done by calling dma_unmap_sg_attrs() and sg_free_table()
	 * in drm_gem_unmap_dma_buf(), therefore these helpers and this callback
	 * here cannot be used for sg tables pointing at driver private memory
	 * ranges.
	 *
	 * See also drm_prime_pages_to_sg().
	 */
	struct sg_table *(*get_sg_table)(struct drm_gem_object *obj);

	/**
	 * @vmap:
	 *
	 * Returns a virtual address for the buffer. Used by the
	 * drm_gem_dmabuf_vmap() helper.
	 *
	 * This callback is optional.
	 */
	int (*vmap)(struct drm_gem_object *obj, struct iosys_map *map);

	/**
	 * @vunmap:
	 *
	 * Releases the address previously returned by @vmap. Used by the
	 * drm_gem_dmabuf_vunmap() helper.
	 *
	 * This callback is optional.
	 */
	void (*vunmap)(struct drm_gem_object *obj, struct iosys_map *map);

	/**
	 * @mmap:
	 *
	 * Handle mmap() of the gem object, setup vma accordingly.
	 *
	 * This callback is optional.
	 *
	 * The callback is used by both drm_gem_mmap_obj() and
	 * drm_gem_prime_mmap(). When @mmap is present @vm_ops is not
	 * used, the @mmap callback must set vma->vm_ops instead.
	 */
	int (*mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);

	/**
	 * @evict:
	 *
	 * Evicts gem object out from memory. Used by the drm_gem_object_evict()
	 * helper. Returns 0 on success, -errno otherwise.
	 *
	 * This callback is optional.
	 */
	int (*evict)(struct drm_gem_object *obj);

	/**
	 * @status:
	 *
	 * The optional status callback can return additional object state
	 * which determines which stats the object is counted against. The
	 * callback is called under table_lock. Racing against object status
	 * change is "harmless", and the callback can expect to not race
	 * against object destruction.
	 *
	 * Called by drm_show_memory_stats().
	 */
	enum drm_gem_object_status (*status)(struct drm_gem_object *obj);

	/**
	 * @rss:
	 *
	 * Return resident size of the object in physical memory.
	 *
	 * Called by drm_show_memory_stats().
	 */
	size_t (*rss)(struct drm_gem_object *obj);

	/**
	 * @vm_ops:
	 *
	 * Virtual memory operations used with mmap.
	 *
	 * This is optional but necessary for mmap support.
	 */
	const struct vm_operations_struct *vm_ops;
};

/**
 * struct drm_gem_lru - A simple LRU helper
 *
 * A helper for tracking GEM objects in a given state, to aid in
 * driver's shrinker implementation. Tracks the count of pages
 * for lockless &shrinker.count_objects, and provides
 * &drm_gem_lru_scan for driver's &shrinker.scan_objects
 * implementation.
 */
struct drm_gem_lru {
	/**
	 * @lock:
	 *
	 * Lock protecting movement of GEM objects between LRUs. All
	 * LRUs that the object can move between should be protected
	 * by the same lock.
	 */
	struct mutex *lock;

	/**
	 * @count:
	 *
	 * The total number of backing pages of the GEM objects in
	 * this LRU.
	 */
	long count;

	/**
	 * @list:
	 *
	 * The LRU list.
	 */
	struct list_head list;
};

/**
 * struct drm_gem_object - GEM buffer object
 *
 * This structure defines the generic parts for GEM buffer objects, which are
 * mostly around handling mmap and userspace handles.
 *
 * Buffer objects are often abbreviated to BO.
 */
struct drm_gem_object {
	/**
	 * @refcount:
	 *
	 * Reference count of this object
	 *
	 * Please use drm_gem_object_get() to acquire and
	 * drm_gem_object_put() to release a reference to a GEM
	 * buffer object.
	 */
	struct kref refcount;

	/**
	 * @handle_count:
	 *
	 * This is the GEM file_priv handle count of this object.
	 *
	 * Each handle also holds a reference. Note that when the handle_count
	 * drops to 0 any global names (e.g. the id in the flink namespace) will
	 * be cleared.
	 *
	 * Protected by &drm_device.object_name_lock.
	 */
	unsigned handle_count;

	/**
	 * @dev: DRM dev this object belongs to.
	 */
	struct drm_device *dev;

	/**
	 * @filp:
	 *
	 * SHMEM file node used as backing storage for swappable buffer objects.
	 * GEM also supports driver private objects with driver-specific backing
	 * storage (contiguous DMA memory, special reserved blocks). In this
	 * case @filp is NULL.
	 */
	struct file *filp;

	/**
	 * @vma_node:
	 *
	 * Mapping info for this object to support mmap. Drivers are supposed to
	 * allocate the mmap offset using drm_gem_create_mmap_offset(). The
	 * offset itself can be retrieved using drm_vma_node_offset_addr().
	 *
	 * Memory mapping itself is handled by drm_gem_mmap(), which also checks
	 * that userspace is allowed to access the object.
	 */
	struct drm_vma_offset_node vma_node;

	/**
	 * @size:
	 *
	 * Size of the object, in bytes. Immutable over the object's
	 * lifetime.
	 */
	size_t size;

	/**
	 * @name:
	 *
	 * Global name for this object, starts at 1. 0 means unnamed.
	 * Access is covered by &drm_device.object_name_lock. This is used by
	 * the GEM_FLINK and GEM_OPEN ioctls.
	 */
	int name;

	/**
	 * @dma_buf:
	 *
	 * dma-buf associated with this GEM object.
	 *
	 * Pointer to the dma-buf associated with this gem object (either
	 * through importing or exporting). We break the resulting reference
	 * loop when the last gem handle for this object is released.
	 *
	 * Protected by &drm_device.object_name_lock.
	 */
	struct dma_buf *dma_buf;

	/**
	 * @import_attach:
	 *
	 * dma-buf attachment backing this object.
	 *
	 * Any foreign dma_buf imported as a gem object has this set to the
	 * attachment point for the device. This is invariant over the lifetime
	 * of a gem object.
	 *
	 * The &drm_gem_object_funcs.free callback is responsible for
	 * cleaning up the dma_buf attachment and references acquired at import
	 * time.
	 *
	 * Note that the drm gem/prime core does not depend upon drivers setting
	 * this field any more. So for drivers where this doesn't make sense
	 * (e.g. virtual devices or a displaylink behind a USB bus) they can
	 * simply leave it as NULL.
	 */
	struct dma_buf_attachment *import_attach;

	/**
	 * @resv:
	 *
	 * Pointer to reservation object associated with this GEM object.
	 *
	 * Normally (@resv == &@_resv) except for imported GEM objects.
	 */
	struct dma_resv *resv;

	/**
	 * @_resv:
	 *
	 * A reservation object for this GEM object.
	 *
	 * This is unused for imported GEM objects.
	 */
	struct dma_resv _resv;

	/**
	 * @gpuva:
	 *
	 * Provides the list of GPU VAs attached to this GEM object.
	 *
	 * Drivers should lock list accesses with the GEMs &dma_resv lock
	 * (&drm_gem_object.resv) or a custom lock if one is provided.
	 */
	struct {
		/* List of drm_gpuvm_bo entries attached to this GEM object. */
		struct list_head list;

#ifdef CONFIG_LOCKDEP
		/* lockdep map of the custom gpuva list lock, if one is used;
		 * NULL means the list is protected by the GEM's dma-resv lock.
		 * See drm_gem_gpuva_set_lock().
		 */
		struct lockdep_map *lock_dep_map;
#endif
	} gpuva;

	/**
	 * @funcs:
	 *
	 * Optional GEM object functions. If this is set, it will be used instead of the
	 * corresponding &drm_driver GEM callbacks.
	 *
	 * New drivers should use this.
	 *
	 */
	const struct drm_gem_object_funcs *funcs;

	/**
	 * @lru_node:
	 *
	 * List node in a &drm_gem_lru.
	 */
	struct list_head lru_node;

	/**
	 * @lru:
	 *
	 * The current LRU list that the GEM object is on.
	 */
	struct drm_gem_lru *lru;
};

/**
 * DRM_GEM_FOPS - Default drm GEM file operations
 *
 * This macro provides a shorthand for setting the GEM file ops in the
 * &file_operations structure. If all you need are the default ops, use
 * DEFINE_DRM_GEM_FOPS instead.
 */
#define DRM_GEM_FOPS \
	.open		= drm_open,\
	.release	= drm_release,\
	.unlocked_ioctl	= drm_ioctl,\
	.compat_ioctl	= drm_compat_ioctl,\
	.poll		= drm_poll,\
	.read		= drm_read,\
	.llseek		= noop_llseek,\
	.mmap		= drm_gem_mmap, \
	.fop_flags	= FOP_UNSIGNED_OFFSET

/**
 * DEFINE_DRM_GEM_FOPS() - macro to generate file operations for GEM drivers
 * @name: name for the generated structure
 *
 * This macro autogenerates a suitable &struct file_operations for GEM based
 * drivers, which can be assigned to &drm_driver.fops. Note that this structure
 * cannot be shared between drivers, because it contains a reference to the
 * current module using THIS_MODULE.
 *
 * Note that the declaration is already marked as static - if you need a
 * non-static version of this you're probably doing it wrong and will break the
 * THIS_MODULE reference by accident.
 */
#define DEFINE_DRM_GEM_FOPS(name) \
	static const struct file_operations name = {\
		.owner		= THIS_MODULE,\
		DRM_GEM_FOPS,\
	}

void drm_gem_object_release(struct drm_gem_object *obj);
void drm_gem_object_free(struct kref *kref);
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size);
int drm_gem_object_init_with_mnt(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size,
				 struct vfsmount *gemfs);
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size);
void drm_gem_private_object_fini(struct drm_gem_object *obj);
void drm_gem_vm_open(struct vm_area_struct *vma);
void drm_gem_vm_close(struct vm_area_struct *vma);
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma);
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * drm_gem_object_get - acquire a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This function acquires an additional reference to @obj. It is illegal to
 * call this without already holding a reference. No locks required.
 */
static inline void drm_gem_object_get(struct drm_gem_object *obj)
{
	kref_get(&obj->refcount);
}

__attribute__((nonnull))
static inline void
__drm_gem_object_put(struct drm_gem_object *obj)
{
	kref_put(&obj->refcount, drm_gem_object_free);
}

/**
 * drm_gem_object_put - drop a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj.
 */
static inline void
drm_gem_object_put(struct drm_gem_object *obj)
{
	if (obj)
		__drm_gem_object_put(obj);
}

int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep);
int drm_gem_handle_delete(struct drm_file *filp, u32 handle);

void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);

struct page **drm_gem_get_pages(struct drm_gem_object *obj);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed);

void drm_gem_lock(struct drm_gem_object *obj);
void drm_gem_unlock(struct drm_gem_object *obj);

int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);
void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);

int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out);
struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout);
int drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			      struct ww_acquire_ctx *acquire_ctx);
void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
				 struct ww_acquire_ctx *acquire_ctx);
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset);

void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock);
void drm_gem_lru_remove(struct drm_gem_object *obj);
void drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj);
void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj);
unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
			       unsigned int nr_to_scan,
			       unsigned long *remaining,
			       bool (*shrink)(struct drm_gem_object *obj));

int drm_gem_evict(struct drm_gem_object *obj);

/**
 * drm_gem_object_is_shared_for_memory_stats - helper for shared memory stats
 *
 * This helper should only be used for fdinfo shared memory stats to determine
 * if a GEM object is shared.
 *
 * @obj: obj in question
 */
static inline bool drm_gem_object_is_shared_for_memory_stats(struct drm_gem_object *obj)
{
	return (obj->handle_count > 1) || obj->dma_buf;
}

/**
 * drm_gem_is_imported() - Tests if GEM object's buffer has been imported
 * @obj: the GEM object
 *
 * Returns:
 * True if the GEM object's buffer has been imported, false otherwise
 */
static inline bool drm_gem_is_imported(const struct drm_gem_object *obj)
{
	/* The dma-buf's priv field points to the original GEM object. */
	return obj->dma_buf && (obj->dma_buf->priv != obj);
}

#ifdef CONFIG_LOCKDEP
/**
 * drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list.
 * @obj: the &drm_gem_object
 * @lock: the lock used to protect the gpuva list. The locking primitive
 * must contain a dep_map field.
 *
 * Call this if you're not protecting access to the gpuva list with the
 * dma-resv lock, but with a custom lock.
 */
#define drm_gem_gpuva_set_lock(obj, lock) \
	if (!WARN((obj)->gpuva.lock_dep_map, \
		  "GEM GPUVA lock should be set only once.")) \
		(obj)->gpuva.lock_dep_map = &(lock)->dep_map
#define drm_gem_gpuva_assert_lock_held(obj) \
	lockdep_assert((obj)->gpuva.lock_dep_map ? \
		       lock_is_held((obj)->gpuva.lock_dep_map) : \
		       dma_resv_held((obj)->resv))
#else
#define drm_gem_gpuva_set_lock(obj, lock) do {} while (0)
#define drm_gem_gpuva_assert_lock_held(obj) do {} while (0)
#endif

/**
 * drm_gem_gpuva_init() - initialize the gpuva list of a GEM object
 * @obj: the &drm_gem_object
 *
 * This initializes the &drm_gem_object's &drm_gpuvm_bo list.
 *
 * Calling this function is only necessary for drivers intending to support the
 * &drm_driver_feature DRIVER_GEM_GPUVA.
 *
 * See also drm_gem_gpuva_set_lock().
 */
static inline void drm_gem_gpuva_init(struct drm_gem_object *obj)
{
	INIT_LIST_HEAD(&obj->gpuva.list);
}

/**
 * drm_gem_for_each_gpuvm_bo() - iterator to walk over a list of &drm_gpuvm_bo
 * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step
 * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with
 *
 * This iterator walks over all &drm_gpuvm_bo structures associated with the
 * &drm_gem_object.
 */
#define drm_gem_for_each_gpuvm_bo(entry__, obj__) \
	list_for_each_entry(entry__, &(obj__)->gpuva.list, list.entry.gem)

/**
 * drm_gem_for_each_gpuvm_bo_safe() - iterator to safely walk over a list of
 * &drm_gpuvm_bo
 * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step
 * @next__: &next &drm_gpuvm_bo to store the next step
 * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with
 *
 * This iterator walks over all &drm_gpuvm_bo structures associated with the
 * &drm_gem_object. It is implemented with list_for_each_entry_safe(), hence
 * it is safe against removal of elements.
 */
#define drm_gem_for_each_gpuvm_bo_safe(entry__, next__, obj__) \
	list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, list.entry.gem)

#endif /* __DRM_GEM_H__ */