1 #ifndef __DRM_GEM_H__ 2 #define __DRM_GEM_H__ 3 4 /* 5 * GEM Graphics Execution Manager Driver Interfaces 6 * 7 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. 8 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 9 * Copyright (c) 2009-2010, Code Aurora Forum. 10 * All rights reserved. 11 * Copyright © 2014 Intel Corporation 12 * Daniel Vetter <daniel.vetter@ffwll.ch> 13 * 14 * Author: Rickard E. (Rik) Faith <faith@valinux.com> 15 * Author: Gareth Hughes <gareth@valinux.com> 16 * 17 * Permission is hereby granted, free of charge, to any person obtaining a 18 * copy of this software and associated documentation files (the "Software"), 19 * to deal in the Software without restriction, including without limitation 20 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 21 * and/or sell copies of the Software, and to permit persons to whom the 22 * Software is furnished to do so, subject to the following conditions: 23 * 24 * The above copyright notice and this permission notice (including the next 25 * paragraph) shall be included in all copies or substantial portions of the 26 * Software. 27 * 28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 29 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 30 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 31 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 32 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 33 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 34 * OTHER DEALINGS IN THE SOFTWARE. 
 */

#include <linux/kref.h>
#include <linux/dma-resv.h>
#include <linux/list.h>
#include <linux/mutex.h>

#include <drm/drm_vma_manager.h>

struct iosys_map;
struct drm_gem_object;

/**
 * enum drm_gem_object_status - bitmask of object state for fdinfo reporting
 * @DRM_GEM_OBJECT_RESIDENT: object is resident in memory (ie. not unpinned)
 * @DRM_GEM_OBJECT_PURGEABLE: object marked as purgeable by userspace
 * @DRM_GEM_OBJECT_ACTIVE: object is currently used by an active submission
 *
 * Bitmask of status used for fdinfo memory stats, see &drm_gem_object_funcs.status
 * and drm_show_fdinfo(). Note that an object can report DRM_GEM_OBJECT_PURGEABLE
 * and be active or not resident, in which case drm_show_fdinfo() will not
 * account for it as purgeable. So drivers do not need to check if the buffer
 * is idle and resident to return this bit, i.e. userspace can mark a buffer as
 * purgeable even while it is still busy on the GPU. It will not get reported in
 * the purgeable stats until it becomes idle. The status gem object func does
 * not need to consider this.
 */
enum drm_gem_object_status {
	DRM_GEM_OBJECT_RESIDENT = BIT(0),
	DRM_GEM_OBJECT_PURGEABLE = BIT(1),
	DRM_GEM_OBJECT_ACTIVE = BIT(2),
};

/**
 * struct drm_gem_object_funcs - GEM object functions
 */
struct drm_gem_object_funcs {
	/**
	 * @free:
	 *
	 * Deconstructor for drm_gem_objects.
	 *
	 * This callback is mandatory.
	 */
	void (*free)(struct drm_gem_object *obj);

	/**
	 * @open:
	 *
	 * Called upon GEM handle creation.
	 *
	 * This callback is optional.
	 */
	int (*open)(struct drm_gem_object *obj, struct drm_file *file);

	/**
	 * @close:
	 *
	 * Called upon GEM handle release.
	 *
	 * This callback is optional.
	 */
	void (*close)(struct drm_gem_object *obj, struct drm_file *file);

	/**
	 * @print_info:
	 *
	 * If driver subclasses struct &drm_gem_object, it can implement this
	 * optional hook for printing additional driver specific info.
	 *
	 * drm_printf_indent() should be used in the callback passing it the
	 * indent argument.
	 *
	 * This callback is called from drm_gem_print_info().
	 *
	 * This callback is optional.
	 */
	void (*print_info)(struct drm_printer *p, unsigned int indent,
			   const struct drm_gem_object *obj);

	/**
	 * @export:
	 *
	 * Export backing buffer as a &dma_buf.
	 * If this is not set drm_gem_prime_export() is used.
	 *
	 * This callback is optional.
	 */
	struct dma_buf *(*export)(struct drm_gem_object *obj, int flags);

	/**
	 * @pin:
	 *
	 * Pin backing buffer in memory. Used by the drm_gem_map_attach() helper.
	 *
	 * This callback is optional.
	 */
	int (*pin)(struct drm_gem_object *obj);

	/**
	 * @unpin:
	 *
	 * Unpin backing buffer. Used by the drm_gem_map_detach() helper.
	 *
	 * This callback is optional.
	 */
	void (*unpin)(struct drm_gem_object *obj);

	/**
	 * @get_sg_table:
	 *
	 * Returns a Scatter-Gather table representation of the buffer.
	 * Used when exporting a buffer by the drm_gem_map_dma_buf() helper.
	 * Releasing is done by calling dma_unmap_sg_attrs() and sg_free_table()
	 * in drm_gem_unmap_dma_buf(), therefore these helpers and this callback
	 * here cannot be used for sg tables pointing at driver private memory
	 * ranges.
	 *
	 * See also drm_prime_pages_to_sg().
	 */
	struct sg_table *(*get_sg_table)(struct drm_gem_object *obj);

	/**
	 * @vmap:
	 *
	 * Returns a virtual address for the buffer. Used by the
	 * drm_gem_dmabuf_vmap() helper.
	 *
	 * This callback is optional.
	 */
	int (*vmap)(struct drm_gem_object *obj, struct iosys_map *map);

	/**
	 * @vunmap:
	 *
	 * Releases the address previously returned by @vmap. Used by the
	 * drm_gem_dmabuf_vunmap() helper.
	 *
	 * This callback is optional.
	 */
	void (*vunmap)(struct drm_gem_object *obj, struct iosys_map *map);

	/**
	 * @mmap:
	 *
	 * Handle mmap() of the gem object, setup vma accordingly.
	 *
	 * This callback is optional.
	 *
	 * The callback is used by both drm_gem_mmap_obj() and
	 * drm_gem_prime_mmap(). When @mmap is present @vm_ops is not
	 * used, the @mmap callback must set vma->vm_ops instead.
	 */
	int (*mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);

	/**
	 * @evict:
	 *
	 * Evicts gem object out from memory. Used by the drm_gem_object_evict()
	 * helper. Returns 0 on success, -errno otherwise.
	 *
	 * This callback is optional.
	 */
	int (*evict)(struct drm_gem_object *obj);

	/**
	 * @status:
	 *
	 * The optional status callback can return additional object state
	 * which determines which stats the object is counted against. The
	 * callback is called under table_lock. Racing against object status
	 * change is "harmless", and the callback can expect to not race
	 * against object destruction.
	 *
	 * Called by drm_show_memory_stats().
	 */
	enum drm_gem_object_status (*status)(struct drm_gem_object *obj);

	/**
	 * @rss:
	 *
	 * Return resident size of the object in physical memory.
	 *
	 * Called by drm_show_memory_stats().
	 */
	size_t (*rss)(struct drm_gem_object *obj);

	/**
	 * @vm_ops:
	 *
	 * Virtual memory operations used with mmap.
	 *
	 * This is optional but necessary for mmap support.
	 */
	const struct vm_operations_struct *vm_ops;
};

/**
 * struct drm_gem_lru - A simple LRU helper
 *
 * A helper for tracking GEM objects in a given state, to aid in
 * driver's shrinker implementation. Tracks the count of pages
 * for lockless &shrinker.count_objects, and provides
 * &drm_gem_lru_scan for driver's &shrinker.scan_objects
 * implementation.
 */
struct drm_gem_lru {
	/**
	 * @lock:
	 *
	 * Lock protecting movement of GEM objects between LRUs. All
	 * LRUs that the object can move between should be protected
	 * by the same lock.
	 */
	struct mutex *lock;

	/**
	 * @count:
	 *
	 * The total number of backing pages of the GEM objects in
	 * this LRU.
	 */
	long count;

	/**
	 * @list:
	 *
	 * The LRU list.
	 */
	struct list_head list;
};

/**
 * struct drm_gem_object - GEM buffer object
 *
 * This structure defines the generic parts for GEM buffer objects, which are
 * mostly around handling mmap and userspace handles.
 *
 * Buffer objects are often abbreviated to BO.
 */
struct drm_gem_object {
	/**
	 * @refcount:
	 *
	 * Reference count of this object
	 *
	 * Please use drm_gem_object_get() to acquire and
	 * drm_gem_object_put() to release a reference to a GEM
	 * buffer object.
	 */
	struct kref refcount;

	/**
	 * @handle_count:
	 *
	 * This is the GEM file_priv handle count of this object.
	 *
	 * Each handle also holds a reference. Note that when the handle_count
	 * drops to 0 any global names (e.g. the id in the flink namespace) will
	 * be cleared.
	 *
	 * Protected by &drm_device.object_name_lock.
	 */
	unsigned handle_count;

	/**
	 * @dev: DRM dev this object belongs to.
	 */
	struct drm_device *dev;

	/**
	 * @filp:
	 *
	 * SHMEM file node used as backing storage for swappable buffer objects.
	 * GEM also supports driver private objects with driver-specific backing
	 * storage (contiguous DMA memory, special reserved blocks). In this
	 * case @filp is NULL.
	 */
	struct file *filp;

	/**
	 * @vma_node:
	 *
	 * Mapping info for this object to support mmap. Drivers are supposed to
	 * allocate the mmap offset using drm_gem_create_mmap_offset(). The
	 * offset itself can be retrieved using drm_vma_node_offset_addr().
	 *
	 * Memory mapping itself is handled by drm_gem_mmap(), which also checks
	 * that userspace is allowed to access the object.
	 */
	struct drm_vma_offset_node vma_node;

	/**
	 * @size:
	 *
	 * Size of the object, in bytes. Immutable over the object's
	 * lifetime.
	 */
	size_t size;

	/**
	 * @name:
	 *
	 * Global name for this object, starts at 1. 0 means unnamed.
	 * Access is covered by &drm_device.object_name_lock. This is used by
	 * the GEM_FLINK and GEM_OPEN ioctls.
	 */
	int name;

	/**
	 * @dma_buf:
	 *
	 * dma-buf associated with this GEM object.
	 *
	 * Pointer to the dma-buf associated with this gem object (either
	 * through importing or exporting). We break the resulting reference
	 * loop when the last gem handle for this object is released.
	 *
	 * Protected by &drm_device.object_name_lock.
	 */
	struct dma_buf *dma_buf;

	/**
	 * @import_attach:
	 *
	 * dma-buf attachment backing this object.
	 *
	 * Any foreign dma_buf imported as a gem object has this set to the
	 * attachment point for the device. This is invariant over the lifetime
	 * of a gem object.
	 *
	 * The &drm_gem_object_funcs.free callback is responsible for
	 * cleaning up the dma_buf attachment and references acquired at import
	 * time.
	 *
	 * Note that the drm gem/prime core does not depend upon drivers setting
	 * this field any more. So for drivers where this doesn't make sense
	 * (e.g. virtual devices or a displaylink behind a USB bus) they can
	 * simply leave it as NULL.
	 */
	struct dma_buf_attachment *import_attach;

	/**
	 * @resv:
	 *
	 * Pointer to reservation object associated with this GEM object.
	 *
	 * Normally (@resv == &@_resv) except for imported GEM objects.
	 */
	struct dma_resv *resv;

	/**
	 * @_resv:
	 *
	 * A reservation object for this GEM object.
	 *
	 * This is unused for imported GEM objects.
	 */
	struct dma_resv _resv;

	/**
	 * @gpuva:
	 *
	 * Provides the list of GPU VAs attached to this GEM object.
	 *
	 * Drivers should lock list accesses with the GEMs &dma_resv lock
	 * (&drm_gem_object.resv) or a custom lock if one is provided.
	 */
	struct {
		struct list_head list;

#ifdef CONFIG_LOCKDEP
		struct lockdep_map *lock_dep_map;
#endif
	} gpuva;

	/**
	 * @funcs:
	 *
	 * Optional GEM object functions. If this is set, it will be used instead of the
	 * corresponding &drm_driver GEM callbacks.
	 *
	 * New drivers should use this.
	 */
	const struct drm_gem_object_funcs *funcs;

	/**
	 * @lru_node:
	 *
	 * List node in a &drm_gem_lru.
	 */
	struct list_head lru_node;

	/**
	 * @lru:
	 *
	 * The current LRU list that the GEM object is on.
	 */
	struct drm_gem_lru *lru;
};

/**
 * DRM_GEM_FOPS - Default drm GEM file operations
 *
 * This macro provides a shorthand for setting the GEM file ops in the
 * &file_operations structure. If all you need are the default ops, use
 * DEFINE_DRM_GEM_FOPS instead.
 */
#define DRM_GEM_FOPS \
	.open		= drm_open,\
	.release	= drm_release,\
	.unlocked_ioctl	= drm_ioctl,\
	.compat_ioctl	= drm_compat_ioctl,\
	.poll		= drm_poll,\
	.read		= drm_read,\
	.llseek		= noop_llseek,\
	.mmap		= drm_gem_mmap, \
	.fop_flags	= FOP_UNSIGNED_OFFSET

/**
 * DEFINE_DRM_GEM_FOPS() - macro to generate file operations for GEM drivers
 * @name: name for the generated structure
 *
 * This macro autogenerates a suitable &struct file_operations for GEM based
 * drivers, which can be assigned to &drm_driver.fops. Note that this structure
 * cannot be shared between drivers, because it contains a reference to the
 * current module using THIS_MODULE.
 *
 * Note that the declaration is already marked as static - if you need a
 * non-static version of this you're probably doing it wrong and will break the
 * THIS_MODULE reference by accident.
 */
#define DEFINE_DRM_GEM_FOPS(name) \
	static const struct file_operations name = {\
		.owner		= THIS_MODULE,\
		DRM_GEM_FOPS,\
	}

void drm_gem_object_release(struct drm_gem_object *obj);
void drm_gem_object_free(struct kref *kref);
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size);
int drm_gem_object_init_with_mnt(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size,
				 struct vfsmount *gemfs);
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size);
void drm_gem_private_object_fini(struct drm_gem_object *obj);
void drm_gem_vm_open(struct vm_area_struct *vma);
void drm_gem_vm_close(struct vm_area_struct *vma);
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma);
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * drm_gem_object_get - acquire a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This function acquires an additional reference to @obj. It is illegal to
 * call this without already holding a reference. No locks required.
 */
static inline void drm_gem_object_get(struct drm_gem_object *obj)
{
	kref_get(&obj->refcount);
}

/*
 * Internal helper: unconditionally drop a reference, freeing the object via
 * drm_gem_object_free() when the refcount hits zero. Unlike
 * drm_gem_object_put() it does not tolerate a NULL @obj.
 */
__attribute__((nonnull))
static inline void
__drm_gem_object_put(struct drm_gem_object *obj)
{
	kref_put(&obj->refcount, drm_gem_object_free);
}

/**
 * drm_gem_object_put - drop a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. A NULL @obj is a no-op.
 */
static inline void
drm_gem_object_put(struct drm_gem_object *obj)
{
	if (obj)
		__drm_gem_object_put(obj);
}

int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep);
int drm_gem_handle_delete(struct drm_file *filp, u32 handle);


void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);

struct page **drm_gem_get_pages(struct drm_gem_object *obj);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed);

void drm_gem_lock(struct drm_gem_object *obj);
void drm_gem_unlock(struct drm_gem_object *obj);

int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);
void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);

int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out);
struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout);
int drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			      struct ww_acquire_ctx *acquire_ctx);
void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
				 struct ww_acquire_ctx *acquire_ctx);
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset);

void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock);
void drm_gem_lru_remove(struct drm_gem_object *obj);
void drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj);
void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj);
unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
			       unsigned int nr_to_scan,
			       unsigned long *remaining,
			       bool (*shrink)(struct drm_gem_object *obj));

int drm_gem_evict(struct drm_gem_object *obj);

/**
 * drm_gem_object_is_shared_for_memory_stats - helper for shared memory stats
 *
 * This helper should only be used for fdinfo shared memory stats to determine
 * if a GEM object is shared.
 *
 * @obj: obj in question
 */
static inline bool drm_gem_object_is_shared_for_memory_stats(struct drm_gem_object *obj)
{
	return (obj->handle_count > 1) || obj->dma_buf;
}

#ifdef CONFIG_LOCKDEP
/**
 * drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list.
 * @obj: the &drm_gem_object
 * @lock: the lock used to protect the gpuva list. The locking primitive
 * must contain a dep_map field.
 *
 * Call this if you're not protecting access to the gpuva list with the
 * dma-resv lock, but with a custom lock.
 */
#define drm_gem_gpuva_set_lock(obj, lock) \
	if (!WARN((obj)->gpuva.lock_dep_map, \
		  "GEM GPUVA lock should be set only once.")) \
		(obj)->gpuva.lock_dep_map = &(lock)->dep_map
#define drm_gem_gpuva_assert_lock_held(obj) \
	lockdep_assert((obj)->gpuva.lock_dep_map ? \
		       lock_is_held((obj)->gpuva.lock_dep_map) : \
		       dma_resv_held((obj)->resv))
#else
#define drm_gem_gpuva_set_lock(obj, lock) do {} while (0)
#define drm_gem_gpuva_assert_lock_held(obj) do {} while (0)
#endif

/**
 * drm_gem_gpuva_init() - initialize the gpuva list of a GEM object
 * @obj: the &drm_gem_object
 *
 * This initializes the &drm_gem_object's &drm_gpuvm_bo list.
 *
 * Calling this function is only necessary for drivers intending to support the
 * &drm_driver_feature DRIVER_GEM_GPUVA.
 *
 * See also drm_gem_gpuva_set_lock().
 */
static inline void drm_gem_gpuva_init(struct drm_gem_object *obj)
{
	INIT_LIST_HEAD(&obj->gpuva.list);
}

/**
 * drm_gem_for_each_gpuvm_bo() - iterator to walk over a list of &drm_gpuvm_bo
 * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step
 * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with
 *
 * This iterator walks over all &drm_gpuvm_bo structures associated with the
 * &drm_gem_object.
 */
#define drm_gem_for_each_gpuvm_bo(entry__, obj__) \
	list_for_each_entry(entry__, &(obj__)->gpuva.list, list.entry.gem)

/**
 * drm_gem_for_each_gpuvm_bo_safe() - iterator to safely walk over a list of
 * &drm_gpuvm_bo
 * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step
 * @next__: next &drm_gpuvm_bo to store the next step
 * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with
 *
 * This iterator walks over all &drm_gpuvm_bo structures associated with the
 * &drm_gem_object. It is implemented with list_for_each_entry_safe(), hence
 * it is safe against removal of elements.
 */
#define drm_gem_for_each_gpuvm_bo_safe(entry__, next__, obj__) \
	list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, list.entry.gem)

#endif /* __DRM_GEM_H__ */