#ifndef __DRM_GEM_H__
#define __DRM_GEM_H__

/*
 * GEM Graphics Execution Manager Driver Interfaces
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright (c) 2009-2010, Code Aurora Forum.
 * All rights reserved.
 * Copyright © 2014 Intel Corporation
 *   Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/kref.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/list.h>
#include <linux/mutex.h>

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#include <drm/drm_device.h>
#endif
#include <drm/drm_vma_manager.h>

struct iosys_map;
struct drm_gem_object;

/**
 * enum drm_gem_object_status - bitmask of object state for fdinfo reporting
 * @DRM_GEM_OBJECT_RESIDENT: object is resident in memory (ie. not unpinned)
 * @DRM_GEM_OBJECT_PURGEABLE: object marked as purgeable by userspace
 * @DRM_GEM_OBJECT_ACTIVE: object is currently used by an active submission
 *
 * Bitmask of status used for fdinfo memory stats, see &drm_gem_object_funcs.status
 * and drm_show_fdinfo(). Note that an object can report DRM_GEM_OBJECT_PURGEABLE
 * and be active or not resident, in which case drm_show_fdinfo() will not
 * account for it as purgeable. So drivers do not need to check if the buffer
 * is idle and resident to return this bit, i.e. userspace can mark a buffer as
 * purgeable even while it is still busy on the GPU. It will not get reported in
 * the purgeable stats until it becomes idle. The status gem object func does
 * not need to consider this.
 */
enum drm_gem_object_status {
	DRM_GEM_OBJECT_RESIDENT = BIT(0),
	DRM_GEM_OBJECT_PURGEABLE = BIT(1),
	DRM_GEM_OBJECT_ACTIVE = BIT(2),
};

/**
 * struct drm_gem_object_funcs - GEM object functions
 */
struct drm_gem_object_funcs {
	/**
	 * @free:
	 *
	 * Destructor for drm_gem_objects.
	 *
	 * This callback is mandatory.
	 */
	void (*free)(struct drm_gem_object *obj);

	/**
	 * @open:
	 *
	 * Called upon GEM handle creation.
	 *
	 * This callback is optional.
	 */
	int (*open)(struct drm_gem_object *obj, struct drm_file *file);

	/**
	 * @close:
	 *
	 * Called upon GEM handle release.
	 *
	 * This callback is optional.
	 */
	void (*close)(struct drm_gem_object *obj, struct drm_file *file);

	/**
	 * @print_info:
	 *
	 * If driver subclasses struct &drm_gem_object, it can implement this
	 * optional hook for printing additional driver specific info.
	 *
	 * drm_printf_indent() should be used in the callback passing it the
	 * indent argument.
	 *
	 * This callback is called from drm_gem_print_info().
	 *
	 * This callback is optional.
	 */
	void (*print_info)(struct drm_printer *p, unsigned int indent,
			   const struct drm_gem_object *obj);

	/**
	 * @export:
	 *
	 * Export backing buffer as a &dma_buf.
	 * If this is not set drm_gem_prime_export() is used.
	 *
	 * This callback is optional.
	 */
	struct dma_buf *(*export)(struct drm_gem_object *obj, int flags);

	/**
	 * @pin:
	 *
	 * Pin backing buffer in memory, such that dma-buf importers can
	 * access it. Used by the drm_gem_map_attach() helper.
	 *
	 * This callback is optional.
	 */
	int (*pin)(struct drm_gem_object *obj);

	/**
	 * @unpin:
	 *
	 * Unpin backing buffer. Used by the drm_gem_map_detach() helper.
	 *
	 * This callback is optional.
	 */
	void (*unpin)(struct drm_gem_object *obj);

	/**
	 * @get_sg_table:
	 *
	 * Returns a Scatter-Gather table representation of the buffer.
	 * Used when exporting a buffer by the drm_gem_map_dma_buf() helper.
	 * Releasing is done by calling dma_unmap_sg_attrs() and sg_free_table()
	 * in drm_gem_unmap_dma_buf(), therefore these helpers and this callback
	 * here cannot be used for sg tables pointing at driver private memory
	 * ranges.
	 *
	 * See also drm_prime_pages_to_sg().
	 */
	struct sg_table *(*get_sg_table)(struct drm_gem_object *obj);

	/**
	 * @vmap:
	 *
	 * Returns a virtual address for the buffer. Used by the
	 * drm_gem_dmabuf_vmap() helper. Called with a held GEM reservation
	 * lock.
	 *
	 * This callback is optional.
	 */
	int (*vmap)(struct drm_gem_object *obj, struct iosys_map *map);

	/**
	 * @vunmap:
	 *
	 * Releases the address previously returned by @vmap. Used by the
	 * drm_gem_dmabuf_vunmap() helper. Called with a held GEM reservation
	 * lock.
	 *
	 * This callback is optional.
	 */
	void (*vunmap)(struct drm_gem_object *obj, struct iosys_map *map);

	/**
	 * @mmap:
	 *
	 * Handle mmap() of the gem object, setup vma accordingly.
	 *
	 * This callback is optional.
	 *
	 * The callback is used by both drm_gem_mmap_obj() and
	 * drm_gem_prime_mmap(). When @mmap is present @vm_ops is not
	 * used, the @mmap callback must set vma->vm_ops instead.
	 */
	int (*mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);

	/**
	 * @evict:
	 *
	 * Evicts gem object out from memory. Used by the drm_gem_object_evict()
	 * helper. Returns 0 on success, -errno otherwise. Called with a held
	 * GEM reservation lock.
	 *
	 * This callback is optional.
	 */
	int (*evict)(struct drm_gem_object *obj);

	/**
	 * @status:
	 *
	 * The optional status callback can return additional object state
	 * which determines which stats the object is counted against. The
	 * callback is called under table_lock. Racing against object status
	 * change is "harmless", and the callback can expect to not race
	 * against object destruction.
	 *
	 * Called by drm_show_memory_stats().
	 */
	enum drm_gem_object_status (*status)(struct drm_gem_object *obj);

	/**
	 * @rss:
	 *
	 * Return resident size of the object in physical memory.
	 *
	 * Called by drm_show_memory_stats().
	 */
	size_t (*rss)(struct drm_gem_object *obj);

	/**
	 * @vm_ops:
	 *
	 * Virtual memory operations used with mmap.
	 *
	 * This is optional but necessary for mmap support.
	 */
	const struct vm_operations_struct *vm_ops;
};

/**
 * struct drm_gem_lru - A simple LRU helper
 *
 * A helper for tracking GEM objects in a given state, to aid in
 * driver's shrinker implementation. Tracks the count of pages
 * for lockless &shrinker.count_objects, and provides
 * &drm_gem_lru_scan for driver's &shrinker.scan_objects
 * implementation.
 */
struct drm_gem_lru {
	/**
	 * @lock:
	 *
	 * Lock protecting movement of GEM objects between LRUs. All
	 * LRUs that the object can move between should be protected
	 * by the same lock.
	 */
	struct mutex *lock;

	/**
	 * @count:
	 *
	 * The total number of backing pages of the GEM objects in
	 * this LRU.
	 */
	long count;

	/**
	 * @list:
	 *
	 * The LRU list.
	 */
	struct list_head list;
};

/**
 * struct drm_gem_object - GEM buffer object
 *
 * This structure defines the generic parts for GEM buffer objects, which are
 * mostly around handling mmap and userspace handles.
 *
 * Buffer objects are often abbreviated to BO.
 */
struct drm_gem_object {
	/**
	 * @refcount:
	 *
	 * Reference count of this object
	 *
	 * Please use drm_gem_object_get() to acquire and drm_gem_object_put()
	 * to release a reference to a GEM buffer object.
	 */
	struct kref refcount;

	/**
	 * @handle_count:
	 *
	 * This is the GEM file_priv handle count of this object.
	 *
	 * Each handle also holds a reference. Note that when the handle_count
	 * drops to 0 any global names (e.g. the id in the flink namespace) will
	 * be cleared.
	 *
	 * Protected by &drm_device.object_name_lock.
	 */
	unsigned handle_count;

	/**
	 * @dev: DRM dev this object belongs to.
	 */
	struct drm_device *dev;

	/**
	 * @filp:
	 *
	 * SHMEM file node used as backing storage for swappable buffer objects.
	 * GEM also supports driver private objects with driver-specific backing
	 * storage (contiguous DMA memory, special reserved blocks). In this
	 * case @filp is NULL.
	 */
	struct file *filp;

	/**
	 * @vma_node:
	 *
	 * Mapping info for this object to support mmap. Drivers are supposed to
	 * allocate the mmap offset using drm_gem_create_mmap_offset(). The
	 * offset itself can be retrieved using drm_vma_node_offset_addr().
	 *
	 * Memory mapping itself is handled by drm_gem_mmap(), which also checks
	 * that userspace is allowed to access the object.
	 */
	struct drm_vma_offset_node vma_node;

	/**
	 * @size:
	 *
	 * Size of the object, in bytes. Immutable over the object's
	 * lifetime.
	 */
	size_t size;

	/**
	 * @name:
	 *
	 * Global name for this object, starts at 1. 0 means unnamed.
	 * Access is covered by &drm_device.object_name_lock. This is used by
	 * the GEM_FLINK and GEM_OPEN ioctls.
	 */
	int name;

	/**
	 * @dma_buf:
	 *
	 * dma-buf associated with this GEM object.
	 *
	 * Pointer to the dma-buf associated with this gem object (either
	 * through importing or exporting). We break the resulting reference
	 * loop when the last gem handle for this object is released.
	 *
	 * Protected by &drm_device.object_name_lock.
	 */
	struct dma_buf *dma_buf;

	/**
	 * @import_attach:
	 *
	 * dma-buf attachment backing this object.
	 *
	 * Any foreign dma_buf imported as a gem object has this set to the
	 * attachment point for the device. This is invariant over the lifetime
	 * of a gem object.
	 *
	 * The &drm_gem_object_funcs.free callback is responsible for
	 * cleaning up the dma_buf attachment and references acquired at import
	 * time.
	 *
	 * Note that the drm gem/prime core does not depend upon drivers setting
	 * this field any more. So for drivers where this doesn't make sense
	 * (e.g. virtual devices or a DisplayLink device behind a USB bus) they
	 * can simply leave it as NULL.
	 */
	struct dma_buf_attachment *import_attach;

	/**
	 * @resv:
	 *
	 * Pointer to reservation object associated with this GEM object.
	 *
	 * Normally (@resv == &@_resv) except for imported GEM objects.
	 */
	struct dma_resv *resv;

	/**
	 * @_resv:
	 *
	 * A reservation object for this GEM object.
	 *
	 * This is unused for imported GEM objects.
	 */
	struct dma_resv _resv;

	/**
	 * @gpuva: Fields used by GPUVM to manage mappings pointing to this GEM object.
	 *
	 * When DRM_GPUVM_IMMEDIATE_MODE is set, this list is protected by the
	 * &drm_gem_object.gpuva.lock mutex. Otherwise, the list is protected by
	 * the GEM's &dma_resv lock.
	 *
	 * Note that all entries in this list must agree on whether
	 * DRM_GPUVM_IMMEDIATE_MODE is set.
	 */
	struct {
		/**
		 * @gpuva.list: list of GPUVM mappings attached to this GEM object.
		 *
		 * Drivers should lock list accesses with either the GEMs
		 * &dma_resv lock (&drm_gem_object.resv) or the
		 * &drm_gem_object.gpuva.lock mutex.
		 */
		struct list_head list;

		/**
		 * @gpuva.lock: lock protecting access to &drm_gem_object.gpuva.list
		 * when DRM_GPUVM_IMMEDIATE_MODE is used.
		 *
		 * Only used when DRM_GPUVM_IMMEDIATE_MODE is set. It should be
		 * safe to take this mutex during the fence signalling path, so
		 * do not allocate memory while holding this lock. Otherwise,
		 * the &dma_resv lock should be used.
		 */
		struct mutex lock;
	} gpuva;

	/**
	 * @funcs:
	 *
	 * Optional GEM object functions. If this is set, it will be used instead of the
	 * corresponding &drm_driver GEM callbacks.
	 *
	 * New drivers should use this.
	 *
	 */
	const struct drm_gem_object_funcs *funcs;

	/**
	 * @lru_node:
	 *
	 * List node in a &drm_gem_lru.
	 */
	struct list_head lru_node;

	/**
	 * @lru:
	 *
	 * The current LRU list that the GEM object is on.
	 */
	struct drm_gem_lru *lru;
};

/**
 * DRM_GEM_FOPS - Default drm GEM file operations
 *
 * This macro provides a shorthand for setting the GEM file ops in the
 * &file_operations structure. If all you need are the default ops, use
 * DEFINE_DRM_GEM_FOPS instead.
 */
#define DRM_GEM_FOPS \
	.open		= drm_open,\
	.release	= drm_release,\
	.unlocked_ioctl	= drm_ioctl,\
	.compat_ioctl	= drm_compat_ioctl,\
	.poll		= drm_poll,\
	.read		= drm_read,\
	.llseek		= noop_llseek,\
	.get_unmapped_area = drm_gem_get_unmapped_area,\
	.mmap		= drm_gem_mmap, \
	.fop_flags	= FOP_UNSIGNED_OFFSET

/**
 * DEFINE_DRM_GEM_FOPS() - macro to generate file operations for GEM drivers
 * @name: name for the generated structure
 *
 * This macro autogenerates a suitable &struct file_operations for GEM based
 * drivers, which can be assigned to &drm_driver.fops. Note that this structure
 * cannot be shared between drivers, because it contains a reference to the
 * current module using THIS_MODULE.
 *
 * Note that the declaration is already marked as static - if you need a
 * non-static version of this you're probably doing it wrong and will break the
 * THIS_MODULE reference by accident.
 */
#define DEFINE_DRM_GEM_FOPS(name) \
	static const struct file_operations name = {\
		.owner		= THIS_MODULE,\
		DRM_GEM_FOPS,\
	}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int drm_gem_huge_mnt_create(struct drm_device *dev, const char *value);
#else
static inline int drm_gem_huge_mnt_create(struct drm_device *dev,
					  const char *value)
{
	/* No huge tmpfs mount without THP support; report success as a no-op. */
	return 0;
}
#endif

/**
 * drm_gem_get_huge_mnt - Get the huge tmpfs mountpoint used by a DRM device
 * @dev: DRM device
 *
 * This function gets the huge tmpfs mountpoint used by DRM device @dev. A huge
 * tmpfs mountpoint is used instead of `shm_mnt` after a successful call to
 * drm_gem_huge_mnt_create() when CONFIG_TRANSPARENT_HUGEPAGE is enabled.
 *
 * Returns:
 * The huge tmpfs mountpoint in use, NULL otherwise.
 */
static inline struct vfsmount *drm_gem_get_huge_mnt(struct drm_device *dev)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	return dev->huge_mnt;
#else
	return NULL;
#endif
}

void drm_gem_object_release(struct drm_gem_object *obj);
void drm_gem_object_free(struct kref *kref);
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size);
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size);
void drm_gem_private_object_fini(struct drm_gem_object *obj);
void drm_gem_vm_open(struct vm_area_struct *vma);
void drm_gem_vm_close(struct vm_area_struct *vma);
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma);
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);

#ifdef CONFIG_MMU
unsigned long drm_gem_get_unmapped_area(struct file *filp, unsigned long uaddr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#else
#define drm_gem_get_unmapped_area	NULL
#endif

/**
 * drm_gem_object_get - acquire a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This function acquires an additional reference to @obj. It is illegal to
 * call this without already holding a reference. No locks required.
 */
static inline void drm_gem_object_get(struct drm_gem_object *obj)
{
	kref_get(&obj->refcount);
}

__attribute__((nonnull))
static inline void
__drm_gem_object_put(struct drm_gem_object *obj)
{
	kref_put(&obj->refcount, drm_gem_object_free);
}

/**
 * drm_gem_object_put - drop a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. NULL is silently ignored.
 */
static inline void
drm_gem_object_put(struct drm_gem_object *obj)
{
	if (obj)
		__drm_gem_object_put(obj);
}

int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep);
int drm_gem_handle_delete(struct drm_file *filp, u32 handle);

void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);

struct page **drm_gem_get_pages(struct drm_gem_object *obj);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed);

void drm_gem_lock(struct drm_gem_object *obj);
void drm_gem_unlock(struct drm_gem_object *obj);

int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map);
void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map);

int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out);
struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout);
int drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			      struct ww_acquire_ctx *acquire_ctx);
void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
				 struct ww_acquire_ctx *acquire_ctx);
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset);

void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock);
void drm_gem_lru_remove(struct drm_gem_object *obj);
void drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj);
void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj);
unsigned long
drm_gem_lru_scan(struct drm_gem_lru *lru,
		 unsigned int nr_to_scan,
		 unsigned long *remaining,
		 bool (*shrink)(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket),
		 struct ww_acquire_ctx *ticket);

int drm_gem_evict_locked(struct drm_gem_object *obj);

/**
 * drm_gem_object_is_shared_for_memory_stats - helper for shared memory stats
 *
 * This helper should only be used for fdinfo shared memory stats to determine
 * if a GEM object is shared.
 *
 * @obj: obj in question
 */
static inline bool drm_gem_object_is_shared_for_memory_stats(struct drm_gem_object *obj)
{
	/* Shared if more than one userspace handle exists or it was ever
	 * exported/imported as a dma-buf. */
	return (obj->handle_count > 1) || obj->dma_buf;
}

/**
 * drm_gem_is_imported() - Tests if GEM object's buffer has been imported
 * @obj: the GEM object
 *
 * Returns:
 * True if the GEM object's buffer has been imported, false otherwise
 */
static inline bool drm_gem_is_imported(const struct drm_gem_object *obj)
{
	return !!obj->import_attach;
}

#ifdef CONFIG_LOCKDEP
#define drm_gem_gpuva_assert_lock_held(gpuvm, obj) \
	lockdep_assert(drm_gpuvm_immediate_mode(gpuvm) ? \
		       lockdep_is_held(&(obj)->gpuva.lock) : \
		       dma_resv_held((obj)->resv))
#else
#define drm_gem_gpuva_assert_lock_held(gpuvm, obj) do {} while (0)
#endif

/**
 * drm_gem_gpuva_init() - initialize the gpuva list of a GEM object
 * @obj: the &drm_gem_object
 *
 * This initializes the &drm_gem_object's &drm_gpuvm_bo list.
 *
 * Calling this function is only necessary for drivers intending to support the
 * &drm_driver_feature DRIVER_GEM_GPUVA.
 *
 * See also drm_gem_gpuva_set_lock().
 */
static inline void drm_gem_gpuva_init(struct drm_gem_object *obj)
{
	INIT_LIST_HEAD(&obj->gpuva.list);
}

/**
 * drm_gem_for_each_gpuvm_bo() - iterator to walk over a list of &drm_gpuvm_bo
 * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step
 * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with
 *
 * This iterator walks over all &drm_gpuvm_bo structures associated with the
 * &drm_gem_object.
 */
#define drm_gem_for_each_gpuvm_bo(entry__, obj__) \
	list_for_each_entry(entry__, &(obj__)->gpuva.list, list.entry.gem)

/**
 * drm_gem_for_each_gpuvm_bo_safe() - iterator to safely walk over a list of
 * &drm_gpuvm_bo
 * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step
 * @next__: &next &drm_gpuvm_bo to store the next step
 * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with
 *
 * This iterator walks over all &drm_gpuvm_bo structures associated with the
 * &drm_gem_object. It is implemented with list_for_each_entry_safe(), hence
 * it is safe against removal of elements.
 */
#define drm_gem_for_each_gpuvm_bo_safe(entry__, next__, obj__) \
	list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, list.entry.gem)

#endif /* __DRM_GEM_H__ */