/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/iosys-map.h>
#include <linux/mem_encrypt.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
					  GFP_KERNEL);
	if (!vma_offset_manager)
		return -ENOMEM;

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init_with_mnt - initialize an allocated shmem-backed GEM
 * object in a given shmfs mountpoint
 *
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 * @gemfs: tmpfs mount where the GEM object will be created. If NULL, use
 * the usual tmpfs mountpoint (`shm_mnt`).
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init_with_mnt(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size,
				 struct vfsmount *gemfs)
{
	struct file *filp;
	const vma_flags_t flags = mk_vma_flags(VMA_NORESERVE_BIT);

	drm_gem_private_object_init(dev, obj, size);

	if (gemfs)
		filp = shmem_file_setup_with_mnt(gemfs, "drm mm object", size,
						 flags);
	else
		filp = shmem_file_setup("drm mm object", size, flags);

	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init_with_mnt);

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj,
			size_t size)
{
	return drm_gem_object_init_with_mnt(dev, obj, size, NULL);
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
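 *
 * A minimal usage sketch; the my_bo structure and my_bo_create() helper are
 * hypothetical, shown only to illustrate the calling convention:
 *
 * .. code-block:: c
 *
 *     struct my_bo {
 *         struct drm_gem_object base;
 *         // driver-managed backing store would live here
 *     };
 *
 *     static struct my_bo *my_bo_create(struct drm_device *dev, size_t size)
 *     {
 *         struct my_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *
 *         if (!bo)
 *             return ERR_PTR(-ENOMEM);
 *         // size must be page-aligned
 *         drm_gem_private_object_init(dev, &bo->base, size);
 *         // caller sets up bo->base.funcs, backing memory, etc.
 *         return bo;
 *     }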
177 */ 178 void drm_gem_private_object_init(struct drm_device *dev, 179 struct drm_gem_object *obj, size_t size) 180 { 181 BUG_ON((size & (PAGE_SIZE - 1)) != 0); 182 183 obj->dev = dev; 184 obj->filp = NULL; 185 186 kref_init(&obj->refcount); 187 obj->handle_count = 0; 188 obj->size = size; 189 mutex_init(&obj->gpuva.lock); 190 dma_resv_init(&obj->_resv); 191 if (!obj->resv) 192 obj->resv = &obj->_resv; 193 194 if (drm_core_check_feature(dev, DRIVER_GEM_GPUVA)) 195 drm_gem_gpuva_init(obj); 196 197 drm_vma_node_reset(&obj->vma_node); 198 INIT_LIST_HEAD(&obj->lru_node); 199 } 200 EXPORT_SYMBOL(drm_gem_private_object_init); 201 202 /** 203 * drm_gem_private_object_fini - Finalize a failed drm_gem_object 204 * @obj: drm_gem_object 205 * 206 * Uninitialize an already allocated GEM object when it initialized failed 207 */ 208 void drm_gem_private_object_fini(struct drm_gem_object *obj) 209 { 210 WARN_ON(obj->dma_buf); 211 212 dma_resv_fini(&obj->_resv); 213 mutex_destroy(&obj->gpuva.lock); 214 } 215 EXPORT_SYMBOL(drm_gem_private_object_fini); 216 217 static void drm_gem_object_handle_get(struct drm_gem_object *obj) 218 { 219 struct drm_device *dev = obj->dev; 220 221 drm_WARN_ON(dev, !mutex_is_locked(&dev->object_name_lock)); 222 223 if (obj->handle_count++ == 0) 224 drm_gem_object_get(obj); 225 } 226 227 /** 228 * drm_gem_object_handle_get_if_exists_unlocked - acquire reference on user-space handle, if any 229 * @obj: GEM object 230 * 231 * Acquires a reference on the GEM buffer object's handle. Required to keep 232 * the GEM object alive. Call drm_gem_object_handle_put_if_exists_unlocked() 233 * to release the reference. Does nothing if the buffer object has no handle. 234 * 235 * Returns: 236 * True if a handle exists, or false otherwise 237 */ 238 bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj) 239 { 240 struct drm_device *dev = obj->dev; 241 242 guard(mutex)(&dev->object_name_lock); 243 244 /* 245 * First ref taken during GEM object creation, if any. Some 246 * drivers set up internal framebuffers with GEM objects that 247 * do not have a GEM handle. Hence, this counter can be zero. 248 */ 249 if (!obj->handle_count) 250 return false; 251 252 drm_gem_object_handle_get(obj); 253 254 return true; 255 } 256 257 /** 258 * drm_gem_object_handle_free - release resources bound to userspace handles 259 * @obj: GEM object to clean up. 260 * 261 * Called after the last handle to the object has been closed 262 * 263 * Removes any name for the object. Note that this must be 264 * called before drm_gem_object_free or we'll be touching 265 * freed memory 266 */ 267 static void drm_gem_object_handle_free(struct drm_gem_object *obj) 268 { 269 struct drm_device *dev = obj->dev; 270 271 /* Remove any name for this object */ 272 if (obj->name) { 273 idr_remove(&dev->object_name_idr, obj->name); 274 obj->name = 0; 275 } 276 } 277 278 static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj) 279 { 280 /* Unbreak the reference cycle if we have an exported dma_buf. */ 281 if (obj->dma_buf) { 282 dma_buf_put(obj->dma_buf); 283 obj->dma_buf = NULL; 284 } 285 } 286 287 /** 288 * drm_gem_object_handle_put_unlocked - releases reference on user-space handle 289 * @obj: GEM object 290 * 291 * Releases a reference on the GEM buffer object's handle. Possibly releases 292 * the GEM buffer object and associated dma-buf objects. 
293 */ 294 void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj) 295 { 296 struct drm_device *dev = obj->dev; 297 bool final = false; 298 299 if (drm_WARN_ON(dev, READ_ONCE(obj->handle_count) == 0)) 300 return; 301 302 /* 303 * Must bump handle count first as this may be the last 304 * ref, in which case the object would disappear before 305 * we checked for a name. 306 */ 307 308 mutex_lock(&dev->object_name_lock); 309 if (--obj->handle_count == 0) { 310 drm_gem_object_handle_free(obj); 311 drm_gem_object_exported_dma_buf_free(obj); 312 final = true; 313 } 314 mutex_unlock(&dev->object_name_lock); 315 316 if (final) 317 drm_gem_object_put(obj); 318 } 319 320 /* 321 * Called at device or object close to release the file's 322 * handle references on objects. 323 */ 324 static int 325 drm_gem_object_release_handle(int id, void *ptr, void *data) 326 { 327 struct drm_file *file_priv = data; 328 struct drm_gem_object *obj = ptr; 329 330 if (drm_WARN_ON(obj->dev, !data)) 331 return 0; 332 333 if (obj->funcs->close) 334 obj->funcs->close(obj, file_priv); 335 336 mutex_lock(&file_priv->prime.lock); 337 338 drm_prime_remove_buf_handle(&file_priv->prime, id); 339 340 mutex_unlock(&file_priv->prime.lock); 341 342 drm_vma_node_revoke(&obj->vma_node, file_priv); 343 344 drm_gem_object_handle_put_unlocked(obj); 345 346 return 0; 347 } 348 349 /** 350 * drm_gem_handle_delete - deletes the given file-private handle 351 * @filp: drm file-private structure to use for the handle look up 352 * @handle: userspace handle to delete 353 * 354 * Removes the GEM handle from the @filp lookup table which has been added with 355 * drm_gem_handle_create(). If this is the last handle also cleans up linked 356 * resources like GEM names. 357 */ 358 int 359 drm_gem_handle_delete(struct drm_file *filp, u32 handle) 360 { 361 struct drm_gem_object *obj; 362 363 spin_lock(&filp->table_lock); 364 365 /* Check if we currently have a reference on the object */ 366 obj = idr_replace(&filp->object_idr, NULL, handle); 367 spin_unlock(&filp->table_lock); 368 if (IS_ERR_OR_NULL(obj)) 369 return -EINVAL; 370 371 /* Release driver's reference and decrement refcount. */ 372 drm_gem_object_release_handle(handle, obj, filp); 373 374 /* And finally make the handle available for future allocations. */ 375 spin_lock(&filp->table_lock); 376 idr_remove(&filp->object_idr, handle); 377 spin_unlock(&filp->table_lock); 378 379 return 0; 380 } 381 EXPORT_SYMBOL(drm_gem_handle_delete); 382 383 /** 384 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object 385 * @file: drm file-private structure containing the gem object 386 * @dev: corresponding drm_device 387 * @handle: gem object handle 388 * @offset: return location for the fake mmap offset 389 * 390 * This implements the &drm_driver.dumb_map_offset kms driver callback for 391 * drivers which use gem to manage their backing storage. 392 * 393 * Returns: 394 * 0 on success or a negative error code on failure. 
395 */ 396 int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, 397 u32 handle, u64 *offset) 398 { 399 struct drm_gem_object *obj; 400 int ret; 401 402 obj = drm_gem_object_lookup(file, handle); 403 if (!obj) 404 return -ENOENT; 405 406 /* Don't allow imported objects to be mapped */ 407 if (drm_gem_is_imported(obj)) { 408 ret = -EINVAL; 409 goto out; 410 } 411 412 ret = drm_gem_create_mmap_offset(obj); 413 if (ret) 414 goto out; 415 416 *offset = drm_vma_node_offset_addr(&obj->vma_node); 417 out: 418 drm_gem_object_put(obj); 419 420 return ret; 421 } 422 EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset); 423 424 /** 425 * drm_gem_handle_create_tail - internal functions to create a handle 426 * @file_priv: drm file-private structure to register the handle for 427 * @obj: object to register 428 * @handlep: pointer to return the created handle to the caller 429 * 430 * This expects the &drm_device.object_name_lock to be held already and will 431 * drop it before returning. Used to avoid races in establishing new handles 432 * when importing an object from either an flink name or a dma-buf. 433 * 434 * Handles must be release again through drm_gem_handle_delete(). This is done 435 * when userspace closes @file_priv for all attached handles, or through the 436 * GEM_CLOSE ioctl for individual handles. 437 */ 438 int 439 drm_gem_handle_create_tail(struct drm_file *file_priv, 440 struct drm_gem_object *obj, 441 u32 *handlep) 442 { 443 struct drm_device *dev = obj->dev; 444 u32 handle; 445 int ret; 446 447 WARN_ON(!mutex_is_locked(&dev->object_name_lock)); 448 449 drm_gem_object_handle_get(obj); 450 451 /* 452 * Get the user-visible handle using idr. Preload and perform 453 * allocation under our spinlock. 454 */ 455 idr_preload(GFP_KERNEL); 456 spin_lock(&file_priv->table_lock); 457 458 ret = idr_alloc(&file_priv->object_idr, NULL, 1, 0, GFP_NOWAIT); 459 460 spin_unlock(&file_priv->table_lock); 461 idr_preload_end(); 462 463 mutex_unlock(&dev->object_name_lock); 464 if (ret < 0) 465 goto err_unref; 466 467 handle = ret; 468 469 ret = drm_vma_node_allow(&obj->vma_node, file_priv); 470 if (ret) 471 goto err_remove; 472 473 if (obj->funcs->open) { 474 ret = obj->funcs->open(obj, file_priv); 475 if (ret) 476 goto err_revoke; 477 } 478 479 /* mirrors drm_gem_handle_delete to avoid races */ 480 spin_lock(&file_priv->table_lock); 481 obj = idr_replace(&file_priv->object_idr, obj, handle); 482 WARN_ON(obj != NULL); 483 spin_unlock(&file_priv->table_lock); 484 *handlep = handle; 485 return 0; 486 487 err_revoke: 488 drm_vma_node_revoke(&obj->vma_node, file_priv); 489 err_remove: 490 spin_lock(&file_priv->table_lock); 491 idr_remove(&file_priv->object_idr, handle); 492 spin_unlock(&file_priv->table_lock); 493 err_unref: 494 drm_gem_object_handle_put_unlocked(obj); 495 return ret; 496 } 497 498 /** 499 * drm_gem_handle_create - create a gem handle for an object 500 * @file_priv: drm file-private structure to register the handle for 501 * @obj: object to register 502 * @handlep: pointer to return the created handle to the caller 503 * 504 * Create a handle for this object. This adds a handle reference to the object, 505 * which includes a regular reference count. Callers will likely want to 506 * dereference the object afterwards. 507 * 508 * Since this publishes @obj to userspace it must be fully set up by this point, 509 * drivers must call this last in their buffer object creation callbacks. 
510 */ 511 int drm_gem_handle_create(struct drm_file *file_priv, 512 struct drm_gem_object *obj, 513 u32 *handlep) 514 { 515 mutex_lock(&obj->dev->object_name_lock); 516 517 return drm_gem_handle_create_tail(file_priv, obj, handlep); 518 } 519 EXPORT_SYMBOL(drm_gem_handle_create); 520 521 522 /** 523 * drm_gem_free_mmap_offset - release a fake mmap offset for an object 524 * @obj: obj in question 525 * 526 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset(). 527 * 528 * Note that drm_gem_object_release() already calls this function, so drivers 529 * don't have to take care of releasing the mmap offset themselves when freeing 530 * the GEM object. 531 */ 532 void 533 drm_gem_free_mmap_offset(struct drm_gem_object *obj) 534 { 535 struct drm_device *dev = obj->dev; 536 537 drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node); 538 } 539 EXPORT_SYMBOL(drm_gem_free_mmap_offset); 540 541 /** 542 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object 543 * @obj: obj in question 544 * @size: the virtual size 545 * 546 * GEM memory mapping works by handing back to userspace a fake mmap offset 547 * it can use in a subsequent mmap(2) call. The DRM core code then looks 548 * up the object based on the offset and sets up the various memory mapping 549 * structures. 550 * 551 * This routine allocates and attaches a fake offset for @obj, in cases where 552 * the virtual size differs from the physical size (ie. &drm_gem_object.size). 553 * Otherwise just use drm_gem_create_mmap_offset(). 554 * 555 * This function is idempotent and handles an already allocated mmap offset 556 * transparently. Drivers do not need to check for this case. 557 */ 558 int 559 drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size) 560 { 561 struct drm_device *dev = obj->dev; 562 563 return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node, 564 size / PAGE_SIZE); 565 } 566 EXPORT_SYMBOL(drm_gem_create_mmap_offset_size); 567 568 /** 569 * drm_gem_create_mmap_offset - create a fake mmap offset for an object 570 * @obj: obj in question 571 * 572 * GEM memory mapping works by handing back to userspace a fake mmap offset 573 * it can use in a subsequent mmap(2) call. The DRM core code then looks 574 * up the object based on the offset and sets up the various memory mapping 575 * structures. 576 * 577 * This routine allocates and attaches a fake offset for @obj. 578 * 579 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release 580 * the fake offset again. 581 */ 582 int drm_gem_create_mmap_offset(struct drm_gem_object *obj) 583 { 584 return drm_gem_create_mmap_offset_size(obj, obj->size); 585 } 586 EXPORT_SYMBOL(drm_gem_create_mmap_offset); 587 588 /* 589 * Move folios to appropriate lru and release the folios, decrementing the 590 * ref count of those folios. 591 */ 592 static void drm_gem_check_release_batch(struct folio_batch *fbatch) 593 { 594 check_move_unevictable_folios(fbatch); 595 __folio_batch_release(fbatch); 596 cond_resched(); 597 } 598 599 /** 600 * drm_gem_get_pages - helper to allocate backing pages for a GEM object 601 * from shmem 602 * @obj: obj in question 603 * 604 * This reads the page-array of the shmem-backing storage of the given gem 605 * object. An array of pages is returned. If a page is not allocated or 606 * swapped-out, this will allocate/swap-in the required pages. Note that the 607 * whole object is covered by the page-array and pinned in memory. 
608 * 609 * Use drm_gem_put_pages() to release the array and unpin all pages. 610 * 611 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()). 612 * If you require other GFP-masks, you have to do those allocations yourself. 613 * 614 * Note that you are not allowed to change gfp-zones during runtime. That is, 615 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as 616 * set during initialization. If you have special zone constraints, set them 617 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care 618 * to keep pages in the required zone during swap-in. 619 * 620 * This function is only valid on objects initialized with 621 * drm_gem_object_init(), but not for those initialized with 622 * drm_gem_private_object_init() only. 623 */ 624 struct page **drm_gem_get_pages(struct drm_gem_object *obj) 625 { 626 struct address_space *mapping; 627 struct page **pages; 628 struct folio *folio; 629 struct folio_batch fbatch; 630 unsigned long i, j, npages; 631 632 if (WARN_ON(!obj->filp)) 633 return ERR_PTR(-EINVAL); 634 635 /* This is the shared memory object that backs the GEM resource */ 636 mapping = obj->filp->f_mapping; 637 638 /* We already BUG_ON() for non-page-aligned sizes in 639 * drm_gem_object_init(), so we should never hit this unless 640 * driver author is doing something really wrong: 641 */ 642 WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0); 643 644 npages = obj->size >> PAGE_SHIFT; 645 646 pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); 647 if (pages == NULL) 648 return ERR_PTR(-ENOMEM); 649 650 mapping_set_unevictable(mapping); 651 652 i = 0; 653 while (i < npages) { 654 unsigned long nr; 655 folio = shmem_read_folio_gfp(mapping, i, 656 mapping_gfp_mask(mapping)); 657 if (IS_ERR(folio)) 658 goto fail; 659 nr = min(npages - i, folio_nr_pages(folio)); 660 for (j = 0; j < nr; j++, i++) 661 pages[i] = folio_file_page(folio, i); 662 663 /* Make sure shmem keeps __GFP_DMA32 allocated pages in the 664 * correct region during swapin. Note that this requires 665 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping) 666 * so shmem can relocate pages during swapin if required. 
667 */ 668 BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) && 669 (folio_pfn(folio) >= 0x00100000UL)); 670 } 671 672 return pages; 673 674 fail: 675 mapping_clear_unevictable(mapping); 676 folio_batch_init(&fbatch); 677 j = 0; 678 while (j < i) { 679 struct folio *f = page_folio(pages[j]); 680 if (!folio_batch_add(&fbatch, f)) 681 drm_gem_check_release_batch(&fbatch); 682 j += folio_nr_pages(f); 683 } 684 if (fbatch.nr) 685 drm_gem_check_release_batch(&fbatch); 686 687 kvfree(pages); 688 return ERR_CAST(folio); 689 } 690 EXPORT_SYMBOL(drm_gem_get_pages); 691 692 /** 693 * drm_gem_put_pages - helper to free backing pages for a GEM object 694 * @obj: obj in question 695 * @pages: pages to free 696 * @dirty: if true, pages will be marked as dirty 697 * @accessed: if true, the pages will be marked as accessed 698 */ 699 void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, 700 bool dirty, bool accessed) 701 { 702 int i, npages; 703 struct address_space *mapping; 704 struct folio_batch fbatch; 705 706 mapping = file_inode(obj->filp)->i_mapping; 707 mapping_clear_unevictable(mapping); 708 709 /* We already BUG_ON() for non-page-aligned sizes in 710 * drm_gem_object_init(), so we should never hit this unless 711 * driver author is doing something really wrong: 712 */ 713 WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0); 714 715 npages = obj->size >> PAGE_SHIFT; 716 717 folio_batch_init(&fbatch); 718 for (i = 0; i < npages; i++) { 719 struct folio *folio; 720 721 if (!pages[i]) 722 continue; 723 folio = page_folio(pages[i]); 724 725 if (dirty) 726 folio_mark_dirty(folio); 727 728 if (accessed) 729 folio_mark_accessed(folio); 730 731 /* Undo the reference we took when populating the table */ 732 if (!folio_batch_add(&fbatch, folio)) 733 drm_gem_check_release_batch(&fbatch); 734 i += folio_nr_pages(folio) - 1; 735 } 736 if (folio_batch_count(&fbatch)) 737 drm_gem_check_release_batch(&fbatch); 738 739 kvfree(pages); 740 } 741 EXPORT_SYMBOL(drm_gem_put_pages); 742 743 static int objects_lookup(struct drm_file *filp, u32 *handle, int count, 744 struct drm_gem_object **objs) 745 { 746 int i, ret = 0; 747 struct drm_gem_object *obj; 748 749 spin_lock(&filp->table_lock); 750 751 for (i = 0; i < count; i++) { 752 /* Check if we currently have a reference on the object */ 753 obj = idr_find(&filp->object_idr, handle[i]); 754 if (!obj) { 755 ret = -ENOENT; 756 break; 757 } 758 drm_gem_object_get(obj); 759 objs[i] = obj; 760 } 761 spin_unlock(&filp->table_lock); 762 763 return ret; 764 } 765 766 /** 767 * drm_gem_objects_lookup - look up GEM objects from an array of handles 768 * @filp: DRM file private date 769 * @bo_handles: user pointer to array of userspace handle 770 * @count: size of handle array 771 * @objs_out: returned pointer to array of drm_gem_object pointers 772 * 773 * Takes an array of userspace handles and returns a newly allocated array of 774 * GEM objects. 775 * 776 * For a single handle lookup, use drm_gem_object_lookup(). 777 * 778 * Returns: 779 * @objs filled in with GEM object pointers. Returned GEM objects need to be 780 * released with drm_gem_object_put(). -ENOENT is returned on a lookup 781 * failure. 0 is returned on success. 
782 * 783 */ 784 int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, 785 int count, struct drm_gem_object ***objs_out) 786 { 787 struct drm_gem_object **objs; 788 u32 *handles; 789 int ret; 790 791 if (!count) 792 return 0; 793 794 objs = kvmalloc_array(count, sizeof(struct drm_gem_object *), 795 GFP_KERNEL | __GFP_ZERO); 796 if (!objs) 797 return -ENOMEM; 798 799 *objs_out = objs; 800 801 handles = vmemdup_array_user(bo_handles, count, sizeof(u32)); 802 if (IS_ERR(handles)) 803 return PTR_ERR(handles); 804 805 ret = objects_lookup(filp, handles, count, objs); 806 kvfree(handles); 807 return ret; 808 809 } 810 EXPORT_SYMBOL(drm_gem_objects_lookup); 811 812 /** 813 * drm_gem_object_lookup - look up a GEM object from its handle 814 * @filp: DRM file private date 815 * @handle: userspace handle 816 * 817 * If looking up an array of handles, use drm_gem_objects_lookup(). 818 * 819 * Returns: 820 * A reference to the object named by the handle if such exists on @filp, NULL 821 * otherwise. 822 */ 823 struct drm_gem_object * 824 drm_gem_object_lookup(struct drm_file *filp, u32 handle) 825 { 826 struct drm_gem_object *obj = NULL; 827 828 objects_lookup(filp, &handle, 1, &obj); 829 return obj; 830 } 831 EXPORT_SYMBOL(drm_gem_object_lookup); 832 833 /** 834 * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects 835 * shared and/or exclusive fences. 836 * @filep: DRM file private date 837 * @handle: userspace handle 838 * @wait_all: if true, wait on all fences, else wait on just exclusive fence 839 * @timeout: timeout value in jiffies or zero to return immediately 840 * 841 * Returns: 842 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or 843 * greater than 0 on success. 844 */ 845 long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, 846 bool wait_all, unsigned long timeout) 847 { 848 struct drm_device *dev = filep->minor->dev; 849 struct drm_gem_object *obj; 850 long ret; 851 852 obj = drm_gem_object_lookup(filep, handle); 853 if (!obj) { 854 drm_dbg_core(dev, "Failed to look up GEM BO %d\n", handle); 855 return -EINVAL; 856 } 857 858 ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all), 859 true, timeout); 860 if (ret == 0) 861 ret = -ETIME; 862 else if (ret > 0) 863 ret = 0; 864 865 drm_gem_object_put(obj); 866 867 return ret; 868 } 869 EXPORT_SYMBOL(drm_gem_dma_resv_wait); 870 871 int 872 drm_gem_close_ioctl(struct drm_device *dev, void *data, 873 struct drm_file *file_priv) 874 { 875 struct drm_gem_close *args = data; 876 int ret; 877 878 if (!drm_core_check_feature(dev, DRIVER_GEM)) 879 return -EOPNOTSUPP; 880 881 ret = drm_gem_handle_delete(file_priv, args->handle); 882 883 return ret; 884 } 885 886 int 887 drm_gem_flink_ioctl(struct drm_device *dev, void *data, 888 struct drm_file *file_priv) 889 { 890 struct drm_gem_flink *args = data; 891 struct drm_gem_object *obj; 892 int ret; 893 894 if (!drm_core_check_feature(dev, DRIVER_GEM)) 895 return -EOPNOTSUPP; 896 897 obj = drm_gem_object_lookup(file_priv, args->handle); 898 if (obj == NULL) 899 return -ENOENT; 900 901 mutex_lock(&dev->object_name_lock); 902 /* prevent races with concurrent gem_close. 
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put(obj);
	return ret;
}

int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;
	args->size = obj->size;

err:
	drm_gem_object_put(obj);
	return ret;
}

int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_gem_change_handle *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	if (args->handle == args->new_handle) {
		ret = 0;
		goto out;
	}

	mutex_lock(&file_priv->prime.lock);

	spin_lock(&file_priv->table_lock);
	ret = idr_alloc(&file_priv->object_idr, obj,
			args->new_handle, args->new_handle + 1, GFP_NOWAIT);
	spin_unlock(&file_priv->table_lock);

	if (ret < 0)
		goto out_unlock;

	if (obj->dma_buf) {
		ret = drm_prime_add_buf_handle(&file_priv->prime, obj->dma_buf,
					       args->new_handle);
		if (ret < 0) {
			spin_lock(&file_priv->table_lock);
			idr_remove(&file_priv->object_idr, args->new_handle);
			spin_unlock(&file_priv->table_lock);
			goto out_unlock;
		}

		drm_prime_remove_buf_handle(&file_priv->prime, args->handle);
	}

	ret = 0;

	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, args->handle);
	spin_unlock(&file_priv->table_lock);

out_unlock:
	mutex_unlock(&file_priv->prime.lock);
out:
	drm_gem_object_put(obj);

	return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
1037 */ 1038 void 1039 drm_gem_release(struct drm_device *dev, struct drm_file *file_private) 1040 { 1041 idr_for_each(&file_private->object_idr, 1042 &drm_gem_object_release_handle, file_private); 1043 idr_destroy(&file_private->object_idr); 1044 } 1045 1046 /** 1047 * drm_gem_object_release - release GEM buffer object resources 1048 * @obj: GEM buffer object 1049 * 1050 * This releases any structures and resources used by @obj and is the inverse of 1051 * drm_gem_object_init(). 1052 */ 1053 void 1054 drm_gem_object_release(struct drm_gem_object *obj) 1055 { 1056 if (obj->filp) 1057 fput(obj->filp); 1058 1059 drm_gem_private_object_fini(obj); 1060 1061 drm_gem_free_mmap_offset(obj); 1062 drm_gem_lru_remove(obj); 1063 } 1064 EXPORT_SYMBOL(drm_gem_object_release); 1065 1066 /** 1067 * drm_gem_object_free - free a GEM object 1068 * @kref: kref of the object to free 1069 * 1070 * Called after the last reference to the object has been lost. 1071 * 1072 * Frees the object 1073 */ 1074 void 1075 drm_gem_object_free(struct kref *kref) 1076 { 1077 struct drm_gem_object *obj = 1078 container_of(kref, struct drm_gem_object, refcount); 1079 1080 if (WARN_ON(!obj->funcs->free)) 1081 return; 1082 1083 obj->funcs->free(obj); 1084 } 1085 EXPORT_SYMBOL(drm_gem_object_free); 1086 1087 /** 1088 * drm_gem_vm_open - vma->ops->open implementation for GEM 1089 * @vma: VM area structure 1090 * 1091 * This function implements the #vm_operations_struct open() callback for GEM 1092 * drivers. This must be used together with drm_gem_vm_close(). 1093 */ 1094 void drm_gem_vm_open(struct vm_area_struct *vma) 1095 { 1096 struct drm_gem_object *obj = vma->vm_private_data; 1097 1098 drm_gem_object_get(obj); 1099 } 1100 EXPORT_SYMBOL(drm_gem_vm_open); 1101 1102 /** 1103 * drm_gem_vm_close - vma->ops->close implementation for GEM 1104 * @vma: VM area structure 1105 * 1106 * This function implements the #vm_operations_struct close() callback for GEM 1107 * drivers. This must be used together with drm_gem_vm_open(). 1108 */ 1109 void drm_gem_vm_close(struct vm_area_struct *vma) 1110 { 1111 struct drm_gem_object *obj = vma->vm_private_data; 1112 1113 drm_gem_object_put(obj); 1114 } 1115 EXPORT_SYMBOL(drm_gem_vm_close); 1116 1117 /** 1118 * drm_gem_mmap_obj - memory map a GEM object 1119 * @obj: the GEM object to map 1120 * @obj_size: the object size to be mapped, in bytes 1121 * @vma: VMA for the area to be mapped 1122 * 1123 * Set up the VMA to prepare mapping of the GEM object using the GEM object's 1124 * vm_ops. Depending on their requirements, GEM objects can either 1125 * provide a fault handler in their vm_ops (in which case any accesses to 1126 * the object will be trapped, to perform migration, GTT binding, surface 1127 * register allocation, or performance monitoring), or mmap the buffer memory 1128 * synchronously after calling drm_gem_mmap_obj. 1129 * 1130 * This function is mainly intended to implement the DMABUF mmap operation, when 1131 * the GEM object is not looked up based on its fake offset. To implement the 1132 * DRM mmap operation, drivers should use the drm_gem_mmap() function. 1133 * 1134 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while 1135 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So 1136 * callers must verify access restrictions before calling this helper. 1137 * 1138 * Return 0 or success or -EINVAL if the object size is smaller than the VMA 1139 * size, or if no vm_ops are provided. 
1140 */ 1141 int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, 1142 struct vm_area_struct *vma) 1143 { 1144 int ret; 1145 1146 /* Check for valid size. */ 1147 if (obj_size < vma->vm_end - vma->vm_start) 1148 return -EINVAL; 1149 1150 /* Take a ref for this mapping of the object, so that the fault 1151 * handler can dereference the mmap offset's pointer to the object. 1152 * This reference is cleaned up by the corresponding vm_close 1153 * (which should happen whether the vma was created by this call, or 1154 * by a vm_open due to mremap or partial unmap or whatever). 1155 */ 1156 drm_gem_object_get(obj); 1157 1158 vma->vm_private_data = obj; 1159 vma->vm_ops = obj->funcs->vm_ops; 1160 1161 if (obj->funcs->mmap) { 1162 ret = obj->funcs->mmap(obj, vma); 1163 if (ret) 1164 goto err_drm_gem_object_put; 1165 WARN_ON(!(vma->vm_flags & VM_DONTEXPAND)); 1166 } else { 1167 if (!vma->vm_ops) { 1168 ret = -EINVAL; 1169 goto err_drm_gem_object_put; 1170 } 1171 1172 vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP); 1173 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); 1174 vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); 1175 } 1176 1177 return 0; 1178 1179 err_drm_gem_object_put: 1180 drm_gem_object_put(obj); 1181 return ret; 1182 } 1183 EXPORT_SYMBOL(drm_gem_mmap_obj); 1184 1185 /** 1186 * drm_gem_mmap - memory map routine for GEM objects 1187 * @filp: DRM file pointer 1188 * @vma: VMA for the area to be mapped 1189 * 1190 * If a driver supports GEM object mapping, mmap calls on the DRM file 1191 * descriptor will end up here. 1192 * 1193 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will 1194 * contain the fake offset we created when the GTT map ioctl was called on 1195 * the object) and map it with a call to drm_gem_mmap_obj(). 1196 * 1197 * If the caller is not granted access to the buffer object, the mmap will fail 1198 * with EACCES. Please see the vma manager for more information. 1199 */ 1200 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) 1201 { 1202 struct drm_file *priv = filp->private_data; 1203 struct drm_device *dev = priv->minor->dev; 1204 struct drm_gem_object *obj = NULL; 1205 struct drm_vma_offset_node *node; 1206 int ret; 1207 1208 if (drm_dev_is_unplugged(dev)) 1209 return -ENODEV; 1210 1211 drm_vma_offset_lock_lookup(dev->vma_offset_manager); 1212 node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager, 1213 vma->vm_pgoff, 1214 vma_pages(vma)); 1215 if (likely(node)) { 1216 obj = container_of(node, struct drm_gem_object, vma_node); 1217 /* 1218 * When the object is being freed, after it hits 0-refcnt it 1219 * proceeds to tear down the object. In the process it will 1220 * attempt to remove the VMA offset and so acquire this 1221 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt 1222 * that matches our range, we know it is in the process of being 1223 * destroyed and will be freed as soon as we release the lock - 1224 * so we have to check for the 0-refcnted object and treat it as 1225 * invalid. 
1226 */ 1227 if (!kref_get_unless_zero(&obj->refcount)) 1228 obj = NULL; 1229 } 1230 drm_vma_offset_unlock_lookup(dev->vma_offset_manager); 1231 1232 if (!obj) 1233 return -EINVAL; 1234 1235 if (!drm_vma_node_is_allowed(node, priv)) { 1236 drm_gem_object_put(obj); 1237 return -EACCES; 1238 } 1239 1240 ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, 1241 vma); 1242 1243 drm_gem_object_put(obj); 1244 1245 return ret; 1246 } 1247 EXPORT_SYMBOL(drm_gem_mmap); 1248 1249 void drm_gem_print_info(struct drm_printer *p, unsigned int indent, 1250 const struct drm_gem_object *obj) 1251 { 1252 drm_printf_indent(p, indent, "name=%d\n", obj->name); 1253 drm_printf_indent(p, indent, "refcount=%u\n", 1254 kref_read(&obj->refcount)); 1255 drm_printf_indent(p, indent, "start=%08lx\n", 1256 drm_vma_node_start(&obj->vma_node)); 1257 drm_printf_indent(p, indent, "size=%zu\n", obj->size); 1258 drm_printf_indent(p, indent, "imported=%s\n", 1259 str_yes_no(drm_gem_is_imported(obj))); 1260 1261 if (obj->funcs->print_info) 1262 obj->funcs->print_info(p, indent, obj); 1263 } 1264 1265 int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map) 1266 { 1267 int ret; 1268 1269 dma_resv_assert_held(obj->resv); 1270 1271 if (!obj->funcs->vmap) 1272 return -EOPNOTSUPP; 1273 1274 ret = obj->funcs->vmap(obj, map); 1275 if (ret) 1276 return ret; 1277 else if (iosys_map_is_null(map)) 1278 return -ENOMEM; 1279 1280 return 0; 1281 } 1282 EXPORT_SYMBOL(drm_gem_vmap_locked); 1283 1284 void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map) 1285 { 1286 dma_resv_assert_held(obj->resv); 1287 1288 if (iosys_map_is_null(map)) 1289 return; 1290 1291 if (obj->funcs->vunmap) 1292 obj->funcs->vunmap(obj, map); 1293 1294 /* Always set the mapping to NULL. Callers may rely on this. */ 1295 iosys_map_clear(map); 1296 } 1297 EXPORT_SYMBOL(drm_gem_vunmap_locked); 1298 1299 void drm_gem_lock(struct drm_gem_object *obj) 1300 { 1301 dma_resv_lock(obj->resv, NULL); 1302 } 1303 EXPORT_SYMBOL(drm_gem_lock); 1304 1305 void drm_gem_unlock(struct drm_gem_object *obj) 1306 { 1307 dma_resv_unlock(obj->resv); 1308 } 1309 EXPORT_SYMBOL(drm_gem_unlock); 1310 1311 int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map) 1312 { 1313 int ret; 1314 1315 dma_resv_lock(obj->resv, NULL); 1316 ret = drm_gem_vmap_locked(obj, map); 1317 dma_resv_unlock(obj->resv); 1318 1319 return ret; 1320 } 1321 EXPORT_SYMBOL(drm_gem_vmap); 1322 1323 void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map) 1324 { 1325 dma_resv_lock(obj->resv, NULL); 1326 drm_gem_vunmap_locked(obj, map); 1327 dma_resv_unlock(obj->resv); 1328 } 1329 EXPORT_SYMBOL(drm_gem_vunmap); 1330 1331 /** 1332 * drm_gem_lock_reservations - Sets up the ww context and acquires 1333 * the lock on an array of GEM objects. 1334 * 1335 * Once you've locked your reservations, you'll want to set up space 1336 * for your shared fences (if applicable), submit your job, then 1337 * drm_gem_unlock_reservations(). 1338 * 1339 * @objs: drm_gem_objects to lock 1340 * @count: Number of objects in @objs 1341 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as 1342 * part of tracking this set of locked reservations. 
1343 */ 1344 int 1345 drm_gem_lock_reservations(struct drm_gem_object **objs, int count, 1346 struct ww_acquire_ctx *acquire_ctx) 1347 { 1348 int contended = -1; 1349 int i, ret; 1350 1351 ww_acquire_init(acquire_ctx, &reservation_ww_class); 1352 1353 retry: 1354 if (contended != -1) { 1355 struct drm_gem_object *obj = objs[contended]; 1356 1357 ret = dma_resv_lock_slow_interruptible(obj->resv, 1358 acquire_ctx); 1359 if (ret) { 1360 ww_acquire_fini(acquire_ctx); 1361 return ret; 1362 } 1363 } 1364 1365 for (i = 0; i < count; i++) { 1366 if (i == contended) 1367 continue; 1368 1369 ret = dma_resv_lock_interruptible(objs[i]->resv, 1370 acquire_ctx); 1371 if (ret) { 1372 int j; 1373 1374 for (j = 0; j < i; j++) 1375 dma_resv_unlock(objs[j]->resv); 1376 1377 if (contended != -1 && contended >= i) 1378 dma_resv_unlock(objs[contended]->resv); 1379 1380 if (ret == -EDEADLK) { 1381 contended = i; 1382 goto retry; 1383 } 1384 1385 ww_acquire_fini(acquire_ctx); 1386 return ret; 1387 } 1388 } 1389 1390 ww_acquire_done(acquire_ctx); 1391 1392 return 0; 1393 } 1394 EXPORT_SYMBOL(drm_gem_lock_reservations); 1395 1396 void 1397 drm_gem_unlock_reservations(struct drm_gem_object **objs, int count, 1398 struct ww_acquire_ctx *acquire_ctx) 1399 { 1400 int i; 1401 1402 for (i = 0; i < count; i++) 1403 dma_resv_unlock(objs[i]->resv); 1404 1405 ww_acquire_fini(acquire_ctx); 1406 } 1407 EXPORT_SYMBOL(drm_gem_unlock_reservations); 1408 1409 /** 1410 * drm_gem_lru_init - initialize a LRU 1411 * 1412 * @lru: The LRU to initialize 1413 * @lock: The lock protecting the LRU 1414 */ 1415 void 1416 drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock) 1417 { 1418 lru->lock = lock; 1419 lru->count = 0; 1420 INIT_LIST_HEAD(&lru->list); 1421 } 1422 EXPORT_SYMBOL(drm_gem_lru_init); 1423 1424 static void 1425 drm_gem_lru_remove_locked(struct drm_gem_object *obj) 1426 { 1427 obj->lru->count -= obj->size >> PAGE_SHIFT; 1428 WARN_ON(obj->lru->count < 0); 1429 list_del(&obj->lru_node); 1430 obj->lru = NULL; 1431 } 1432 1433 /** 1434 * drm_gem_lru_remove - remove object from whatever LRU it is in 1435 * 1436 * If the object is currently in any LRU, remove it. 1437 * 1438 * @obj: The GEM object to remove from current LRU 1439 */ 1440 void 1441 drm_gem_lru_remove(struct drm_gem_object *obj) 1442 { 1443 struct drm_gem_lru *lru = obj->lru; 1444 1445 if (!lru) 1446 return; 1447 1448 mutex_lock(lru->lock); 1449 drm_gem_lru_remove_locked(obj); 1450 mutex_unlock(lru->lock); 1451 } 1452 EXPORT_SYMBOL(drm_gem_lru_remove); 1453 1454 /** 1455 * drm_gem_lru_move_tail_locked - move the object to the tail of the LRU 1456 * 1457 * Like &drm_gem_lru_move_tail but lru lock must be held 1458 * 1459 * @lru: The LRU to move the object into. 1460 * @obj: The GEM object to move into this LRU 1461 */ 1462 void 1463 drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj) 1464 { 1465 lockdep_assert_held_once(lru->lock); 1466 1467 if (obj->lru) 1468 drm_gem_lru_remove_locked(obj); 1469 1470 lru->count += obj->size >> PAGE_SHIFT; 1471 list_add_tail(&obj->lru_node, &lru->list); 1472 obj->lru = lru; 1473 } 1474 EXPORT_SYMBOL(drm_gem_lru_move_tail_locked); 1475 1476 /** 1477 * drm_gem_lru_move_tail - move the object to the tail of the LRU 1478 * 1479 * If the object is already in this LRU it will be moved to the 1480 * tail. Otherwise it will be removed from whichever other LRU 1481 * it is in (if any) and moved into this LRU. 1482 * 1483 * @lru: The LRU to move the object into. 
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	mutex_lock(lru->lock);
	drm_gem_lru_move_tail_locked(lru, obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_move_tail);

/**
 * drm_gem_lru_scan - helper to implement shrinker.scan_objects
 *
 * If the shrink callback succeeds, it is expected that the driver
 * move the object out of this LRU.
 *
 * If the LRU possibly contains active buffers, it is the responsibility
 * of the shrink callback to check for this (ie. dma_resv_test_signaled())
 * or if necessary block until the buffer becomes idle.
 *
 * @lru: The LRU to scan
 * @nr_to_scan: The number of pages to try to reclaim
 * @remaining: The number of pages left to reclaim, should be initialized by caller
 * @shrink: Callback to try to shrink/reclaim the object.
 * @ticket: Optional ww_acquire_ctx context to use for locking
 */
unsigned long
drm_gem_lru_scan(struct drm_gem_lru *lru,
		 unsigned int nr_to_scan,
		 unsigned long *remaining,
		 bool (*shrink)(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket),
		 struct ww_acquire_ctx *ticket)
{
	struct drm_gem_lru still_in_lru;
	struct drm_gem_object *obj;
	unsigned freed = 0;

	drm_gem_lru_init(&still_in_lru, lru->lock);

	mutex_lock(lru->lock);

	while (freed < nr_to_scan) {
		obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);

		if (!obj)
			break;

		drm_gem_lru_move_tail_locked(&still_in_lru, obj);

		/*
		 * If it's in the process of being freed, gem_object->free()
		 * may be blocked on the lock, waiting to remove it. So just
		 * skip it.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			continue;

		/*
		 * Now that we own a reference, we can drop the lock for the
		 * rest of the loop body, to reduce contention with other
		 * code paths that need the LRU lock.
		 */
		mutex_unlock(lru->lock);

		if (ticket)
			ww_acquire_init(ticket, &reservation_ww_class);

		/*
		 * Note that this still needs to be trylock, since we can
		 * hit shrinker in response to trying to get backing pages
		 * for this obj (ie. while its lock is already held)
		 */
		if (!ww_mutex_trylock(&obj->resv->lock, ticket)) {
			*remaining += obj->size >> PAGE_SHIFT;
			goto tail;
		}

		if (shrink(obj, ticket)) {
			freed += obj->size >> PAGE_SHIFT;

			/*
			 * If we succeeded in releasing the object's backing
			 * pages, we expect the driver to have moved the object
			 * out of this LRU
			 */
			WARN_ON(obj->lru == &still_in_lru);
			WARN_ON(obj->lru == lru);
		}

		dma_resv_unlock(obj->resv);

		if (ticket)
			ww_acquire_fini(ticket);

tail:
		drm_gem_object_put(obj);
		mutex_lock(lru->lock);
	}

	/*
	 * Move objects we've skipped over out of the temporary still_in_lru
	 * back into this LRU
	 */
	list_for_each_entry(obj, &still_in_lru.list, lru_node)
		obj->lru = lru;
	list_splice_tail(&still_in_lru.list, &lru->list);
	lru->count += still_in_lru.count;

	mutex_unlock(lru->lock);

	return freed;
}
EXPORT_SYMBOL(drm_gem_lru_scan);

/**
 * drm_gem_evict_locked - helper to evict backing pages for a GEM object
 * @obj: obj in question
 */
int drm_gem_evict_locked(struct drm_gem_object *obj)
{
	dma_resv_assert_held(obj->resv);

	if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
		return -EBUSY;

	if (obj->funcs->evict)
		return obj->funcs->evict(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_evict_locked);