/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/iosys-map.h>
#include <linux/mem_encrypt.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
					  GFP_KERNEL);
	if (!vma_offset_manager)
		return -ENOMEM;

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init_with_mnt - initialize an allocated shmem-backed GEM
 * object in a given shmfs mountpoint
 *
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 * @gemfs: tmpfs mount where the GEM object will be created. If NULL, use
 * the usual tmpfs mountpoint (`shm_mnt`).
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init_with_mnt(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size,
				 struct vfsmount *gemfs)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	if (gemfs)
		filp = shmem_file_setup_with_mnt(gemfs, "drm mm object", size,
						 VM_NORESERVE);
	else
		filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);

	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init_with_mnt);

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj,
			size_t size)
{
	return drm_gem_object_init_with_mnt(dev, obj, size, NULL);
}
EXPORT_SYMBOL(drm_gem_object_init);
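
/*
 * Example: a minimal sketch (hypothetical "foo" driver, not a real API) of
 * allocating and initializing a shmem-backed buffer object with
 * drm_gem_object_init(). The size must be page-aligned, and the driver's BO
 * type embeds a &struct drm_gem_object:
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
 *	{
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		bo->base.funcs = &foo_gem_object_funcs;	// driver's funcs table
 *
 *		ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(bo);
 *			return ERR_PTR(ret);
 *		}
 *
 *		return bo;
 *	}
 *
 * The matching teardown in the driver's &drm_gem_object_funcs.free callback
 * would call drm_gem_object_release() before freeing the structure.
 */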

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	mutex_init(&obj->gpuva.lock);
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	if (drm_core_check_feature(dev, DRIVER_GEM_GPUVA))
		drm_gem_gpuva_init(obj);

	drm_vma_node_reset(&obj->vma_node);
	INIT_LIST_HEAD(&obj->lru_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * drm_gem_private_object_fini - Finalize a failed drm_gem_object
 * @obj: drm_gem_object
 *
 * Uninitialize an already allocated GEM object when its initialization failed.
 */
void drm_gem_private_object_fini(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	dma_resv_fini(&obj->_resv);
	mutex_destroy(&obj->gpuva.lock);
}
EXPORT_SYMBOL(drm_gem_private_object_fini);

static void drm_gem_object_handle_get(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_WARN_ON(dev, !mutex_is_locked(&dev->object_name_lock));

	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);
}

/**
 * drm_gem_object_handle_get_if_exists_unlocked - acquire reference on user-space handle, if any
 * @obj: GEM object
 *
 * Acquires a reference on the GEM buffer object's handle. Required to keep
 * the GEM object alive. Call drm_gem_object_handle_put_if_exists_unlocked()
 * to release the reference. Does nothing if the buffer object has no handle.
 *
 * Returns:
 * True if a handle exists, or false otherwise
 */
bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	guard(mutex)(&dev->object_name_lock);

	/*
	 * First ref taken during GEM object creation, if any. Some
	 * drivers set up internal framebuffers with GEM objects that
	 * do not have a GEM handle. Hence, this counter can be zero.
	 */
	if (!obj->handle_count)
		return false;

	drm_gem_object_handle_get(obj);

	return true;
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}
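
/*
 * Example: a minimal sketch (hypothetical "foo" driver) of a GEM object that
 * is not backed by shmem, e.g. one sitting in device-private VRAM. The driver
 * initializes the object with drm_gem_private_object_init() and provides the
 * backing store itself; on an error path before the object becomes visible,
 * drm_gem_private_object_fini() undoes the initialization:
 *
 *	static int foo_private_bo_init(struct drm_device *dev,
 *				       struct drm_gem_object *obj, size_t size)
 *	{
 *		int ret;
 *
 *		drm_gem_private_object_init(dev, obj, PAGE_ALIGN(size));
 *
 *		ret = foo_vram_alloc(obj);	// driver-specific, hypothetical
 *		if (ret) {
 *			drm_gem_private_object_fini(obj);
 *			return ret;
 *		}
 *
 *		return 0;
 *	}
 */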

/**
 * drm_gem_object_handle_put_unlocked - releases reference on user-space handle
 * @obj: GEM object
 *
 * Releases a reference on the GEM buffer object's handle. Possibly releases
 * the GEM buffer object and associated dma-buf objects.
 */
void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (drm_WARN_ON(dev, READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before
	 * we checked for a name.
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;

	if (drm_WARN_ON(obj->dev, !data))
		return 0;

	if (obj->funcs->close)
		obj->funcs->close(obj, file_priv);

	mutex_lock(&file_priv->prime.lock);

	drm_prime_remove_buf_handle(&file_priv->prime, id);

	mutex_unlock(&file_priv->prime.lock);

	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
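
/*
 * Example: from userspace, handles created for this file description are
 * dropped with the GEM_CLOSE ioctl, which ends up in drm_gem_handle_delete()
 * via drm_gem_close_ioctl(). A minimal sketch (error handling elided):
 *
 *	struct drm_gem_close args = {
 *		.handle = bo_handle,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_GEM_CLOSE, &args);
 *
 * Closing the DRM file descriptor releases any handles that userspace did
 * not close explicitly; see drm_gem_release().
 */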

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (drm_gem_is_imported(obj)) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
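
/*
 * Example: drivers that rely on the helper above typically just wire it up
 * in their &struct drm_driver. A sketch (hypothetical driver):
 *
 *	static const struct drm_driver foo_driver = {
 *		.driver_features	= DRIVER_GEM | DRIVER_MODESET,
 *		.dumb_create		= foo_dumb_create,
 *		.dumb_map_offset	= drm_gem_dumb_map_offset,
 *		// ...
 *	};
 *
 * Userspace then passes the returned offset to mmap(2) on the DRM fd to map
 * the buffer.
 */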

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	drm_gem_object_handle_get(obj);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, NULL, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	/* mirrors drm_gem_handle_delete to avoid races */
	spin_lock(&file_priv->table_lock);
	obj = idr_replace(&file_priv->object_idr, obj, handle);
	WARN_ON(obj != NULL);
	spin_unlock(&file_priv->table_lock);
	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
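
/*
 * Example: a minimal sketch (hypothetical "foo" driver) of a
 * &drm_driver.dumb_create implementation. The handle is created last, once
 * the object is fully set up, and the creation reference is dropped
 * afterwards since the handle now keeps the object alive:
 *
 *	static int foo_dumb_create(struct drm_file *file_priv,
 *				   struct drm_device *dev,
 *				   struct drm_mode_create_dumb *args)
 *	{
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 *		args->size = PAGE_ALIGN(args->pitch * args->height);
 *
 *		bo = foo_bo_create(dev, args->size);	// hypothetical helper
 *		if (IS_ERR(bo))
 *			return PTR_ERR(bo);
 *
 *		ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle);
 *		drm_gem_object_put(&bo->base);	// handle holds its own ref now
 *
 *		return ret;
 *	}
 */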

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/*
 * Move folios to appropriate lru and release the folios, decrementing the
 * ref count of those folios.
 */
static void drm_gem_check_release_batch(struct folio_batch *fbatch)
{
	check_move_unevictable_folios(fbatch);
	__folio_batch_release(fbatch);
	cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(); it is not valid for objects initialized with
 * drm_gem_private_object_init() only.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page **pages;
	struct folio *folio;
	struct folio_batch fbatch;
	unsigned long i, j, npages;

	if (WARN_ON(!obj->filp))
		return ERR_PTR(-EINVAL);

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	i = 0;
	while (i < npages) {
		unsigned long nr;

		folio = shmem_read_folio_gfp(mapping, i,
					     mapping_gfp_mask(mapping));
		if (IS_ERR(folio))
			goto fail;
		nr = min(npages - i, folio_nr_pages(folio));
		for (j = 0; j < nr; j++, i++)
			pages[i] = folio_file_page(folio, i);

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (folio_pfn(folio) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	folio_batch_init(&fbatch);
	j = 0;
	while (j < i) {
		struct folio *f = page_folio(pages[j]);

		if (!folio_batch_add(&fbatch, f))
			drm_gem_check_release_batch(&fbatch);
		j += folio_nr_pages(f);
	}
	if (fbatch.nr)
		drm_gem_check_release_batch(&fbatch);

	kvfree(pages);
	return ERR_CAST(folio);
}
EXPORT_SYMBOL(drm_gem_get_pages);
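
/*
 * Example: drm_gem_get_pages() and drm_gem_put_pages() are used as a pair. A
 * minimal sketch (hypothetical driver code) of pinning the backing store,
 * building a scatterlist for DMA and releasing the pages again:
 *
 *	struct page **pages;
 *	struct sg_table *sgt;
 *
 *	pages = drm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *
 *	sgt = drm_prime_pages_to_sg(obj->dev, pages,
 *				    obj->size >> PAGE_SHIFT);
 *	if (IS_ERR(sgt)) {
 *		drm_gem_put_pages(obj, pages, false, false);
 *		return PTR_ERR(sgt);
 *	}
 *
 *	// ... map the table, point the device at it ...
 *
 *	drm_gem_put_pages(obj, pages, true, true);
 *
 * The dirty/accessed flags on release tell the core to write back and age
 * the pages properly once they become evictable again.
 */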

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct folio_batch fbatch;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	folio_batch_init(&fbatch);
	for (i = 0; i < npages; i++) {
		struct folio *folio;

		if (!pages[i])
			continue;
		folio = page_folio(pages[i]);

		if (dirty)
			folio_mark_dirty(folio);

		if (accessed)
			folio_mark_accessed(folio);

		/* Undo the reference we took when populating the table */
		if (!folio_batch_add(&fbatch, folio))
			drm_gem_check_release_batch(&fbatch);
		i += folio_nr_pages(folio) - 1;
	}
	if (folio_batch_count(&fbatch))
		drm_gem_check_release_batch(&fbatch);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 * @objs_out filled in with GEM object pointers. Returned GEM objects need to
 * be released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	struct drm_gem_object **objs;
	u32 *handles;
	int ret;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			      GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	*objs_out = objs;

	handles = vmemdup_array_user(bo_handles, count, sizeof(u32));
	if (IS_ERR(handles))
		return PTR_ERR(handles);

	ret = objects_lookup(filp, handles, count, objs);
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
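
/*
 * Example: a minimal sketch (hypothetical driver, error handling shortened)
 * of resolving the handle array of a submit/execbuf-style ioctl into GEM
 * objects with drm_gem_objects_lookup():
 *
 *	struct drm_gem_object **bos;
 *	int i, ret;
 *
 *	ret = drm_gem_objects_lookup(file_priv,
 *				     u64_to_user_ptr(args->bo_handles),
 *				     args->bo_count, &bos);
 *	if (ret)
 *		return ret;
 *
 *	// ... lock the reservations, queue the job ...
 *
 *	for (i = 0; i < args->bo_count; i++)
 *		drm_gem_object_put(bos[i]);
 *	kvfree(bos);
 *
 * Note that on a lookup failure some entries of the returned array may
 * already hold references that the caller still needs to drop.
 */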

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 *
 * Returns:
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on a GEM object's reservation's shared and/or
 * exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 * 0 on success, -ETIME if the wait timed out, -ERESTARTSYS if interrupted,
 * or -EINVAL if the handle lookup failed.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout)
{
	struct drm_device *dev = filep->minor->dev;
	struct drm_gem_object *obj;
	long ret;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		drm_dbg_core(dev, "Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
				    true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);

int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put(obj);
	return ret;
}
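
/*
 * Example: a minimal sketch of a driver wait ioctl (hypothetical "foo"
 * driver and uapi struct) implemented on top of drm_gem_dma_resv_wait():
 *
 *	static int foo_wait_ioctl(struct drm_device *dev, void *data,
 *				  struct drm_file *file_priv)
 *	{
 *		struct foo_wait *args = data;	// hypothetical uapi struct
 *		unsigned long timeout;
 *
 *		timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
 *
 *		return drm_gem_dma_resv_wait(file_priv, args->handle,
 *					     true, timeout);
 *	}
 */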

int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;
	args->size = obj->size;

err:
	drm_gem_object_put(obj);
	return ret;
}

int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_gem_change_handle *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	if (args->handle == args->new_handle) {
		drm_gem_object_put(obj);
		return 0;
	}

	mutex_lock(&file_priv->prime.lock);

	spin_lock(&file_priv->table_lock);
	ret = idr_alloc(&file_priv->object_idr, obj,
			args->new_handle, args->new_handle + 1, GFP_NOWAIT);
	spin_unlock(&file_priv->table_lock);

	if (ret < 0)
		goto out_unlock;

	if (obj->dma_buf) {
		ret = drm_prime_add_buf_handle(&file_priv->prime, obj->dma_buf,
					       args->new_handle);
		if (ret < 0) {
			spin_lock(&file_priv->table_lock);
			idr_remove(&file_priv->object_idr, args->new_handle);
			spin_unlock(&file_priv->table_lock);
			goto out_unlock;
		}

		drm_prime_remove_buf_handle(&file_priv->prime, args->handle);
	}

	ret = 0;

	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, args->handle);
	spin_unlock(&file_priv->table_lock);

out_unlock:
	mutex_unlock(&file_priv->prime.lock);
	drm_gem_object_put(obj);

	return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse of
 * drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	if (obj->filp)
		fput(obj->filp);

	drm_gem_private_object_fini(obj);

	drm_gem_free_mmap_offset(obj);
	drm_gem_lru_remove(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);

	if (WARN_ON(!obj->funcs->free))
		return;

	obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
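
/*
 * Example: drivers that implement their own fault handler typically pair it
 * with the two helpers above. A sketch (hypothetical fault handler):
 *
 *	static const struct vm_operations_struct foo_gem_vm_ops = {
 *		.fault = foo_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 *
 * The open/close hooks keep the GEM object's reference count in sync with
 * the number of VMAs pointing at it, including copies created by mremap()
 * or partial unmaps.
 */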

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the GEM object's
 * vm_ops. Depending on their requirements, GEM objects can either
 * provide a fault handler in their vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	vma->vm_private_data = obj;
	vma->vm_ops = obj->funcs->vm_ops;

	if (obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret)
			goto err_drm_gem_object_put;
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (!vma->vm_ops) {
			ret = -EINVAL;
			goto err_drm_gem_object_put;
		}

		vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
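
/*
 * Example: a minimal sketch of an exporter's &dma_buf_ops.mmap callback built
 * on drm_gem_mmap_obj(). This is illustrative only; most drivers use the
 * drm_gem_prime_mmap() helper, which also handles the fake-offset fixup:
 *
 *	static int foo_gem_dmabuf_mmap(struct dma_buf *dma_buf,
 *				       struct vm_area_struct *vma)
 *	{
 *		struct drm_gem_object *obj = dma_buf->priv;
 *
 *		return drm_gem_mmap_obj(obj, obj->size, vma);
 *	}
 *
 * Any access-control checks must happen before this point, since
 * drm_gem_mmap_obj() itself performs none.
 */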

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  str_yes_no(drm_gem_is_imported(obj)));

	if (obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
}

int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	dma_resv_assert_held(obj->resv);

	if (!obj->funcs->vmap)
		return -EOPNOTSUPP;

	ret = obj->funcs->vmap(obj, map);
	if (ret)
		return ret;
	else if (iosys_map_is_null(map))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(drm_gem_vmap_locked);

void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
	dma_resv_assert_held(obj->resv);

	if (iosys_map_is_null(map))
		return;

	if (obj->funcs->vunmap)
		obj->funcs->vunmap(obj, map);

	/* Always set the mapping to NULL. Callers may rely on this. */
	iosys_map_clear(map);
}
EXPORT_SYMBOL(drm_gem_vunmap_locked);

void drm_gem_lock(struct drm_gem_object *obj)
{
	dma_resv_lock(obj->resv, NULL);
}
EXPORT_SYMBOL(drm_gem_lock);

void drm_gem_unlock(struct drm_gem_object *obj)
{
	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_unlock);

int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	dma_resv_lock(obj->resv, NULL);
	ret = drm_gem_vmap_locked(obj, map);
	dma_resv_unlock(obj->resv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_vmap);

void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	dma_resv_lock(obj->resv, NULL);
	drm_gem_vunmap_locked(obj, map);
	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_vunmap);
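
/*
 * Example: a minimal sketch of a CPU access path through the vmap helpers.
 * The mapping address lives in a &struct iosys_map, which transparently
 * handles both system and I/O memory:
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = drm_gem_vmap(obj, &map);
 *	if (ret)
 *		return ret;
 *
 *	iosys_map_memcpy_to(&map, 0, data, len);	// CPU writes to the BO
 *
 *	drm_gem_vunmap(obj, &map);
 *
 * The *_locked variants are for callers that already hold the object's
 * dma-resv lock, e.g. inside a &drm_gem_object_funcs callback.
 */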

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
						       acquire_ctx);
		if (ret) {
			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
						  acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);

/**
 * drm_gem_lru_init - initialize a LRU
 *
 * @lru: The LRU to initialize
 * @lock: The lock protecting the LRU
 */
void
drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock)
{
	lru->lock = lock;
	lru->count = 0;
	INIT_LIST_HEAD(&lru->list);
}
EXPORT_SYMBOL(drm_gem_lru_init);

static void
drm_gem_lru_remove_locked(struct drm_gem_object *obj)
{
	obj->lru->count -= obj->size >> PAGE_SHIFT;
	WARN_ON(obj->lru->count < 0);
	list_del(&obj->lru_node);
	obj->lru = NULL;
}

/**
 * drm_gem_lru_remove - remove object from whatever LRU it is in
 *
 * If the object is currently in any LRU, remove it.
 *
 * @obj: The GEM object to remove from current LRU
 */
void
drm_gem_lru_remove(struct drm_gem_object *obj)
{
	struct drm_gem_lru *lru = obj->lru;

	if (!lru)
		return;

	mutex_lock(lru->lock);
	drm_gem_lru_remove_locked(obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_remove);

/**
 * drm_gem_lru_move_tail_locked - move the object to the tail of the LRU
 *
 * Like &drm_gem_lru_move_tail but lru lock must be held
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	lockdep_assert_held_once(lru->lock);

	if (obj->lru)
		drm_gem_lru_remove_locked(obj);

	lru->count += obj->size >> PAGE_SHIFT;
	list_add_tail(&obj->lru_node, &lru->list);
	obj->lru = lru;
}
EXPORT_SYMBOL(drm_gem_lru_move_tail_locked);

/**
 * drm_gem_lru_move_tail - move the object to the tail of the LRU
 *
 * If the object is already in this LRU it will be moved to the
 * tail. Otherwise it will be removed from whichever other LRU
 * it is in (if any) and moved into this LRU.
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	mutex_lock(lru->lock);
	drm_gem_lru_move_tail_locked(lru, obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_move_tail);
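
/*
 * Example: a minimal sketch of a submission path using the ww-mutex helpers
 * above to atomically lock all buffer objects of a job (hypothetical driver
 * code, fence bookkeeping elided):
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ret = drm_gem_lock_reservations(bos, bo_count, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	// ... reserve fence slots, validate and pin the BOs, push the job ...
 *
 *	drm_gem_unlock_reservations(bos, bo_count, &ctx);
 *
 * The helper transparently handles -EDEADLK backoff, so the caller never
 * sees a lock-ordering failure.
 */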

/**
 * drm_gem_lru_scan - helper to implement shrinker.scan_objects
 *
 * If the shrink callback succeeds, it is expected that the driver
 * move the object out of this LRU.
 *
 * If the LRU may contain active buffers, it is the responsibility
 * of the shrink callback to check for this (ie. dma_resv_test_signaled())
 * or if necessary block until the buffer becomes idle.
 *
 * @lru: The LRU to scan
 * @nr_to_scan: The number of pages to try to reclaim
 * @remaining: The number of pages left to reclaim, should be initialized by caller
 * @shrink: Callback to try to shrink/reclaim the object.
 * @ticket: Optional ww_acquire_ctx context to use for locking
 */
unsigned long
drm_gem_lru_scan(struct drm_gem_lru *lru,
		 unsigned int nr_to_scan,
		 unsigned long *remaining,
		 bool (*shrink)(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket),
		 struct ww_acquire_ctx *ticket)
{
	struct drm_gem_lru still_in_lru;
	struct drm_gem_object *obj;
	unsigned freed = 0;

	drm_gem_lru_init(&still_in_lru, lru->lock);

	mutex_lock(lru->lock);

	while (freed < nr_to_scan) {
		obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);

		if (!obj)
			break;

		drm_gem_lru_move_tail_locked(&still_in_lru, obj);

		/*
		 * If it's in the process of being freed, gem_object->free()
		 * may be blocked on lock waiting to remove it. So just
		 * skip it.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			continue;

		/*
		 * Now that we own a reference, we can drop the lock for the
		 * rest of the loop body, to reduce contention with other
		 * code paths that need the LRU lock
		 */
		mutex_unlock(lru->lock);

		if (ticket)
			ww_acquire_init(ticket, &reservation_ww_class);

		/*
		 * Note that this still needs to be trylock, since we can
		 * hit shrinker in response to trying to get backing pages
		 * for this obj (ie. while its lock is already held)
		 */
		if (!ww_mutex_trylock(&obj->resv->lock, ticket)) {
			*remaining += obj->size >> PAGE_SHIFT;
			goto tail;
		}

		if (shrink(obj, ticket)) {
			freed += obj->size >> PAGE_SHIFT;

			/*
			 * If we succeeded in releasing the object's backing
			 * pages, we expect the driver to have moved the object
			 * out of this LRU
			 */
			WARN_ON(obj->lru == &still_in_lru);
			WARN_ON(obj->lru == lru);
		}

		dma_resv_unlock(obj->resv);

		if (ticket)
			ww_acquire_fini(ticket);

tail:
		drm_gem_object_put(obj);
		mutex_lock(lru->lock);
	}

	/*
	 * Move objects we've skipped over out of the temporary still_in_lru
	 * back into this LRU
	 */
	list_for_each_entry (obj, &still_in_lru.list, lru_node)
		obj->lru = lru;
	list_splice_tail(&still_in_lru.list, &lru->list);
	lru->count += still_in_lru.count;

	mutex_unlock(lru->lock);

	return freed;
}
EXPORT_SYMBOL(drm_gem_lru_scan);
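
/*
 * Example: a minimal sketch of wiring drm_gem_lru_scan() into a shrinker
 * (hypothetical driver; compare the msm shrinker for a complete version):
 *
 *	static unsigned long foo_shrinker_scan(struct shrinker *shrinker,
 *					       struct shrink_control *sc)
 *	{
 *		struct foo_device *foo = shrinker->private_data;
 *		struct ww_acquire_ctx ticket;
 *		unsigned long remaining = 0;
 *		unsigned long freed;
 *
 *		freed = drm_gem_lru_scan(&foo->lru_dontneed, sc->nr_to_scan,
 *					 &remaining, foo_gem_purge, &ticket);
 *
 *		return freed ? freed : SHRINK_STOP;
 *	}
 */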

/**
 * drm_gem_evict_locked - helper to evict backing pages for a GEM object
 * @obj: obj in question
 */
int drm_gem_evict_locked(struct drm_gem_object *obj)
{
	dma_resv_assert_held(obj->resv);

	if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
		return -EBUSY;

	if (obj->funcs->evict)
		return obj->funcs->evict(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_evict_locked);
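
/*
 * Example: a minimal sketch of a shrink callback (as passed to
 * drm_gem_lru_scan() above) built on drm_gem_evict_locked(). The object's
 * dma-resv lock is already held when the callback runs:
 *
 *	static bool foo_gem_purge(struct drm_gem_object *obj,
 *				  struct ww_acquire_ctx *ticket)
 *	{
 *		if (drm_gem_evict_locked(obj))
 *			return false;
 *
 *		foo_gem_remove_from_lru(obj);	// hypothetical bookkeeping
 *		return true;
 *	}
 */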