/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/iosys-map.h>
#include <linux/mem_encrypt.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
        drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
        struct drm_vma_offset_manager *vma_offset_manager;

        mutex_init(&dev->object_name_lock);
        idr_init_base(&dev->object_name_idr, 1);

        vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
                                          GFP_KERNEL);
        if (!vma_offset_manager) {
                DRM_ERROR("out of memory\n");
                return -ENOMEM;
        }

        dev->vma_offset_manager = vma_offset_manager;
        drm_vma_offset_manager_init(vma_offset_manager,
                                    DRM_FILE_PAGE_OFFSET_START,
                                    DRM_FILE_PAGE_OFFSET_SIZE);

        return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init_with_mnt - initialize an allocated shmem-backed GEM
 * object in a given shmfs mountpoint
 *
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 * @gemfs: tmpfs mount where the GEM object will be created. If NULL, use
 * the usual tmpfs mountpoint (`shm_mnt`).
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init_with_mnt(struct drm_device *dev,
                                 struct drm_gem_object *obj, size_t size,
                                 struct vfsmount *gemfs)
{
        struct file *filp;

        drm_gem_private_object_init(dev, obj, size);

        if (gemfs)
                filp = shmem_file_setup_with_mnt(gemfs, "drm mm object", size,
                                                 VM_NORESERVE);
        else
                filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);

        if (IS_ERR(filp))
                return PTR_ERR(filp);

        obj->filp = filp;

        return 0;
}
EXPORT_SYMBOL(drm_gem_object_init_with_mnt);

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj,
                        size_t size)
{
        return drm_gem_object_init_with_mnt(dev, obj, size, NULL);
}
EXPORT_SYMBOL(drm_gem_object_init);
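
/*
 * Example: a minimal sketch (not part of this file) of how a driver might
 * allocate and initialize a shmem-backed object. The foo_* names are
 * hypothetical; @size is assumed to be page-aligned by the caller.
 *
 *    struct foo_bo {
 *        struct drm_gem_object base;
 *    };
 *
 *    static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
 *    {
 *        struct foo_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *        int ret;
 *
 *        if (!bo)
 *            return ERR_PTR(-ENOMEM);
 *
 *        ret = drm_gem_object_init(dev, &bo->base, size);
 *        if (ret) {
 *            kfree(bo);
 *            return ERR_PTR(ret);
 *        }
 *        return bo;
 *    }
 */
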
/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
                                 struct drm_gem_object *obj, size_t size)
{
        BUG_ON((size & (PAGE_SIZE - 1)) != 0);

        obj->dev = dev;
        obj->filp = NULL;

        kref_init(&obj->refcount);
        obj->handle_count = 0;
        obj->size = size;
        dma_resv_init(&obj->_resv);
        if (!obj->resv)
                obj->resv = &obj->_resv;

        if (drm_core_check_feature(dev, DRIVER_GEM_GPUVA))
                drm_gem_gpuva_init(obj);

        drm_vma_node_reset(&obj->vma_node);
        INIT_LIST_HEAD(&obj->lru_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * drm_gem_private_object_fini - Finalize a failed drm_gem_object
 * @obj: drm_gem_object
 *
 * Uninitialize an already allocated GEM object when its initialization failed.
 */
void drm_gem_private_object_fini(struct drm_gem_object *obj)
{
        WARN_ON(obj->dma_buf);

        dma_resv_fini(&obj->_resv);
}
EXPORT_SYMBOL(drm_gem_private_object_fini);
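
/*
 * Example: an illustrative sketch (not part of this file) of initializing a
 * private object whose backing store is managed entirely by the driver, for
 * instance in VRAM. The bar_* names are hypothetical.
 *
 *    static int bar_bo_init_vram(struct drm_device *dev,
 *                                struct drm_gem_object *obj, size_t size)
 *    {
 *        int ret;
 *
 *        drm_gem_private_object_init(dev, obj, size);
 *        ret = bar_vram_alloc(obj);    // hypothetical backing-store helper
 *        if (ret)
 *            drm_gem_private_object_fini(obj);
 *        return ret;
 *    }
 */
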
/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        /* Remove any name for this object */
        if (obj->name) {
                idr_remove(&dev->object_name_idr, obj->name);
                obj->name = 0;
        }
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
        /* Unbreak the reference cycle if we have an exported dma_buf. */
        if (obj->dma_buf) {
                dma_buf_put(obj->dma_buf);
                obj->dma_buf = NULL;
        }
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        bool final = false;

        if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
                return;

        /*
         * Must bump handle count first as this may be the last
         * ref, in which case the object would disappear before we
         * checked for a name.
         */

        mutex_lock(&dev->object_name_lock);
        if (--obj->handle_count == 0) {
                drm_gem_object_handle_free(obj);
                drm_gem_object_exported_dma_buf_free(obj);
                final = true;
        }
        mutex_unlock(&dev->object_name_lock);

        if (final)
                drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
        struct drm_file *file_priv = data;
        struct drm_gem_object *obj = ptr;

        if (obj->funcs->close)
                obj->funcs->close(obj, file_priv);

        mutex_lock(&file_priv->prime.lock);
        drm_prime_remove_buf_handle(&file_priv->prime, id);
        mutex_unlock(&file_priv->prime.lock);

        drm_vma_node_revoke(&obj->vma_node, file_priv);

        drm_gem_object_handle_put_unlocked(obj);

        return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_replace(&filp->object_idr, NULL, handle);
        spin_unlock(&filp->table_lock);
        if (IS_ERR_OR_NULL(obj))
                return -EINVAL;

        /* Release driver's reference and decrement refcount. */
        drm_gem_object_release_handle(handle, obj, filp);

        /* And finally make the handle available for future allocations. */
        spin_lock(&filp->table_lock);
        idr_remove(&filp->object_idr, handle);
        spin_unlock(&filp->table_lock);

        return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                            u32 handle, u64 *offset)
{
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(file, handle);
        if (!obj)
                return -ENOENT;

        /* Don't allow imported objects to be mapped */
        if (drm_gem_is_imported(obj)) {
                ret = -EINVAL;
                goto out;
        }

        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                goto out;

        *offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
        drm_gem_object_put(obj);

        return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
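
/*
 * Example: a hedged sketch (not part of this file) of wiring the helper above
 * into a hypothetical driver's &drm_driver, so dumb-buffer userspace can
 * query mmap offsets. foo_dumb_create() is an assumed driver callback.
 *
 *    static const struct drm_driver foo_drm_driver = {
 *        .driver_features = DRIVER_GEM | DRIVER_MODESET,
 *        .dumb_create     = foo_dumb_create,
 *        .dumb_map_offset = drm_gem_dumb_map_offset,
 *        // ...
 *    };
 */
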
/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
                           struct drm_gem_object *obj,
                           u32 *handlep)
{
        struct drm_device *dev = obj->dev;
        u32 handle;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->object_name_lock));
        if (obj->handle_count++ == 0)
                drm_gem_object_get(obj);

        /*
         * Get the user-visible handle using idr. Preload and perform
         * allocation under our spinlock.
         */
        idr_preload(GFP_KERNEL);
        spin_lock(&file_priv->table_lock);

        ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

        spin_unlock(&file_priv->table_lock);
        idr_preload_end();

        mutex_unlock(&dev->object_name_lock);
        if (ret < 0)
                goto err_unref;

        handle = ret;

        ret = drm_vma_node_allow(&obj->vma_node, file_priv);
        if (ret)
                goto err_remove;

        if (obj->funcs->open) {
                ret = obj->funcs->open(obj, file_priv);
                if (ret)
                        goto err_revoke;
        }

        *handlep = handle;
        return 0;

err_revoke:
        drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
        spin_lock(&file_priv->table_lock);
        idr_remove(&file_priv->object_idr, handle);
        spin_unlock(&file_priv->table_lock);
err_unref:
        drm_gem_object_handle_put_unlocked(obj);
        return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this
 * point; drivers must call this last in their buffer object creation
 * callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
                          struct drm_gem_object *obj,
                          u32 *handlep)
{
        mutex_lock(&obj->dev->object_name_lock);

        return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
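
/*
 * Example: an illustrative sketch (not part of this file) of the usual
 * create-ioctl pattern. The handle is published last, and the local
 * reference is dropped regardless of success, since the handle now holds
 * its own reference. foo_bo_create() and the args layout are the
 * hypothetical names from the earlier example.
 *
 *    static int foo_bo_create_ioctl(struct drm_device *dev, void *data,
 *                                   struct drm_file *file_priv)
 *    {
 *        struct foo_create *args = data;    // hypothetical ioctl args
 *        struct foo_bo *bo;
 *        int ret;
 *
 *        bo = foo_bo_create(dev, args->size);
 *        if (IS_ERR(bo))
 *            return PTR_ERR(bo);
 *
 *        ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle);
 *        drm_gem_object_put(&bo->base);
 *        return ret;
 *    }
 */
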
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
        struct drm_device *dev = obj->dev;

        return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
                                  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
        return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
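
/*
 * Example: a minimal sketch (not part of this file) of a driver-specific
 * "map" ioctl that hands the fake offset back to userspace, which then
 * passes it to mmap(2) on the DRM fd. The args layout is hypothetical.
 *
 *    static int foo_bo_mmap_offset_ioctl(struct drm_device *dev, void *data,
 *                                        struct drm_file *file_priv)
 *    {
 *        struct foo_mmap_offset *args = data;
 *        struct drm_gem_object *obj;
 *        int ret;
 *
 *        obj = drm_gem_object_lookup(file_priv, args->handle);
 *        if (!obj)
 *            return -ENOENT;
 *
 *        ret = drm_gem_create_mmap_offset(obj);
 *        if (!ret)
 *            args->offset = drm_vma_node_offset_addr(&obj->vma_node);
 *
 *        drm_gem_object_put(obj);
 *        return ret;
 *    }
 */
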
/*
 * Move folios to the appropriate LRU and release them, decrementing the
 * ref count of those folios.
 */
static void drm_gem_check_release_batch(struct folio_batch *fbatch)
{
        check_move_unevictable_folios(fbatch);
        __folio_batch_release(fbatch);
        cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid for objects initialized with
 * drm_gem_object_init(), not for those initialized with
 * drm_gem_private_object_init() only.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
        struct address_space *mapping;
        struct page **pages;
        struct folio *folio;
        struct folio_batch fbatch;
        long i, j, npages;

        if (WARN_ON(!obj->filp))
                return ERR_PTR(-EINVAL);

        /* This is the shared memory object that backs the GEM resource */
        mapping = obj->filp->f_mapping;

        /*
         * We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL)
                return ERR_PTR(-ENOMEM);

        mapping_set_unevictable(mapping);

        i = 0;
        while (i < npages) {
                long nr;

                folio = shmem_read_folio_gfp(mapping, i,
                                             mapping_gfp_mask(mapping));
                if (IS_ERR(folio))
                        goto fail;
                nr = min(npages - i, folio_nr_pages(folio));
                for (j = 0; j < nr; j++, i++)
                        pages[i] = folio_file_page(folio, i);

                /*
                 * Make sure shmem keeps __GFP_DMA32 allocated pages in the
                 * correct region during swapin. Note that this requires
                 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
                 * so shmem can relocate pages during swapin if required.
                 */
                BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
                       (folio_pfn(folio) >= 0x00100000UL));
        }

        return pages;

fail:
        mapping_clear_unevictable(mapping);
        folio_batch_init(&fbatch);
        j = 0;
        while (j < i) {
                struct folio *f = page_folio(pages[j]);

                if (!folio_batch_add(&fbatch, f))
                        drm_gem_check_release_batch(&fbatch);
                j += folio_nr_pages(f);
        }
        if (fbatch.nr)
                drm_gem_check_release_batch(&fbatch);

        kvfree(pages);
        return ERR_CAST(folio);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                       bool dirty, bool accessed)
{
        int i, npages;
        struct address_space *mapping;
        struct folio_batch fbatch;

        mapping = file_inode(obj->filp)->i_mapping;
        mapping_clear_unevictable(mapping);

        /*
         * We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        folio_batch_init(&fbatch);
        for (i = 0; i < npages; i++) {
                struct folio *folio;

                if (!pages[i])
                        continue;
                folio = page_folio(pages[i]);

                if (dirty)
                        folio_mark_dirty(folio);

                if (accessed)
                        folio_mark_accessed(folio);

                /* Undo the reference we took when populating the table */
                if (!folio_batch_add(&fbatch, folio))
                        drm_gem_check_release_batch(&fbatch);
                i += folio_nr_pages(folio) - 1;
        }
        if (folio_batch_count(&fbatch))
                drm_gem_check_release_batch(&fbatch);

        kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
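
/*
 * Example: a hedged sketch (not part of this file) of the usual pin/unpin
 * pairing around drm_gem_get_pages()/drm_gem_put_pages(), e.g. when building
 * a scatterlist for DMA. The foo_bo pages field and foo_bo_map_sg() are
 * hypothetical.
 *
 *    static int foo_bo_pin(struct foo_bo *bo)
 *    {
 *        struct page **pages = drm_gem_get_pages(&bo->base);
 *
 *        if (IS_ERR(pages))
 *            return PTR_ERR(pages);
 *
 *        bo->pages = pages;
 *        return foo_bo_map_sg(bo);    // hypothetical DMA setup
 *    }
 *
 *    static void foo_bo_unpin(struct foo_bo *bo)
 *    {
 *        drm_gem_put_pages(&bo->base, bo->pages, true, false);
 *        bo->pages = NULL;
 *    }
 */
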
static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
                          struct drm_gem_object **objs)
{
        int i, ret = 0;
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        for (i = 0; i < count; i++) {
                /* Check if we currently have a reference on the object */
                obj = idr_find(&filp->object_idr, handle[i]);
                if (!obj) {
                        ret = -ENOENT;
                        break;
                }
                drm_gem_object_get(obj);
                objs[i] = obj;
        }
        spin_unlock(&filp->table_lock);

        return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 * @objs filled in with GEM object pointers. Returned GEM objects need to be
 * released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
                           int count, struct drm_gem_object ***objs_out)
{
        int ret;
        u32 *handles;
        struct drm_gem_object **objs;

        if (!count)
                return 0;

        objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
                              GFP_KERNEL | __GFP_ZERO);
        if (!objs)
                return -ENOMEM;

        *objs_out = objs;

        handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
        if (!handles) {
                ret = -ENOMEM;
                goto out;
        }

        if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
                ret = -EFAULT;
                DRM_DEBUG("Failed to copy in GEM handles\n");
                goto out;
        }

        ret = objects_lookup(filp, handles, count, objs);
out:
        kvfree(handles);
        return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 *
 * Returns:
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
        struct drm_gem_object *obj = NULL;

        objects_lookup(filp, &handle, 1, &obj);
        return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
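
/*
 * Example: an illustrative sketch (not part of this file) of a submit-style
 * ioctl resolving a user-supplied handle array. The args fields are
 * hypothetical; u64_to_user_ptr() converts the u64 from the uapi struct.
 * Cleanup of a partially resolved array on error is elided.
 *
 *    static int foo_submit_lookup_bos(struct drm_file *file,
 *                                     struct foo_submit *args,
 *                                     struct drm_gem_object ***bos)
 *    {
 *        int ret, i;
 *
 *        ret = drm_gem_objects_lookup(file,
 *                                     u64_to_user_ptr(args->bo_handles),
 *                                     args->bo_count, bos);
 *        if (ret)
 *            return ret;
 *
 *        // ... use the objects, then drop the references:
 *        for (i = 0; i < args->bo_count; i++)
 *            drm_gem_object_put((*bos)[i]);
 *        kvfree(*bos);
 *        return 0;
 *    }
 */
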
/**
 * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 * 0 on success, -ETIME if the wait timed out, or another negative error
 * code, such as -ERESTARTSYS if the wait was interrupted.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
                           bool wait_all, unsigned long timeout)
{
        long ret;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(filep, handle);
        if (!obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
                return -EINVAL;
        }

        ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
                                    true, timeout);
        if (ret == 0)
                ret = -ETIME;
        else if (ret > 0)
                ret = 0;

        drm_gem_object_put(obj);

        return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);

int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_close *args = data;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        ret = drm_gem_handle_delete(file_priv, args->handle);

        return ret;
}

int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_flink *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        obj = drm_gem_object_lookup(file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;

        mutex_lock(&dev->object_name_lock);
        /* prevent races with concurrent gem_close. */
        if (obj->handle_count == 0) {
                ret = -ENOENT;
                goto err;
        }

        if (!obj->name) {
                ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
                if (ret < 0)
                        goto err;

                obj->name = ret;
        }

        args->name = (uint64_t) obj->name;
        ret = 0;

err:
        mutex_unlock(&dev->object_name_lock);
        drm_gem_object_put(obj);
        return ret;
}
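
/*
 * Example: a minimal sketch (not part of this file) of a driver wait ioctl
 * built on drm_gem_dma_resv_wait(). The args layout is hypothetical;
 * drm_timeout_abs_to_jiffies() converts an absolute nanosecond deadline.
 *
 *    static int foo_wait_bo_ioctl(struct drm_device *dev, void *data,
 *                                 struct drm_file *file_priv)
 *    {
 *        struct foo_wait_bo *args = data;
 *        unsigned long timeout =
 *            drm_timeout_abs_to_jiffies(args->timeout_ns);
 *
 *        return drm_gem_dma_resv_wait(file_priv, args->handle,
 *                                     true, timeout);
 *    }
 */
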
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file_priv)
{
        struct drm_gem_open *args = data;
        struct drm_gem_object *obj;
        int ret;
        u32 handle;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        mutex_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, (int) args->name);
        if (obj) {
                drm_gem_object_get(obj);
        } else {
                mutex_unlock(&dev->object_name_lock);
                return -ENOENT;
        }

        /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
        ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
        if (ret)
                goto err;

        args->handle = handle;
        args->size = obj->size;

err:
        drm_gem_object_put(obj);
        return ret;
}

int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct drm_gem_change_handle *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!obj)
                return -ENOENT;

        if (args->handle == args->new_handle) {
                drm_gem_object_put(obj);
                return 0;
        }

        mutex_lock(&file_priv->prime.lock);

        spin_lock(&file_priv->table_lock);
        ret = idr_alloc(&file_priv->object_idr, obj,
                        args->new_handle, args->new_handle + 1, GFP_NOWAIT);
        spin_unlock(&file_priv->table_lock);

        if (ret < 0)
                goto out_unlock;

        if (obj->dma_buf) {
                ret = drm_prime_add_buf_handle(&file_priv->prime, obj->dma_buf,
                                               args->new_handle);
                if (ret < 0) {
                        spin_lock(&file_priv->table_lock);
                        idr_remove(&file_priv->object_idr, args->new_handle);
                        spin_unlock(&file_priv->table_lock);
                        goto out_unlock;
                }

                drm_prime_remove_buf_handle(&file_priv->prime, args->handle);
        }

        ret = 0;

        spin_lock(&file_priv->table_lock);
        idr_remove(&file_priv->object_idr, args->handle);
        spin_unlock(&file_priv->table_lock);

out_unlock:
        mutex_unlock(&file_priv->prime.lock);

        drm_gem_object_put(obj);

        return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
        idr_init_base(&file_private->object_idr, 1);
        spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
        idr_for_each(&file_private->object_idr,
                     &drm_gem_object_release_handle, file_private);
        idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse
 * of drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
        if (obj->filp)
                fput(obj->filp);

        drm_gem_private_object_fini(obj);

        drm_gem_free_mmap_offset(obj);
        drm_gem_lru_remove(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);
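
/*
 * Example: an illustrative sketch (not part of this file) of a driver's
 * &drm_gem_object_funcs.free callback pairing with drm_gem_object_release().
 * The foo_bo type is the hypothetical wrapper from the earlier examples.
 *
 *    static void foo_bo_free(struct drm_gem_object *obj)
 *    {
 *        struct foo_bo *bo = container_of(obj, struct foo_bo, base);
 *
 *        drm_gem_object_release(obj);
 *        kfree(bo);
 *    }
 *
 *    static const struct drm_gem_object_funcs foo_gem_funcs = {
 *        .free = foo_bo_free,
 *        // ...
 *    };
 */
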
/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
        struct drm_gem_object *obj =
                container_of(kref, struct drm_gem_object, refcount);

        if (WARN_ON(!obj->funcs->free))
                return;

        obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
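
/*
 * Example: a hedged sketch (not part of this file) of a fault-based mapping
 * using these helpers to keep the object alive for the lifetime of the VMA.
 * foo_gem_fault() is a hypothetical fault handler.
 *
 *    static const struct vm_operations_struct foo_gem_vm_ops = {
 *        .fault = foo_gem_fault,
 *        .open  = drm_gem_vm_open,
 *        .close = drm_gem_vm_close,
 *    };
 *
 * A driver would then point &drm_gem_object_funcs.vm_ops at foo_gem_vm_ops.
 */
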
/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the GEM object's
 * vm_ops. Depending on their requirements, GEM objects can either
 * provide a fault handler in their vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation,
 * when the GEM object is not looked up based on its fake offset. To implement
 * the DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
                     struct vm_area_struct *vma)
{
        int ret;

        /* Check for valid size. */
        if (obj_size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        /*
         * Take a ref for this mapping of the object, so that the fault
         * handler can dereference the mmap offset's pointer to the object.
         * This reference is cleaned up by the corresponding vm_close
         * (which should happen whether the vma was created by this call, or
         * by a vm_open due to mremap or partial unmap or whatever).
         */
        drm_gem_object_get(obj);

        vma->vm_private_data = obj;
        vma->vm_ops = obj->funcs->vm_ops;

        if (obj->funcs->mmap) {
                ret = obj->funcs->mmap(obj, vma);
                if (ret)
                        goto err_drm_gem_object_put;
                WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
        } else {
                if (!vma->vm_ops) {
                        ret = -EINVAL;
                        goto err_drm_gem_object_put;
                }

                vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
                vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
                vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
        }

        return 0;

err_drm_gem_object_put:
        drm_gem_object_put(obj);
        return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_gem_object *obj = NULL;
        struct drm_vma_offset_node *node;
        int ret;

        if (drm_dev_is_unplugged(dev))
                return -ENODEV;

        drm_vma_offset_lock_lookup(dev->vma_offset_manager);
        node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
                                                  vma->vm_pgoff,
                                                  vma_pages(vma));
        if (likely(node)) {
                obj = container_of(node, struct drm_gem_object, vma_node);
                /*
                 * When the object is being freed, after it hits 0-refcnt it
                 * proceeds to tear down the object. In the process it will
                 * attempt to remove the VMA offset and so acquire this
                 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
                 * that matches our range, we know it is in the process of being
                 * destroyed and will be freed as soon as we release the lock -
                 * so we have to check for the 0-refcnted object and treat it as
                 * invalid.
                 */
                if (!kref_get_unless_zero(&obj->refcount))
                        obj = NULL;
        }
        drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

        if (!obj)
                return -EINVAL;

        if (!drm_vma_node_is_allowed(node, priv)) {
                drm_gem_object_put(obj);
                return -EACCES;
        }

        ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
                               vma);

        drm_gem_object_put(obj);

        return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
                        const struct drm_gem_object *obj)
{
        drm_printf_indent(p, indent, "name=%d\n", obj->name);
        drm_printf_indent(p, indent, "refcount=%u\n",
                          kref_read(&obj->refcount));
        drm_printf_indent(p, indent, "start=%08lx\n",
                          drm_vma_node_start(&obj->vma_node));
        drm_printf_indent(p, indent, "size=%zu\n", obj->size);
        drm_printf_indent(p, indent, "imported=%s\n",
                          str_yes_no(drm_gem_is_imported(obj)));

        if (obj->funcs->print_info)
                obj->funcs->print_info(p, indent, obj);
}

int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
        int ret;

        dma_resv_assert_held(obj->resv);

        if (!obj->funcs->vmap)
                return -EOPNOTSUPP;

        ret = obj->funcs->vmap(obj, map);
        if (ret)
                return ret;
        else if (iosys_map_is_null(map))
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL(drm_gem_vmap_locked);

void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
        dma_resv_assert_held(obj->resv);

        if (iosys_map_is_null(map))
                return;

        if (obj->funcs->vunmap)
                obj->funcs->vunmap(obj, map);

        /* Always set the mapping to NULL. Callers may rely on this. */
        iosys_map_clear(map);
}
EXPORT_SYMBOL(drm_gem_vunmap_locked);

void drm_gem_lock(struct drm_gem_object *obj)
{
        dma_resv_lock(obj->resv, NULL);
}
EXPORT_SYMBOL(drm_gem_lock);

void drm_gem_unlock(struct drm_gem_object *obj)
{
        dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_unlock);

int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
        int ret;

        dma_resv_lock(obj->resv, NULL);
        ret = drm_gem_vmap_locked(obj, map);
        dma_resv_unlock(obj->resv);

        return ret;
}
EXPORT_SYMBOL(drm_gem_vmap);

void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
        dma_resv_lock(obj->resv, NULL);
        drm_gem_vunmap_locked(obj, map);
        dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_vunmap);
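
/*
 * Example: a minimal sketch (not part of this file) of a CPU access window
 * around drm_gem_vmap()/drm_gem_vunmap(). It uses the iosys_map accessors so
 * the same code works for both I/O and system memory mappings.
 *
 *    static int foo_fill_bo(struct drm_gem_object *obj, u8 value)
 *    {
 *        struct iosys_map map;
 *        int ret;
 *
 *        ret = drm_gem_vmap(obj, &map);
 *        if (ret)
 *            return ret;
 *
 *        iosys_map_memset(&map, 0, value, obj->size);
 *        drm_gem_vunmap(obj, &map);
 *        return 0;
 *    }
 */
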
/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
                          struct ww_acquire_ctx *acquire_ctx)
{
        int contended = -1;
        int i, ret;

        ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
        if (contended != -1) {
                struct drm_gem_object *obj = objs[contended];

                ret = dma_resv_lock_slow_interruptible(obj->resv,
                                                       acquire_ctx);
                if (ret) {
                        ww_acquire_fini(acquire_ctx);
                        return ret;
                }
        }

        for (i = 0; i < count; i++) {
                if (i == contended)
                        continue;

                ret = dma_resv_lock_interruptible(objs[i]->resv,
                                                  acquire_ctx);
                if (ret) {
                        int j;

                        for (j = 0; j < i; j++)
                                dma_resv_unlock(objs[j]->resv);

                        if (contended != -1 && contended >= i)
                                dma_resv_unlock(objs[contended]->resv);

                        if (ret == -EDEADLK) {
                                contended = i;
                                goto retry;
                        }

                        ww_acquire_fini(acquire_ctx);
                        return ret;
                }
        }

        ww_acquire_done(acquire_ctx);

        return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
                            struct ww_acquire_ctx *acquire_ctx)
{
        int i;

        for (i = 0; i < count; i++)
                dma_resv_unlock(objs[i]->resv);

        ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);

/**
 * drm_gem_lru_init - initialize a LRU
 *
 * @lru: The LRU to initialize
 * @lock: The lock protecting the LRU
 */
void
drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock)
{
        lru->lock = lock;
        lru->count = 0;
        INIT_LIST_HEAD(&lru->list);
}
EXPORT_SYMBOL(drm_gem_lru_init);

static void
drm_gem_lru_remove_locked(struct drm_gem_object *obj)
{
        obj->lru->count -= obj->size >> PAGE_SHIFT;
        WARN_ON(obj->lru->count < 0);
        list_del(&obj->lru_node);
        obj->lru = NULL;
}

/**
 * drm_gem_lru_remove - remove object from whatever LRU it is in
 *
 * If the object is currently in any LRU, remove it.
 *
 * @obj: The GEM object to remove from current LRU
 */
void
drm_gem_lru_remove(struct drm_gem_object *obj)
{
        struct drm_gem_lru *lru = obj->lru;

        if (!lru)
                return;

        mutex_lock(lru->lock);
        drm_gem_lru_remove_locked(obj);
        mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_remove);

/**
 * drm_gem_lru_move_tail_locked - move the object to the tail of the LRU
 *
 * Like drm_gem_lru_move_tail() but the LRU lock must already be held.
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
        lockdep_assert_held_once(lru->lock);

        if (obj->lru)
                drm_gem_lru_remove_locked(obj);

        lru->count += obj->size >> PAGE_SHIFT;
        list_add_tail(&obj->lru_node, &lru->list);
        obj->lru = lru;
}
EXPORT_SYMBOL(drm_gem_lru_move_tail_locked);
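
/*
 * Example: an illustrative sketch (not part of this file) of the intended
 * submit-path pattern around drm_gem_lock_reservations(). Fence setup and
 * job submission are elided.
 *
 *    static int foo_submit_job(struct drm_gem_object **objs, int count)
 *    {
 *        struct ww_acquire_ctx ctx;
 *        int ret;
 *
 *        ret = drm_gem_lock_reservations(objs, count, &ctx);
 *        if (ret)
 *            return ret;
 *
 *        // reserve fence slots, attach fences, queue the job ...
 *
 *        drm_gem_unlock_reservations(objs, count, &ctx);
 *        return 0;
 *    }
 */
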
/**
 * drm_gem_lru_move_tail - move the object to the tail of the LRU
 *
 * If the object is already in this LRU it will be moved to the
 * tail. Otherwise it will be removed from whichever other LRU
 * it is in (if any) and moved into this LRU.
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
        mutex_lock(lru->lock);
        drm_gem_lru_move_tail_locked(lru, obj);
        mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_move_tail);
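
/*
 * Example: a hedged sketch (not part of this file) of a driver tracking
 * pinned vs. evictable buffers in two LRUs guarded by one lock. The foo_dev
 * fields are hypothetical.
 *
 *    struct foo_dev {
 *        struct mutex lru_lock;
 *        struct drm_gem_lru pinned;
 *        struct drm_gem_lru evictable;
 *    };
 *
 *    static void foo_lru_setup(struct foo_dev *fdev)
 *    {
 *        mutex_init(&fdev->lru_lock);
 *        drm_gem_lru_init(&fdev->pinned, &fdev->lru_lock);
 *        drm_gem_lru_init(&fdev->evictable, &fdev->lru_lock);
 *    }
 *
 *    static void foo_bo_mark_evictable(struct foo_dev *fdev,
 *                                      struct drm_gem_object *obj)
 *    {
 *        drm_gem_lru_move_tail(&fdev->evictable, obj);
 *    }
 */
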
/**
 * drm_gem_lru_scan - helper to implement shrinker.scan_objects
 *
 * If the shrink callback succeeds, it is expected that the driver
 * move the object out of this LRU.
 *
 * If the LRU possibly contains active buffers, it is the responsibility
 * of the shrink callback to check for this (ie. dma_resv_test_signaled())
 * or if necessary block until the buffer becomes idle.
 *
 * @lru: The LRU to scan
 * @nr_to_scan: The number of pages to try to reclaim
 * @remaining: The number of pages left to reclaim, should be initialized by caller
 * @shrink: Callback to try to shrink/reclaim the object.
 * @ticket: Optional ww_acquire_ctx context to use for locking
 */
unsigned long
drm_gem_lru_scan(struct drm_gem_lru *lru,
                 unsigned int nr_to_scan,
                 unsigned long *remaining,
                 bool (*shrink)(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket),
                 struct ww_acquire_ctx *ticket)
{
        struct drm_gem_lru still_in_lru;
        struct drm_gem_object *obj;
        unsigned freed = 0;

        drm_gem_lru_init(&still_in_lru, lru->lock);

        mutex_lock(lru->lock);

        while (freed < nr_to_scan) {
                obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);

                if (!obj)
                        break;

                drm_gem_lru_move_tail_locked(&still_in_lru, obj);

                /*
                 * If it's in the process of being freed, gem_object->free()
                 * may be blocked on lock waiting to remove it. So just
                 * skip it.
                 */
                if (!kref_get_unless_zero(&obj->refcount))
                        continue;

                /*
                 * Now that we own a reference, we can drop the lock for the
                 * rest of the loop body, to reduce contention with other
                 * code paths that need the LRU lock.
                 */
                mutex_unlock(lru->lock);

                if (ticket)
                        ww_acquire_init(ticket, &reservation_ww_class);

                /*
                 * Note that this still needs to be trylock, since we can
                 * hit shrinker in response to trying to get backing pages
                 * for this obj (ie. while its lock is already held).
                 */
                if (!ww_mutex_trylock(&obj->resv->lock, ticket)) {
                        *remaining += obj->size >> PAGE_SHIFT;
                        goto tail;
                }

                if (shrink(obj, ticket)) {
                        freed += obj->size >> PAGE_SHIFT;

                        /*
                         * If we succeeded in releasing the object's backing
                         * pages, we expect the driver to have moved the object
                         * out of this LRU.
                         */
                        WARN_ON(obj->lru == &still_in_lru);
                        WARN_ON(obj->lru == lru);
                }

                dma_resv_unlock(obj->resv);

                if (ticket)
                        ww_acquire_fini(ticket);

tail:
                drm_gem_object_put(obj);
                mutex_lock(lru->lock);
        }

        /*
         * Move objects we've skipped over out of the temporary still_in_lru
         * back into this LRU.
         */
        list_for_each_entry(obj, &still_in_lru.list, lru_node)
                obj->lru = lru;
        list_splice_tail(&still_in_lru.list, &lru->list);
        lru->count += still_in_lru.count;

        mutex_unlock(lru->lock);

        return freed;
}
EXPORT_SYMBOL(drm_gem_lru_scan);

/**
 * drm_gem_evict_locked - helper to evict backing pages for a GEM object
 * @obj: obj in question
 */
int drm_gem_evict_locked(struct drm_gem_object *obj)
{
        dma_resv_assert_held(obj->resv);

        if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
                return -EBUSY;

        if (obj->funcs->evict)
                return obj->funcs->evict(obj);

        return 0;
}
EXPORT_SYMBOL(drm_gem_evict_locked);
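
/*
 * Example: an illustrative sketch (not part of this file) of a shrinker
 * scan_objects implementation built on drm_gem_lru_scan(). The foo_dev
 * wiring and use of shrinker->private_data (from the shrinker_alloc() API)
 * are assumptions; the shrink callback is expected to move the object out
 * of the scanned LRU on success.
 *
 *    static bool foo_shrink_purge(struct drm_gem_object *obj,
 *                                 struct ww_acquire_ctx *ticket)
 *    {
 *        if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
 *            return false;
 *        return drm_gem_evict_locked(obj) == 0;
 *    }
 *
 *    static unsigned long foo_shrinker_scan(struct shrinker *shrinker,
 *                                           struct shrink_control *sc)
 *    {
 *        struct foo_dev *fdev = shrinker->private_data;
 *        unsigned long remaining = 0;
 *        unsigned long freed;
 *
 *        freed = drm_gem_lru_scan(&fdev->evictable, sc->nr_to_scan,
 *                                 &remaining, foo_shrink_purge, NULL);
 *
 *        return freed ?: SHRINK_STOP;
 *    }
 */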