/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/dma-buf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/iosys-map.h>
#include <linux/mem_encrypt.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
					  GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	if (drm_core_check_feature(dev, DRIVER_GEM_GPUVA))
		drm_gem_gpuva_init(obj);

	drm_vma_node_reset(&obj->vma_node);
	INIT_LIST_HEAD(&obj->lru_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * drm_gem_private_object_fini - Finalize a failed drm_gem_object
 * @obj: drm_gem_object
 *
 * Uninitialize an already allocated GEM object when its initialization failed.
 */
void drm_gem_private_object_fini(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	dma_resv_fini(&obj->_resv);
}
EXPORT_SYMBOL(drm_gem_private_object_fini);
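
/*
 * A minimal usage sketch (not part of this file): a hypothetical driver
 * allocating a shmem-backed buffer object. The wrapper type foo_bo, the
 * helper foo_bo_create() and foo_gem_funcs are assumptions for illustration;
 * only drm_gem_object_init() is the real API from this file.
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
 *	{
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		bo->base.funcs = &foo_gem_funcs;	// driver's drm_gem_object_funcs
 *
 *		ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(bo);
 *			return ERR_PTR(ret);
 *		}
 *
 *		return bo;
 *	}
 */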

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name.
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;

	if (obj->funcs->close)
		obj->funcs->close(obj, file_priv);

	drm_prime_remove_buf_handle(&file_priv->prime, id);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
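
/*
 * A usage sketch (not part of this file): the tail of a hypothetical driver's
 * &drm_driver.dumb_create callback. foo_bo_create() is an assumed driver
 * helper; drm_gem_handle_create() and drm_gem_object_put() are the real APIs.
 * The handle is created last, once the object is fully set up, and the local
 * reference is dropped afterwards because the handle holds its own reference.
 *
 *	static int foo_dumb_create(struct drm_file *file_priv,
 *				   struct drm_device *dev,
 *				   struct drm_mode_create_dumb *args)
 *	{
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 *		args->size = PAGE_ALIGN(args->pitch * args->height);
 *
 *		bo = foo_bo_create(dev, args->size);
 *		if (IS_ERR(bo))
 *			return PTR_ERR(bo);
 *
 *		ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle);
 *		drm_gem_object_put(&bo->base);	// handle keeps the object alive
 *
 *		return ret;
 *	}
 */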

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
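
/*
 * How the fake offset flows back to userspace, sketched with hypothetical
 * driver code (bo and args are assumed; the drm_* calls are real): the driver
 * allocates the offset and hands it to userspace, which passes it to mmap(2)
 * on the DRM file descriptor.
 *
 *	// kernel side, e.g. in an ioctl that reports the mmap offset
 *	ret = drm_gem_create_mmap_offset(&bo->base);
 *	if (ret)
 *		return ret;
 *	args->offset = drm_vma_node_offset_addr(&bo->base.vma_node);
 *
 *	// userspace side
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 drm_fd, args.offset);
 */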

/*
 * Move folios to appropriate lru and release the folios, decrementing the
 * ref count of those folios.
 */
static void drm_gem_check_release_batch(struct folio_batch *fbatch)
{
	check_move_unevictable_folios(fbatch);
	__folio_batch_release(fbatch);
	cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(), but not for those initialized with
 * drm_gem_private_object_init() only.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page **pages;
	struct folio *folio;
	struct folio_batch fbatch;
	long i, j, npages;

	if (WARN_ON(!obj->filp))
		return ERR_PTR(-EINVAL);

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	i = 0;
	while (i < npages) {
		long nr;

		folio = shmem_read_folio_gfp(mapping, i,
					     mapping_gfp_mask(mapping));
		if (IS_ERR(folio))
			goto fail;
		nr = min(npages - i, folio_nr_pages(folio));
		for (j = 0; j < nr; j++, i++)
			pages[i] = folio_file_page(folio, i);

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (folio_pfn(folio) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	folio_batch_init(&fbatch);
	j = 0;
	while (j < i) {
		struct folio *f = page_folio(pages[j]);

		if (!folio_batch_add(&fbatch, f))
			drm_gem_check_release_batch(&fbatch);
		j += folio_nr_pages(f);
	}
	if (fbatch.nr)
		drm_gem_check_release_batch(&fbatch);

	kvfree(pages);
	return ERR_CAST(folio);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct folio_batch fbatch;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	folio_batch_init(&fbatch);
	for (i = 0; i < npages; i++) {
		struct folio *folio;

		if (!pages[i])
			continue;
		folio = page_folio(pages[i]);

		if (dirty)
			folio_mark_dirty(folio);

		if (accessed)
			folio_mark_accessed(folio);

		/* Undo the reference we took when populating the table */
		if (!folio_batch_add(&fbatch, folio))
			drm_gem_check_release_batch(&fbatch);
		i += folio_nr_pages(folio) - 1;
	}
	if (folio_batch_count(&fbatch))
		drm_gem_check_release_batch(&fbatch);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
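
/*
 * A pairing sketch (hypothetical driver code; the drm_* calls are real):
 * pin the shmem pages for device use, then release them marked dirty once
 * the device is done, so the contents survive swap-out.
 *
 *	struct page **pages;
 *
 *	pages = drm_gem_get_pages(&bo->base);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *
 *	// ... build an sg table / map for DMA, run the job ...
 *
 *	drm_gem_put_pages(&bo->base, pages, true, false);
 */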

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 * @objs filled in with GEM object pointers. Returned GEM objects need to be
 * released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			      GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	*objs_out = objs;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 *
 * Returns:
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than 0 on success.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
				    true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
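
/*
 * A lookup/teardown sketch for a hypothetical submit ioctl (args->bo_handles
 * and args->bo_count are assumed ioctl fields; the drm_* calls are real).
 * Every object returned by drm_gem_objects_lookup() carries a reference that
 * the caller must drop, and the array itself must be freed.
 *
 *	struct drm_gem_object **objs = NULL;
 *	int i, ret;
 *
 *	ret = drm_gem_objects_lookup(file_priv,
 *				     u64_to_user_ptr(args->bo_handles),
 *				     args->bo_count, &objs);
 *	if (ret)
 *		goto out;
 *
 *	// ... validate, lock and use the objects ...
 *
 * out:
 *	for (i = 0; objs && i < args->bo_count; i++)
 *		if (objs[i])
 *			drm_gem_object_put(objs[i]);
 *	kvfree(objs);
 */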

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	return drm_gem_handle_delete(file_priv, args->handle);
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;
	args->size = obj->size;

err:
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}
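
/*
 * The flink/open name-sharing flow from userspace, sketched for reference
 * (the ioctl numbers and structs are the real UAPI from <drm/drm.h>; the two
 * file descriptors are assumptions): process A names an object, process B
 * opens it by name.
 *
 *	// process A: export a global name for its handle
 *	struct drm_gem_flink flink = { .handle = bo_handle };
 *	ioctl(drm_fd_a, DRM_IOCTL_GEM_FLINK, &flink);
 *	// flink.name now identifies the object globally
 *
 *	// process B: get its own handle (and the size) from the name
 *	struct drm_gem_open open_arg = { .name = flink.name };
 *	ioctl(drm_fd_b, DRM_IOCTL_GEM_OPEN, &open_arg);
 *	// open_arg.handle, open_arg.size
 */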

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse of
 * drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	if (obj->filp)
		fput(obj->filp);

	drm_gem_private_object_fini(obj);

	drm_gem_free_mmap_offset(obj);
	drm_gem_lru_remove(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);

	if (WARN_ON(!obj->funcs->free))
		return;

	obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
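
/*
 * A sketch of how a driver with a fault handler would wire these up
 * (foo_gem_fault is an assumed driver fault handler; the two helpers above
 * are the real functions). The open/close hooks keep the GEM object's
 * refcount in sync with VMA duplication and teardown.
 *
 *	static const struct vm_operations_struct foo_gem_vm_ops = {
 *		.fault = foo_gem_fault,
 *		.open  = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 *
 * A driver would then point &drm_gem_object_funcs.vm_ops at foo_gem_vm_ops.
 */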

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the GEM object's
 * vm_ops. Depending on their requirements, GEM objects can either
 * provide a fault handler in their vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success, or -EINVAL if the object size is smaller than the VMA
 * size or if no vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	vma->vm_private_data = obj;
	vma->vm_ops = obj->funcs->vm_ops;

	if (obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret)
			goto err_drm_gem_object_put;
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (!vma->vm_ops) {
			ret = -EINVAL;
			goto err_drm_gem_object_put;
		}

		vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
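
/*
 * For the common case, drivers don't call drm_gem_mmap_obj() directly for
 * DRM-fd mmaps; they install drm_gem_mmap() (below) as their
 * &file_operations.mmap callback, usually via the DEFINE_DRM_GEM_FOPS()
 * helper macro from <drm/drm_gem.h>:
 *
 *	DEFINE_DRM_GEM_FOPS(foo_driver_fops);
 *
 *	static const struct drm_driver foo_driver = {
 *		...
 *		.fops = &foo_driver_fops,
 *	};
 *
 * foo_driver_fops and foo_driver are placeholder names for illustration.
 */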

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  str_yes_no(obj->import_attach));

	if (obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
}

int drm_gem_pin_locked(struct drm_gem_object *obj)
{
	if (obj->funcs->pin)
		return obj->funcs->pin(obj);

	return 0;
}

void drm_gem_unpin_locked(struct drm_gem_object *obj)
{
	if (obj->funcs->unpin)
		obj->funcs->unpin(obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	int ret;

	dma_resv_lock(obj->resv, NULL);
	ret = drm_gem_pin_locked(obj);
	dma_resv_unlock(obj->resv);

	return ret;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	dma_resv_lock(obj->resv, NULL);
	drm_gem_unpin_locked(obj);
	dma_resv_unlock(obj->resv);
}

int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	dma_resv_assert_held(obj->resv);

	if (!obj->funcs->vmap)
		return -EOPNOTSUPP;

	ret = obj->funcs->vmap(obj, map);
	if (ret)
		return ret;
	else if (iosys_map_is_null(map))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(drm_gem_vmap);

void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	dma_resv_assert_held(obj->resv);

	if (iosys_map_is_null(map))
		return;

	if (obj->funcs->vunmap)
		obj->funcs->vunmap(obj, map);

	/* Always set the mapping to NULL. Callers may rely on this. */
	iosys_map_clear(map);
}
EXPORT_SYMBOL(drm_gem_vunmap);

void drm_gem_lock(struct drm_gem_object *obj)
{
	dma_resv_lock(obj->resv, NULL);
}
EXPORT_SYMBOL(drm_gem_lock);

void drm_gem_unlock(struct drm_gem_object *obj)
{
	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_unlock);

int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	dma_resv_lock(obj->resv, NULL);
	ret = drm_gem_vmap(obj, map);
	dma_resv_unlock(obj->resv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_vmap_unlocked);

void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
{
	dma_resv_lock(obj->resv, NULL);
	drm_gem_vunmap(obj, map);
	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_vunmap_unlocked);
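
/*
 * A CPU-access sketch (hypothetical driver code; struct iosys_map and the
 * drm_gem_vmap_unlocked()/drm_gem_vunmap_unlocked() calls are real): map the
 * object into the kernel address space, copy data in, then unmap.
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = drm_gem_vmap_unlocked(obj, &map);
 *	if (ret)
 *		return ret;
 *
 *	iosys_map_memcpy_to(&map, 0, data, len);	// data/len assumed
 *
 *	drm_gem_vunmap_unlocked(obj, &map);
 */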

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
						       acquire_ctx);
		if (ret) {
			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
						  acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
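
/*
 * A ww-locking sketch for a hypothetical job-submission path (job->fence,
 * objs and count are assumed; the drm_gem_*_reservations(),
 * dma_resv_reserve_fences() and dma_resv_add_fence() calls are real):
 *
 *	struct ww_acquire_ctx ctx;
 *	int i, ret;
 *
 *	ret = drm_gem_lock_reservations(objs, count, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < count; i++) {
 *		ret = dma_resv_reserve_fences(objs[i]->resv, 1);
 *		if (ret)
 *			break;
 *		dma_resv_add_fence(objs[i]->resv, job->fence,
 *				   DMA_RESV_USAGE_WRITE);
 *	}
 *
 *	drm_gem_unlock_reservations(objs, count, &ctx);
 */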

/**
 * drm_gem_lru_init - initialize a LRU
 *
 * @lru: The LRU to initialize
 * @lock: The lock protecting the LRU
 */
void
drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock)
{
	lru->lock = lock;
	lru->count = 0;
	INIT_LIST_HEAD(&lru->list);
}
EXPORT_SYMBOL(drm_gem_lru_init);

static void
drm_gem_lru_remove_locked(struct drm_gem_object *obj)
{
	obj->lru->count -= obj->size >> PAGE_SHIFT;
	WARN_ON(obj->lru->count < 0);
	list_del(&obj->lru_node);
	obj->lru = NULL;
}

/**
 * drm_gem_lru_remove - remove object from whatever LRU it is in
 *
 * If the object is currently in any LRU, remove it.
 *
 * @obj: The GEM object to remove from current LRU
 */
void
drm_gem_lru_remove(struct drm_gem_object *obj)
{
	struct drm_gem_lru *lru = obj->lru;

	if (!lru)
		return;

	mutex_lock(lru->lock);
	drm_gem_lru_remove_locked(obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_remove);

/**
 * drm_gem_lru_move_tail_locked - move the object to the tail of the LRU
 *
 * Like &drm_gem_lru_move_tail but lru lock must be held
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	lockdep_assert_held_once(lru->lock);

	if (obj->lru)
		drm_gem_lru_remove_locked(obj);

	lru->count += obj->size >> PAGE_SHIFT;
	list_add_tail(&obj->lru_node, &lru->list);
	obj->lru = lru;
}
EXPORT_SYMBOL(drm_gem_lru_move_tail_locked);

/**
 * drm_gem_lru_move_tail - move the object to the tail of the LRU
 *
 * If the object is already in this LRU it will be moved to the
 * tail. Otherwise it will be removed from whichever other LRU
 * it is in (if any) and moved into this LRU.
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	mutex_lock(lru->lock);
	drm_gem_lru_move_tail_locked(lru, obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_move_tail);
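
/*
 * A sketch of driver-side LRU bookkeeping (foo->lru_lock and foo->inactive
 * are assumed driver fields; the drm_gem_lru_* calls are real): set up one
 * LRU per eviction class and move objects to the tail whenever they are
 * retired, so a shrinker scan visits the coldest buffers first.
 *
 *	drm_gem_lru_init(&foo->inactive, &foo->lru_lock);
 *
 *	// when a buffer becomes idle/evictable:
 *	drm_gem_lru_move_tail(&foo->inactive, &bo->base);
 */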

/**
 * drm_gem_lru_scan - helper to implement shrinker.scan_objects
 *
 * If the shrink callback succeeds, it is expected that the driver
 * move the object out of this LRU.
 *
 * If the LRU may contain active buffers, it is the responsibility
 * of the shrink callback to check for this (ie. dma_resv_test_signaled())
 * or, if necessary, block until the buffer becomes idle.
 *
 * @lru: The LRU to scan
 * @nr_to_scan: The number of pages to try to reclaim
 * @remaining: The number of pages left to reclaim, should be initialized by caller
 * @shrink: Callback to try to shrink/reclaim the object.
 */
unsigned long
drm_gem_lru_scan(struct drm_gem_lru *lru,
		 unsigned int nr_to_scan,
		 unsigned long *remaining,
		 bool (*shrink)(struct drm_gem_object *obj))
{
	struct drm_gem_lru still_in_lru;
	struct drm_gem_object *obj;
	unsigned freed = 0;

	drm_gem_lru_init(&still_in_lru, lru->lock);

	mutex_lock(lru->lock);

	while (freed < nr_to_scan) {
		obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);

		if (!obj)
			break;

		drm_gem_lru_move_tail_locked(&still_in_lru, obj);

		/*
		 * If it's in the process of being freed, gem_object->free()
		 * may be blocked on lock waiting to remove it. So just
		 * skip it.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			continue;

		/*
		 * Now that we own a reference, we can drop the lock for the
		 * rest of the loop body, to reduce contention with other
		 * code paths that need the LRU lock
		 */
		mutex_unlock(lru->lock);

		/*
		 * Note that this still needs to be trylock, since we can
		 * hit shrinker in response to trying to get backing pages
		 * for this obj (ie. while its lock is already held)
		 */
		if (!dma_resv_trylock(obj->resv)) {
			*remaining += obj->size >> PAGE_SHIFT;
			goto tail;
		}

		if (shrink(obj)) {
			freed += obj->size >> PAGE_SHIFT;

			/*
			 * If we succeeded in releasing the object's backing
			 * pages, we expect the driver to have moved the object
			 * out of this LRU
			 */
			WARN_ON(obj->lru == &still_in_lru);
			WARN_ON(obj->lru == lru);
		}

		dma_resv_unlock(obj->resv);

tail:
		drm_gem_object_put(obj);
		mutex_lock(lru->lock);
	}

	/*
	 * Move objects we've skipped over out of the temporary still_in_lru
	 * back into this LRU
	 */
	list_for_each_entry (obj, &still_in_lru.list, lru_node)
		obj->lru = lru;
	list_splice_tail(&still_in_lru.list, &lru->list);
	lru->count += still_in_lru.count;

	mutex_unlock(lru->lock);

	return freed;
}
EXPORT_SYMBOL(drm_gem_lru_scan);

/**
 * drm_gem_evict - helper to evict backing pages for a GEM object
 * @obj: obj in question
 */
int drm_gem_evict(struct drm_gem_object *obj)
{
	dma_resv_assert_held(obj->resv);

	if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
		return -EBUSY;

	if (obj->funcs->evict)
		return obj->funcs->evict(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_evict);
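
/*
 * A shrinker sketch tying the pieces together (foo_device, foo_shrink_bo()
 * and the way the device is recovered from the shrinker via private_data are
 * assumptions; drm_gem_lru_scan() and drm_gem_evict() are the real helpers).
 * The shrink callback runs with the object's reservation held, is expected to
 * release the backing pages and move the object out of the scanned LRU.
 *
 *	static bool foo_shrink_bo(struct drm_gem_object *obj)
 *	{
 *		if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
 *			return false;	// still busy, skip
 *		return drm_gem_evict(obj) == 0;
 *	}
 *
 *	static unsigned long foo_shrinker_scan(struct shrinker *shrinker,
 *					       struct shrink_control *sc)
 *	{
 *		struct foo_device *foo = shrinker->private_data;
 *		unsigned long remaining = 0;
 *		unsigned long freed;
 *
 *		freed = drm_gem_lru_scan(&foo->inactive, sc->nr_to_scan,
 *					 &remaining, foo_shrink_bo);
 *
 *		return freed ?: SHRINK_STOP;
 *	}
 */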