/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/dma-buf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/iosys-map.h>
#include <linux/mem_encrypt.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
					  GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	drm_vma_node_reset(&obj->vma_node);
	INIT_LIST_HEAD(&obj->lru_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * drm_gem_private_object_fini - Finalize a failed drm_gem_object
 * @obj: drm_gem_object
 *
 * Uninitialize an already allocated GEM object when its initialization failed.
 */
void drm_gem_private_object_fini(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	dma_resv_fini(&obj->_resv);
}
EXPORT_SYMBOL(drm_gem_private_object_fini);
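/*
 * Example: drivers typically embed &struct drm_gem_object in their own
 * buffer object struct and initialize it with shmem backing. This is an
 * illustrative sketch only; my_bo and my_gem_funcs are hypothetical names,
 * and my_gem_funcs is assumed to provide at least a .free callback:
 *
 *	struct my_bo {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct my_bo *my_bo_create(struct drm_device *dev, size_t size)
 *	{
 *		struct my_bo *bo;
 *		int ret;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		bo->base.funcs = &my_gem_funcs;
 *
 *		ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(bo);
 *			return ERR_PTR(ret);
 *		}
 *
 *		return bo;
 *	}
 */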
/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name.
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;

	if (obj->funcs->close)
		obj->funcs->close(obj, file_priv);

	drm_prime_remove_buf_handle(&file_priv->prime, id);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
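/*
 * Example: a driver's buffer-creation ioctl would end by publishing the
 * object to userspace. A minimal sketch, reusing the hypothetical
 * my_bo_create() from the example above; args stands for a driver-specific
 * ioctl argument struct. The allocation reference is dropped
 * unconditionally because on success the handle now holds its own
 * reference, and on failure the object must die anyway:
 *
 *	struct my_bo *bo = my_bo_create(dev, args->size);
 *	u32 handle;
 *	int ret;
 *
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 *	ret = drm_gem_handle_create(file_priv, &bo->base, &handle);
 *	drm_gem_object_put(&bo->base);
 *	if (ret)
 *		return ret;
 *
 *	args->handle = handle;
 *	return 0;
 */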
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/*
 * Move pages to the appropriate LRU and release the pagevec, decrementing the
 * reference count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid for objects initialized with
 * drm_gem_object_init(); it must not be used on objects initialized only with
 * drm_gem_private_object_init().
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	struct pagevec pvec;
	int i, npages;

	if (WARN_ON(!obj->filp))
		return ERR_PTR(-EINVAL);

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	while (i--) {
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct pagevec pvec;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		if (!pages[i])
			continue;

		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
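/*
 * Example: pairing drm_gem_get_pages() with drm_gem_put_pages() in a
 * driver's pin/unpin paths. An illustrative sketch, assuming the
 * hypothetical my_bo wrapper from above caches the returned array in a
 * pages member:
 *
 *	static int my_bo_get_pages(struct my_bo *bo)
 *	{
 *		struct page **pages = drm_gem_get_pages(&bo->base);
 *
 *		if (IS_ERR(pages))
 *			return PTR_ERR(pages);
 *
 *		bo->pages = pages;
 *		return 0;
 *	}
 *
 *	static void my_bo_put_pages(struct my_bo *bo)
 *	{
 *		drm_gem_put_pages(&bo->base, bo->pages, true, false);
 *		bo->pages = NULL;
 *	}
 */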
static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs filled in with GEM object pointers. Returned GEM objects need to be
 * released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			      GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	*objs_out = objs;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * 0 on success, -ETIME if the wait timed out, or -ERESTARTSYS if the wait
 * was interrupted.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
				    true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
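/*
 * Example: with this helper, a driver's BO-wait ioctl reduces to a handle
 * lookup plus a timeout conversion. An illustrative sketch only; the ioctl
 * and its argument struct are hypothetical, and an absolute nanosecond
 * timeout is assumed so it can be converted with the
 * drm_timeout_abs_to_jiffies() helper:
 *
 *	static int my_wait_ioctl(struct drm_device *dev, void *data,
 *				 struct drm_file *file_priv)
 *	{
 *		struct my_wait_args *args = data;
 *		unsigned long timeout;
 *
 *		timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
 *		return drm_gem_dma_resv_wait(file_priv, args->handle,
 *					     args->wait_all, timeout);
 *	}
 */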
/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;
	args->size = obj->size;

err:
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse of
 * drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	if (obj->filp)
		fput(obj->filp);

	drm_gem_private_object_fini(obj);

	drm_gem_free_mmap_offset(obj);
	drm_gem_lru_remove(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);

	if (WARN_ON(!obj->funcs->free))
		return;

	obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the GEM object's
 * vm_ops. Depending on their requirements, GEM objects can either
 * provide a fault handler in their vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	vma->vm_private_data = obj;
	vma->vm_ops = obj->funcs->vm_ops;

	if (obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret)
			goto err_drm_gem_object_put;
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (!vma->vm_ops) {
			ret = -EINVAL;
			goto err_drm_gem_object_put;
		}

		vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
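/*
 * Example: the typical caller of drm_gem_mmap_obj() is a &dma_buf_ops.mmap
 * implementation, where the object is already known and no fake-offset
 * lookup is wanted. A minimal sketch; my_prime_mmap is a hypothetical name,
 * and for DRM-exported dma-bufs the GEM object is stored in &dma_buf.priv:
 *
 *	static int my_prime_mmap(struct dma_buf *dma_buf,
 *				 struct vm_area_struct *vma)
 *	{
 *		struct drm_gem_object *obj = dma_buf->priv;
 *
 *		return drm_gem_mmap_obj(obj, obj->size, vma);
 *	}
 */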
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  str_yes_no(obj->import_attach));

	if (obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	if (obj->funcs->pin)
		return obj->funcs->pin(obj);
	else
		return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	if (obj->funcs->unpin)
		obj->funcs->unpin(obj);
}

int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	dma_resv_assert_held(obj->resv);

	if (!obj->funcs->vmap)
		return -EOPNOTSUPP;

	ret = obj->funcs->vmap(obj, map);
	if (ret)
		return ret;
	else if (iosys_map_is_null(map))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(drm_gem_vmap);

void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	dma_resv_assert_held(obj->resv);

	if (iosys_map_is_null(map))
		return;

	if (obj->funcs->vunmap)
		obj->funcs->vunmap(obj, map);

	/* Always set the mapping to NULL. Callers may rely on this. */
	iosys_map_clear(map);
}
EXPORT_SYMBOL(drm_gem_vunmap);

int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	dma_resv_lock(obj->resv, NULL);
	ret = drm_gem_vmap(obj, map);
	dma_resv_unlock(obj->resv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_vmap_unlocked);

void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
{
	dma_resv_lock(obj->resv, NULL);
	drm_gem_vunmap(obj, map);
	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_vunmap_unlocked);
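/*
 * Example: a short-lived kernel CPU mapping around a copy, using the
 * _unlocked variants so the reservation lock is taken and dropped
 * internally. An illustrative sketch; obj, data and len are assumed to be
 * provided by the caller:
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = drm_gem_vmap_unlocked(obj, &map);
 *	if (ret)
 *		return ret;
 *
 *	iosys_map_memcpy_to(&map, 0, data, len);
 *	drm_gem_vunmap_unlocked(obj, &map);
 */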
/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
						       acquire_ctx);
		if (ret) {
			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
						  acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
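/*
 * Example: a job-submission path locking all buffers of a job, attaching
 * the job's done-fence and unlocking again. A minimal sketch with
 * hypothetical job and fence members; note that dma_resv_add_fence()
 * requires a slot reserved via dma_resv_reserve_fences() first:
 *
 *	struct ww_acquire_ctx ctx;
 *	int i, ret;
 *
 *	ret = drm_gem_lock_reservations(job->bos, job->bo_count, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < job->bo_count; i++) {
 *		ret = dma_resv_reserve_fences(job->bos[i]->resv, 1);
 *		if (ret)
 *			break;
 *		dma_resv_add_fence(job->bos[i]->resv, job->done_fence,
 *				   DMA_RESV_USAGE_WRITE);
 *	}
 *
 *	drm_gem_unlock_reservations(job->bos, job->bo_count, &ctx);
 *	return ret;
 */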
/**
 * drm_gem_lru_init - initialize a LRU
 *
 * @lru: The LRU to initialize
 * @lock: The lock protecting the LRU
 */
void
drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock)
{
	lru->lock = lock;
	lru->count = 0;
	INIT_LIST_HEAD(&lru->list);
}
EXPORT_SYMBOL(drm_gem_lru_init);

static void
drm_gem_lru_remove_locked(struct drm_gem_object *obj)
{
	obj->lru->count -= obj->size >> PAGE_SHIFT;
	WARN_ON(obj->lru->count < 0);
	list_del(&obj->lru_node);
	obj->lru = NULL;
}

/**
 * drm_gem_lru_remove - remove object from whatever LRU it is in
 *
 * If the object is currently in any LRU, remove it.
 *
 * @obj: The GEM object to remove from current LRU
 */
void
drm_gem_lru_remove(struct drm_gem_object *obj)
{
	struct drm_gem_lru *lru = obj->lru;

	if (!lru)
		return;

	mutex_lock(lru->lock);
	drm_gem_lru_remove_locked(obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_remove);

/**
 * drm_gem_lru_move_tail_locked - move the object to the tail of the LRU
 *
 * Like &drm_gem_lru_move_tail but the LRU lock must already be held.
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	lockdep_assert_held_once(lru->lock);

	if (obj->lru)
		drm_gem_lru_remove_locked(obj);

	lru->count += obj->size >> PAGE_SHIFT;
	list_add_tail(&obj->lru_node, &lru->list);
	obj->lru = lru;
}
EXPORT_SYMBOL(drm_gem_lru_move_tail_locked);

/**
 * drm_gem_lru_move_tail - move the object to the tail of the LRU
 *
 * If the object is already in this LRU it will be moved to the
 * tail. Otherwise it will be removed from whichever other LRU
 * it is in (if any) and moved into this LRU.
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	mutex_lock(lru->lock);
	drm_gem_lru_move_tail_locked(lru, obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_move_tail);
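/*
 * Example: basic LRU bookkeeping in a driver. One mutex may protect
 * several LRUs; moving an object to the tail on every use keeps the best
 * eviction candidates at the head. Hypothetical names throughout:
 *
 *	static DEFINE_MUTEX(my_lru_lock);
 *	static struct drm_gem_lru my_inactive_lru;
 *
 *	// at driver init:
 *	drm_gem_lru_init(&my_inactive_lru, &my_lru_lock);
 *
 *	// whenever a buffer becomes idle/unpinned:
 *	drm_gem_lru_move_tail(&my_inactive_lru, obj);
 */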
/**
 * drm_gem_lru_scan - helper to implement shrinker.scan_objects
 *
 * If the shrink callback succeeds, it is expected that the driver
 * move the object out of this LRU.
 *
 * If the LRU possibly contains active buffers, it is the responsibility
 * of the shrink callback to check for this (ie. dma_resv_test_signaled())
 * or if necessary block until the buffer becomes idle.
 *
 * @lru: The LRU to scan
 * @nr_to_scan: The number of pages to try to reclaim
 * @remaining: The number of pages left to reclaim, should be initialized by caller
 * @shrink: Callback to try to shrink/reclaim the object.
 */
unsigned long
drm_gem_lru_scan(struct drm_gem_lru *lru,
		 unsigned int nr_to_scan,
		 unsigned long *remaining,
		 bool (*shrink)(struct drm_gem_object *obj))
{
	struct drm_gem_lru still_in_lru;
	struct drm_gem_object *obj;
	unsigned freed = 0;

	drm_gem_lru_init(&still_in_lru, lru->lock);

	mutex_lock(lru->lock);

	while (freed < nr_to_scan) {
		obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);

		if (!obj)
			break;

		drm_gem_lru_move_tail_locked(&still_in_lru, obj);

		/*
		 * If it's in the process of being freed, gem_object->free()
		 * may be blocked on lock waiting to remove it. So just
		 * skip it.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			continue;

		/*
		 * Now that we own a reference, we can drop the lock for the
		 * rest of the loop body, to reduce contention with other
		 * code paths that need the LRU lock.
		 */
		mutex_unlock(lru->lock);

		/*
		 * Note that this still needs to be trylock, since we can
		 * hit the shrinker in response to trying to get backing pages
		 * for this obj (ie. while its lock is already held).
		 */
		if (!dma_resv_trylock(obj->resv)) {
			*remaining += obj->size >> PAGE_SHIFT;
			goto tail;
		}

		if (shrink(obj)) {
			freed += obj->size >> PAGE_SHIFT;

			/*
			 * If we succeeded in releasing the object's backing
			 * pages, we expect the driver to have moved the object
			 * out of this LRU.
			 */
			WARN_ON(obj->lru == &still_in_lru);
			WARN_ON(obj->lru == lru);
		}

		dma_resv_unlock(obj->resv);

tail:
		drm_gem_object_put(obj);
		mutex_lock(lru->lock);
	}

	/*
	 * Move objects we've skipped over out of the temporary still_in_lru
	 * back into this LRU.
	 */
	list_for_each_entry (obj, &still_in_lru.list, lru_node)
		obj->lru = lru;
	list_splice_tail(&still_in_lru.list, &lru->list);
	lru->count += still_in_lru.count;

	mutex_unlock(lru->lock);

	return freed;
}
EXPORT_SYMBOL(drm_gem_lru_scan);

/**
 * drm_gem_evict - helper to evict backing pages for a GEM object
 * @obj: obj in question
 */
int drm_gem_evict(struct drm_gem_object *obj)
{
	dma_resv_assert_held(obj->resv);

	if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
		return -EBUSY;

	if (obj->funcs->evict)
		return obj->funcs->evict(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_evict);
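/*
 * Example: wiring drm_gem_lru_scan() and drm_gem_evict() into a shrinker,
 * continuing the hypothetical my_inactive_lru from above. The shrink
 * callback runs with the object's reservation lock held (taken via trylock
 * by drm_gem_lru_scan()), which is what drm_gem_evict() asserts; the
 * driver's &drm_gem_object_funcs.evict is expected to drop the backing
 * pages and take the object off the LRU:
 *
 *	static bool my_gem_shrink(struct drm_gem_object *obj)
 *	{
 *		return drm_gem_evict(obj) == 0;
 *	}
 *
 *	static unsigned long
 *	my_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 *	{
 *		unsigned long remaining = 0;
 *		unsigned long freed;
 *
 *		freed = drm_gem_lru_scan(&my_inactive_lru, sc->nr_to_scan,
 *					 &remaining, my_gem_shrink);
 *
 *		return freed ?: SHRINK_STOP;
 *	}
 */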