/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/dma-buf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/iosys-map.h>
#include <linux/mem_encrypt.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
					  GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init_with_mnt - initialize an allocated shmem-backed GEM
 * object in a given shmfs mountpoint
 *
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 * @gemfs: tmpfs mount where the GEM object will be created. If NULL, use
 * the usual tmpfs mountpoint (`shm_mnt`).
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init_with_mnt(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size,
				 struct vfsmount *gemfs)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	if (gemfs)
		filp = shmem_file_setup_with_mnt(gemfs, "drm mm object", size,
						 VM_NORESERVE);
	else
		filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);

	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init_with_mnt);

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj,
			size_t size)
{
	return drm_gem_object_init_with_mnt(dev, obj, size, NULL);
}
EXPORT_SYMBOL(drm_gem_object_init);
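
/*
 * Example: a minimal sketch of how a driver might pair drm_gem_object_init()
 * with its own allocation in a create path. The names (foo_gem_object,
 * foo_gem_funcs, foo_gem_create) are hypothetical, not part of this file.
 *
 *	struct foo_gem_object {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct foo_gem_object *foo_gem_create(struct drm_device *dev,
 *						     size_t size)
 *	{
 *		struct foo_gem_object *fobj;
 *		int ret;
 *
 *		fobj = kzalloc(sizeof(*fobj), GFP_KERNEL);
 *		if (!fobj)
 *			return ERR_PTR(-ENOMEM);
 *
 *		fobj->base.funcs = &foo_gem_funcs;
 *
 *		ret = drm_gem_object_init(dev, &fobj->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(fobj);
 *			return ERR_PTR(ret);
 *		}
 *
 *		return fobj;
 *	}
 */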

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	if (drm_core_check_feature(dev, DRIVER_GEM_GPUVA))
		drm_gem_gpuva_init(obj);

	drm_vma_node_reset(&obj->vma_node);
	INIT_LIST_HEAD(&obj->lru_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * drm_gem_private_object_fini - Finalize a failed drm_gem_object
 * @obj: drm_gem_object
 *
 * Uninitialize an already allocated GEM object when its initialization failed.
 */
void drm_gem_private_object_fini(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	dma_resv_fini(&obj->_resv);
}
EXPORT_SYMBOL(drm_gem_private_object_fini);

static void drm_gem_object_handle_get(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_WARN_ON(dev, !mutex_is_locked(&dev->object_name_lock));

	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);
}

/**
 * drm_gem_object_handle_get_unlocked - acquire reference on user-space handles
 * @obj: GEM object
 *
 * Acquires a reference on the GEM buffer object's handle. Required
 * to keep the GEM object alive. Call drm_gem_object_handle_put_unlocked()
 * to release the reference.
 */
void drm_gem_object_handle_get_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	guard(mutex)(&dev->object_name_lock);

	drm_WARN_ON(dev, !obj->handle_count); /* first ref taken in create-tail helper */
	drm_gem_object_handle_get(obj);
}
EXPORT_SYMBOL(drm_gem_object_handle_get_unlocked);

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}
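
/*
 * Example: a minimal sketch of keeping handle-bound resources alive across
 * asynchronous work with the helpers above; the work-queue plumbing here is
 * hypothetical.
 *
 *	drm_gem_object_handle_get_unlocked(obj);
 *	queue_work(priv->wq, &job->work);
 *
 *	... and in the work function, once the job is done with the object:
 *
 *	drm_gem_object_handle_put_unlocked(obj);
 */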

/**
 * drm_gem_object_handle_put_unlocked - releases reference on user-space handles
 * @obj: GEM object
 *
 * Releases a reference on the GEM buffer object's handle. Possibly releases
 * the GEM buffer object and associated dma-buf objects.
 */
void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name.
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_object_handle_put_unlocked);

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;

	if (obj->funcs->close)
		obj->funcs->close(obj, file_priv);

	drm_prime_remove_buf_handle(&file_priv->prime, id);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (drm_gem_is_imported(obj)) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	drm_gem_object_handle_get(obj);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
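
/*
 * Example: a minimal sketch of a &drm_driver.dumb_create implementation,
 * calling drm_gem_handle_create() last as required above. foo_gem_create()
 * is the hypothetical helper from the drm_gem_object_init() example.
 *
 *	static int foo_dumb_create(struct drm_file *file, struct drm_device *dev,
 *				   struct drm_mode_create_dumb *args)
 *	{
 *		struct foo_gem_object *fobj;
 *		int ret;
 *
 *		args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 *		args->size = PAGE_ALIGN(args->pitch * args->height);
 *
 *		fobj = foo_gem_create(dev, args->size);
 *		if (IS_ERR(fobj))
 *			return PTR_ERR(fobj);
 *
 *		ret = drm_gem_handle_create(file, &fobj->base, &args->handle);
 *		drm_gem_object_put(&fobj->base);  (the handle holds its own ref)
 *		return ret;
 *	}
 */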

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
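
/*
 * Example: the userspace side of the fake-offset scheme described above, for
 * a dumb buffer; "fd" is the opened DRM device node.
 *
 *	struct drm_mode_map_dumb map = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 */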

/*
 * Move folios to appropriate lru and release the folios, decrementing the
 * ref count of those folios.
 */
static void drm_gem_check_release_batch(struct folio_batch *fbatch)
{
	check_move_unevictable_folios(fbatch);
	__folio_batch_release(fbatch);
	cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(), but not for those initialized with
 * drm_gem_private_object_init() only.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page **pages;
	struct folio *folio;
	struct folio_batch fbatch;
	long i, j, npages;

	if (WARN_ON(!obj->filp))
		return ERR_PTR(-EINVAL);

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	i = 0;
	while (i < npages) {
		long nr;
		folio = shmem_read_folio_gfp(mapping, i,
					     mapping_gfp_mask(mapping));
		if (IS_ERR(folio))
			goto fail;
		nr = min(npages - i, folio_nr_pages(folio));
		for (j = 0; j < nr; j++, i++)
			pages[i] = folio_file_page(folio, i);

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (folio_pfn(folio) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	folio_batch_init(&fbatch);
	j = 0;
	while (j < i) {
		struct folio *f = page_folio(pages[j]);
		if (!folio_batch_add(&fbatch, f))
			drm_gem_check_release_batch(&fbatch);
		j += folio_nr_pages(f);
	}
	if (fbatch.nr)
		drm_gem_check_release_batch(&fbatch);

	kvfree(pages);
	return ERR_CAST(folio);
}
EXPORT_SYMBOL(drm_gem_get_pages);
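
/*
 * Example: a minimal sketch of pinning and later releasing the backing store
 * of a shmem-backed object around device access.
 *
 *	struct page **pages;
 *
 *	pages = drm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *
 *	... map the pages into the device and do the access ...
 *
 *	drm_gem_put_pages(obj, pages, true, false);
 */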

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct folio_batch fbatch;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	folio_batch_init(&fbatch);
	for (i = 0; i < npages; i++) {
		struct folio *folio;

		if (!pages[i])
			continue;
		folio = page_folio(pages[i]);

		if (dirty)
			folio_mark_dirty(folio);

		if (accessed)
			folio_mark_accessed(folio);

		/* Undo the reference we took when populating the table */
		if (!folio_batch_add(&fbatch, folio))
			drm_gem_check_release_batch(&fbatch);
		i += folio_nr_pages(folio) - 1;
	}
	if (folio_batch_count(&fbatch))
		drm_gem_check_release_batch(&fbatch);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 * @objs filled in with GEM object pointers. Returned GEM objects need to be
 * released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			      GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	*objs_out = objs;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
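
/*
 * Example: a minimal sketch of consuming a userspace handle array in an
 * ioctl; the argument struct (args->bo_handles, args->bo_count) is
 * hypothetical.
 *
 *	struct drm_gem_object **objs = NULL;
 *	int i, ret;
 *
 *	ret = drm_gem_objects_lookup(file, u64_to_user_ptr(args->bo_handles),
 *				     args->bo_count, &objs);
 *	if (ret)
 *		return ret;
 *
 *	... use the objects ...
 *
 *	for (i = 0; i < args->bo_count; i++)
 *		drm_gem_object_put(objs[i]);
 *	kvfree(objs);
 */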

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 *
 * Returns:
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 * 0 on success, -ETIME if the wait timed out, -ERESTARTSYS if interrupted,
 * or -EINVAL if the handle lookup failed.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
				    true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
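
/*
 * Example: a minimal sketch of a driver wait ioctl built on this helper; the
 * argument struct and the use of drm_timeout_abs_to_jiffies() are
 * assumptions, not prescribed by this file.
 *
 *	static int foo_wait_ioctl(struct drm_device *dev, void *data,
 *				  struct drm_file *file)
 *	{
 *		struct drm_foo_wait *args = data;
 *		unsigned long timeout =
 *			drm_timeout_abs_to_jiffies(args->timeout_ns);
 *
 *		return drm_gem_dma_resv_wait(file, args->handle, true, timeout);
 *	}
 */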

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;
	args->size = obj->size;

err:
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}
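
/*
 * Example: the userspace view of the flink/open ioctls above, sharing a
 * buffer between two DRM file descriptions by global name:
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);
 *
 *	struct drm_gem_open args = { .name = flink.name };
 *	ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &args);
 *	(args.handle and args.size are now valid on fd_b)
 */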

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse of
 * drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	if (obj->filp)
		fput(obj->filp);

	drm_gem_private_object_fini(obj);

	drm_gem_free_mmap_offset(obj);
	drm_gem_lru_remove(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);

	if (WARN_ON(!obj->funcs->free))
		return;

	obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
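
/*
 * Example: a minimal sketch of driver vm_ops built on the two helpers above;
 * the fault handler is driver-specific and hypothetical here.
 *
 *	static const struct vm_operations_struct foo_gem_vm_ops = {
 *		.fault = foo_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 */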

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the GEM object's
 * vm_ops. Depending on their requirements, GEM objects can either
 * provide a fault handler in their vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	vma->vm_private_data = obj;
	vma->vm_ops = obj->funcs->vm_ops;

	if (obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret)
			goto err_drm_gem_object_put;
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (!vma->vm_ops) {
			ret = -EINVAL;
			goto err_drm_gem_object_put;
		}

		vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
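
/*
 * Example: drm_gem_mmap() is typically wired up through the driver's
 * &file_operations, e.g. with the DEFINE_DRM_GEM_FOPS() helper; the driver
 * name here is hypothetical.
 *
 *	DEFINE_DRM_GEM_FOPS(foo_fops);
 *
 *	static const struct drm_driver foo_driver = {
 *		...
 *		.fops = &foo_fops,
 *	};
 */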

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  str_yes_no(drm_gem_is_imported(obj)));

	if (obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
}

int drm_gem_pin_locked(struct drm_gem_object *obj)
{
	if (obj->funcs->pin)
		return obj->funcs->pin(obj);

	return 0;
}

void drm_gem_unpin_locked(struct drm_gem_object *obj)
{
	if (obj->funcs->unpin)
		obj->funcs->unpin(obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	int ret;

	dma_resv_lock(obj->resv, NULL);
	ret = drm_gem_pin_locked(obj);
	dma_resv_unlock(obj->resv);

	return ret;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	dma_resv_lock(obj->resv, NULL);
	drm_gem_unpin_locked(obj);
	dma_resv_unlock(obj->resv);
}

int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	dma_resv_assert_held(obj->resv);

	if (!obj->funcs->vmap)
		return -EOPNOTSUPP;

	ret = obj->funcs->vmap(obj, map);
	if (ret)
		return ret;
	else if (iosys_map_is_null(map))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(drm_gem_vmap_locked);

void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
	dma_resv_assert_held(obj->resv);

	if (iosys_map_is_null(map))
		return;

	if (obj->funcs->vunmap)
		obj->funcs->vunmap(obj, map);

	/* Always set the mapping to NULL. Callers may rely on this. */
	iosys_map_clear(map);
}
EXPORT_SYMBOL(drm_gem_vunmap_locked);

void drm_gem_lock(struct drm_gem_object *obj)
{
	dma_resv_lock(obj->resv, NULL);
}
EXPORT_SYMBOL(drm_gem_lock);

void drm_gem_unlock(struct drm_gem_object *obj)
{
	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_unlock);

int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	dma_resv_lock(obj->resv, NULL);
	ret = drm_gem_vmap_locked(obj, map);
	dma_resv_unlock(obj->resv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_vmap);

void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	dma_resv_lock(obj->resv, NULL);
	drm_gem_vunmap_locked(obj, map);
	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_vunmap);
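
/*
 * Example: a minimal sketch of a short CPU access through the vmap helpers
 * above, clearing the whole buffer.
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = drm_gem_vmap(obj, &map);
 *	if (ret)
 *		return ret;
 *
 *	iosys_map_memset(&map, 0, 0, obj->size);
 *	drm_gem_vunmap(obj, &map);
 */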

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
						       acquire_ctx);
		if (ret) {
			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
						  acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
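
/*
 * Example: a minimal sketch of a submit path using the reservation helpers
 * above; fence creation and error handling are elided.
 *
 *	struct ww_acquire_ctx ctx;
 *	int i, ret;
 *
 *	ret = drm_gem_lock_reservations(objs, count, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < count; i++) {
 *		ret = dma_resv_reserve_fences(objs[i]->resv, 1);
 *		if (!ret)
 *			dma_resv_add_fence(objs[i]->resv, fence,
 *					   DMA_RESV_USAGE_WRITE);
 *	}
 *
 *	drm_gem_unlock_reservations(objs, count, &ctx);
 */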

/**
 * drm_gem_lru_init - initialize a LRU
 *
 * @lru: The LRU to initialize
 * @lock: The lock protecting the LRU
 */
void
drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock)
{
	lru->lock = lock;
	lru->count = 0;
	INIT_LIST_HEAD(&lru->list);
}
EXPORT_SYMBOL(drm_gem_lru_init);

static void
drm_gem_lru_remove_locked(struct drm_gem_object *obj)
{
	obj->lru->count -= obj->size >> PAGE_SHIFT;
	WARN_ON(obj->lru->count < 0);
	list_del(&obj->lru_node);
	obj->lru = NULL;
}

/**
 * drm_gem_lru_remove - remove object from whatever LRU it is in
 *
 * If the object is currently in any LRU, remove it.
 *
 * @obj: The GEM object to remove from current LRU
 */
void
drm_gem_lru_remove(struct drm_gem_object *obj)
{
	struct drm_gem_lru *lru = obj->lru;

	if (!lru)
		return;

	mutex_lock(lru->lock);
	drm_gem_lru_remove_locked(obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_remove);

/**
 * drm_gem_lru_move_tail_locked - move the object to the tail of the LRU
 *
 * Like &drm_gem_lru_move_tail but lru lock must be held
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	lockdep_assert_held_once(lru->lock);

	if (obj->lru)
		drm_gem_lru_remove_locked(obj);

	lru->count += obj->size >> PAGE_SHIFT;
	list_add_tail(&obj->lru_node, &lru->list);
	obj->lru = lru;
}
EXPORT_SYMBOL(drm_gem_lru_move_tail_locked);

/**
 * drm_gem_lru_move_tail - move the object to the tail of the LRU
 *
 * If the object is already in this LRU it will be moved to the
 * tail. Otherwise it will be removed from whichever other LRU
 * it is in (if any) and moved into this LRU.
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	mutex_lock(lru->lock);
	drm_gem_lru_move_tail_locked(lru, obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_move_tail);
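
/*
 * Example: a minimal sketch of LRU bookkeeping in a driver; when to move
 * objects between LRUs is driver policy, and the names here are hypothetical.
 *
 *	drm_gem_lru_init(&priv->lru_pinned, &priv->lru_lock);
 *	drm_gem_lru_init(&priv->lru_idle, &priv->lru_lock);
 *
 *	... on unpin, make the object visible to the shrinker:
 *
 *	drm_gem_lru_move_tail(&priv->lru_idle, obj);
 */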

/**
 * drm_gem_lru_scan - helper to implement shrinker.scan_objects
 *
 * If the shrink callback succeeds, it is expected that the driver
 * move the object out of this LRU.
 *
 * If the LRU possibly contains active buffers, it is the responsibility
 * of the shrink callback to check for this (ie. dma_resv_test_signaled())
 * or if necessary block until the buffer becomes idle.
 *
 * @lru: The LRU to scan
 * @nr_to_scan: The number of pages to try to reclaim
 * @remaining: The number of pages left to reclaim, should be initialized by caller
 * @shrink: Callback to try to shrink/reclaim the object.
 */
unsigned long
drm_gem_lru_scan(struct drm_gem_lru *lru,
		 unsigned int nr_to_scan,
		 unsigned long *remaining,
		 bool (*shrink)(struct drm_gem_object *obj))
{
	struct drm_gem_lru still_in_lru;
	struct drm_gem_object *obj;
	unsigned freed = 0;

	drm_gem_lru_init(&still_in_lru, lru->lock);

	mutex_lock(lru->lock);

	while (freed < nr_to_scan) {
		obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);

		if (!obj)
			break;

		drm_gem_lru_move_tail_locked(&still_in_lru, obj);

		/*
		 * If it's in the process of being freed, gem_object->free()
		 * may be blocked on lock waiting to remove it. So just
		 * skip it.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			continue;

		/*
		 * Now that we own a reference, we can drop the lock for the
		 * rest of the loop body, to reduce contention with other
		 * code paths that need the LRU lock
		 */
		mutex_unlock(lru->lock);

		/*
		 * Note that this still needs to be trylock, since we can
		 * hit shrinker in response to trying to get backing pages
		 * for this obj (ie. while its lock is already held)
		 */
		if (!dma_resv_trylock(obj->resv)) {
			*remaining += obj->size >> PAGE_SHIFT;
			goto tail;
		}

		if (shrink(obj)) {
			freed += obj->size >> PAGE_SHIFT;

			/*
			 * If we succeeded in releasing the object's backing
			 * pages, we expect the driver to have moved the object
			 * out of this LRU
			 */
			WARN_ON(obj->lru == &still_in_lru);
			WARN_ON(obj->lru == lru);
		}

		dma_resv_unlock(obj->resv);

tail:
		drm_gem_object_put(obj);
		mutex_lock(lru->lock);
	}

	/*
	 * Move objects we've skipped over out of the temporary still_in_lru
	 * back into this LRU
	 */
	list_for_each_entry (obj, &still_in_lru.list, lru_node)
		obj->lru = lru;
	list_splice_tail(&still_in_lru.list, &lru->list);
	lru->count += still_in_lru.count;

	mutex_unlock(lru->lock);

	return freed;
}
EXPORT_SYMBOL(drm_gem_lru_scan);

/**
 * drm_gem_evict_locked - helper to evict backing pages for a GEM object
 * @obj: obj in question
 */
int drm_gem_evict_locked(struct drm_gem_object *obj)
{
	dma_resv_assert_held(obj->resv);

	if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
		return -EBUSY;

	if (obj->funcs->evict)
		return obj->funcs->evict(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_evict_locked);
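
/*
 * Example: a minimal sketch of a shrinker scan_objects callback built on
 * drm_gem_lru_scan() and drm_gem_evict_locked(); the driver plumbing is
 * hypothetical.
 *
 *	static bool foo_gem_shrink(struct drm_gem_object *obj)
 *	{
 *		if (drm_gem_evict_locked(obj))
 *			return false;
 *		drm_gem_lru_remove(obj);  (move the object out of the scanned LRU)
 *		return true;
 *	}
 *
 *	static unsigned long foo_shrinker_scan(struct shrinker *shrinker,
 *					       struct shrink_control *sc)
 *	{
 *		unsigned long remaining = 0;
 *		unsigned long freed;
 *
 *		freed = drm_gem_lru_scan(&priv->lru_idle, sc->nr_to_scan,
 *					 &remaining, foo_gem_shrink);
 *		return freed ?: SHRINK_STOP;
 *	}
 */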