/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#include <linux/fs_context.h>
#endif
#include <linux/iosys-map.h>
#include <linux/mem_encrypt.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.
 * The objects themselves will still include the struct file so that we can
 * transition to fds if the required kernel infrastructure shows up at a
 * later date, and as our interface with shmfs for memory allocation.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void drm_gem_huge_mnt_free(struct drm_device *dev, void *data)
{
        kern_unmount(dev->huge_mnt);
}

/**
 * drm_gem_huge_mnt_create - Create, mount and use a huge tmpfs mountpoint
 * @dev: DRM device that will use the huge tmpfs mountpoint
 * @value: huge tmpfs mount option value
 *
 * This function creates and mounts a dedicated huge tmpfs mountpoint for the
 * lifetime of the DRM device @dev, which is then used at GEM object
 * initialization by drm_gem_object_init().
 *
 * The most common option for @value is "within_size", which only allocates
 * huge pages if the page will be fully within the GEM object size. "always",
 * "advise" and "never" are supported too, but the latter would just create a
 * mountpoint similar to the default one (`shm_mnt`). See the shmem and
 * Transparent Hugepage documentation for more information.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_huge_mnt_create(struct drm_device *dev, const char *value)
{
        struct file_system_type *type;
        struct fs_context *fc;
        int ret;

        if (unlikely(drm_gem_get_huge_mnt(dev)))
                return 0;

        type = get_fs_type("tmpfs");
        if (unlikely(!type))
                return -EOPNOTSUPP;

        fc = fs_context_for_mount(type, SB_KERNMOUNT);
        if (IS_ERR(fc))
                return PTR_ERR(fc);

        ret = vfs_parse_fs_string(fc, "source", "tmpfs");
        if (unlikely(ret)) {
                put_fs_context(fc);
                return -ENOPARAM;
        }

        ret = vfs_parse_fs_string(fc, "huge", value);
        if (unlikely(ret)) {
                put_fs_context(fc);
                return -ENOPARAM;
        }

        dev->huge_mnt = fc_mount_longterm(fc);
        put_fs_context(fc);
        if (IS_ERR(dev->huge_mnt)) {
                ret = PTR_ERR(dev->huge_mnt);
                dev->huge_mnt = NULL;
                return ret;
        }

        return drmm_add_action_or_reset(dev, drm_gem_huge_mnt_free, NULL);
}
EXPORT_SYMBOL_GPL(drm_gem_huge_mnt_create);
#endif
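
/*
 * Usage sketch (illustrative only, not part of this file): a driver that
 * wants huge-page backed GEM objects would typically set up the mountpoint
 * once at device initialization, before any drm_gem_object_init() call.
 * The function name below is hypothetical, and the helper only exists when
 * CONFIG_TRANSPARENT_HUGEPAGE is enabled.
 */
#if 0
static int example_driver_init(struct drm_device *dev)
{
        int ret;

        /* Use huge pages whenever they fit entirely within the object. */
        ret = drm_gem_huge_mnt_create(dev, "within_size");
        if (ret)
                return ret;

        return 0;
}
#endif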
static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
        drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
        struct drm_vma_offset_manager *vma_offset_manager;

        mutex_init(&dev->object_name_lock);
        idr_init_base(&dev->object_name_idr, 1);

        vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
                                          GFP_KERNEL);
        if (!vma_offset_manager)
                return -ENOMEM;

        dev->vma_offset_manager = vma_offset_manager;
        drm_vma_offset_manager_init(vma_offset_manager,
                                    DRM_FILE_PAGE_OFFSET_START,
                                    DRM_FILE_PAGE_OFFSET_SIZE);

        return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store. A huge mountpoint can be used by calling
 * drm_gem_huge_mnt_create() beforehand.
 */
int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj,
                        size_t size)
{
        struct vfsmount *huge_mnt;
        struct file *filp;
        const vma_flags_t flags = mk_vma_flags(VMA_NORESERVE_BIT);

        drm_gem_private_object_init(dev, obj, size);

        huge_mnt = drm_gem_get_huge_mnt(dev);
        if (huge_mnt)
                filp = shmem_file_setup_with_mnt(huge_mnt, "drm mm object",
                                                 size, flags);
        else
                filp = shmem_file_setup("drm mm object", size, flags);

        if (IS_ERR(filp))
                return PTR_ERR(filp);

        obj->filp = filp;

        return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
                                 struct drm_gem_object *obj, size_t size)
{
        BUG_ON((size & (PAGE_SIZE - 1)) != 0);

        obj->dev = dev;
        obj->filp = NULL;

        kref_init(&obj->refcount);
        obj->handle_count = 0;
        obj->size = size;
        mutex_init(&obj->gpuva.lock);
        dma_resv_init(&obj->_resv);
        if (!obj->resv)
                obj->resv = &obj->_resv;

        if (drm_core_check_feature(dev, DRIVER_GEM_GPUVA))
                drm_gem_gpuva_init(obj);

        drm_vma_node_reset(&obj->vma_node);
        INIT_LIST_HEAD(&obj->lru_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
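
/*
 * Usage sketch (illustrative only, not part of this file): a minimal
 * shmem-backed object constructor in a driver. "example_bo" and
 * "example_bo_funcs" are hypothetical driver types; the funcs struct is
 * expected to provide at least a .free callback.
 */
#if 0
struct example_bo {
        struct drm_gem_object base;
};

static struct drm_gem_object *example_bo_create(struct drm_device *dev,
                                                size_t size)
{
        struct example_bo *bo;
        int ret;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return ERR_PTR(-ENOMEM);

        /* Hypothetical &drm_gem_object_funcs with at least .free set. */
        bo->base.funcs = &example_bo_funcs;

        /* drm_gem_object_init() requires a page-aligned size. */
        ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
        if (ret) {
                kfree(bo);
                return ERR_PTR(ret);
        }

        return &bo->base;
}
#endif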

/**
 * drm_gem_private_object_fini - Finalize a failed drm_gem_object
 * @obj: drm_gem_object
 *
 * Uninitialize an already allocated GEM object when its initialization
 * failed.
 */
void drm_gem_private_object_fini(struct drm_gem_object *obj)
{
        WARN_ON(obj->dma_buf);

        dma_resv_fini(&obj->_resv);
        mutex_destroy(&obj->gpuva.lock);
}
EXPORT_SYMBOL(drm_gem_private_object_fini);

static void drm_gem_object_handle_get(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        drm_WARN_ON(dev, !mutex_is_locked(&dev->object_name_lock));

        if (obj->handle_count++ == 0)
                drm_gem_object_get(obj);
}

/**
 * drm_gem_object_handle_get_if_exists_unlocked - acquire reference on user-space handle, if any
 * @obj: GEM object
 *
 * Acquires a reference on the GEM buffer object's handle. Required to keep
 * the GEM object alive. Call drm_gem_object_handle_put_unlocked()
 * to release the reference. Does nothing if the buffer object has no handle.
 *
 * Returns:
 * True if a handle exists, or false otherwise
 */
bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        guard(mutex)(&dev->object_name_lock);

        /*
         * First ref taken during GEM object creation, if any. Some
         * drivers set up internal framebuffers with GEM objects that
         * do not have a GEM handle. Hence, this counter can be zero.
         */
        if (!obj->handle_count)
                return false;

        drm_gem_object_handle_get(obj);

        return true;
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        /* Remove any name for this object */
        if (obj->name) {
                idr_remove(&dev->object_name_idr, obj->name);
                obj->name = 0;
        }
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
        /* Unbreak the reference cycle if we have an exported dma_buf. */
        if (obj->dma_buf) {
                dma_buf_put(obj->dma_buf);
                obj->dma_buf = NULL;
        }
}

/**
 * drm_gem_object_handle_put_unlocked - releases reference on user-space handle
 * @obj: GEM object
 *
 * Releases a reference on the GEM buffer object's handle. Possibly releases
 * the GEM buffer object and associated dma-buf objects.
 */
void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        bool final = false;

        if (drm_WARN_ON(dev, READ_ONCE(obj->handle_count) == 0))
                return;

        /*
         * Must bump handle count first as this may be the last
         * ref, in which case the object would disappear before
         * we checked for a name.
         */

        mutex_lock(&dev->object_name_lock);
        if (--obj->handle_count == 0) {
                drm_gem_object_handle_free(obj);
                drm_gem_object_exported_dma_buf_free(obj);
                final = true;
        }
        mutex_unlock(&dev->object_name_lock);

        if (final)
                drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
        struct drm_file *file_priv = data;
        struct drm_gem_object *obj = ptr;

        if (drm_WARN_ON(obj->dev, !data))
                return 0;

        if (obj->funcs->close)
                obj->funcs->close(obj, file_priv);

        mutex_lock(&file_priv->prime.lock);

        drm_prime_remove_buf_handle(&file_priv->prime, id);

        mutex_unlock(&file_priv->prime.lock);

        drm_vma_node_revoke(&obj->vma_node, file_priv);

        drm_gem_object_handle_put_unlocked(obj);

        return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added
 * with drm_gem_handle_create(). If this is the last handle, it also cleans
 * up linked resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_replace(&filp->object_idr, NULL, handle);
        spin_unlock(&filp->table_lock);
        if (IS_ERR_OR_NULL(obj))
                return -EINVAL;

        /* Release driver's reference and decrement refcount. */
        drm_gem_object_release_handle(handle, obj, filp);

        /* And finally make the handle available for future allocations. */
        spin_lock(&filp->table_lock);
        idr_remove(&filp->object_idr, handle);
        spin_unlock(&filp->table_lock);

        return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                            u32 handle, u64 *offset)
{
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(file, handle);
        if (!obj)
                return -ENOENT;

        /* Don't allow imported objects to be mapped */
        if (drm_gem_is_imported(obj)) {
                ret = -EINVAL;
                goto out;
        }

        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                goto out;

        *offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
        drm_gem_object_put(obj);

        return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is
 * done when userspace closes @file_priv for all attached handles, or through
 * the GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
                           struct drm_gem_object *obj,
                           u32 *handlep)
{
        struct drm_device *dev = obj->dev;
        u32 handle;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->object_name_lock));

        drm_gem_object_handle_get(obj);

        /*
         * Get the user-visible handle using idr. Preload and perform
         * allocation under our spinlock.
         */
        idr_preload(GFP_KERNEL);
        spin_lock(&file_priv->table_lock);

        ret = idr_alloc(&file_priv->object_idr, NULL, 1, 0, GFP_NOWAIT);

        spin_unlock(&file_priv->table_lock);
        idr_preload_end();

        mutex_unlock(&dev->object_name_lock);
        if (ret < 0)
                goto err_unref;

        handle = ret;

        ret = drm_vma_node_allow(&obj->vma_node, file_priv);
        if (ret)
                goto err_remove;

        if (obj->funcs->open) {
                ret = obj->funcs->open(obj, file_priv);
                if (ret)
                        goto err_revoke;
        }

        /* mirrors drm_gem_handle_delete to avoid races */
        spin_lock(&file_priv->table_lock);
        obj = idr_replace(&file_priv->object_idr, obj, handle);
        WARN_ON(obj != NULL);
        spin_unlock(&file_priv->table_lock);
        *handlep = handle;
        return 0;

err_revoke:
        drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
        spin_lock(&file_priv->table_lock);
        idr_remove(&file_priv->object_idr, handle);
        spin_unlock(&file_priv->table_lock);
err_unref:
        drm_gem_object_handle_put_unlocked(obj);
        return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the
 * object, which includes a regular reference count. Callers will likely want
 * to drop their reference to the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this
 * point, drivers must call this last in their buffer object creation
 * callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
                          struct drm_gem_object *obj,
                          u32 *handlep)
{
        mutex_lock(&obj->dev->object_name_lock);

        return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
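
/*
 * Usage sketch (illustrative only, not part of this file): the tail of a
 * driver's dumb_create callback. The handle is created last, once the
 * object is fully set up, and the creation reference is dropped
 * unconditionally because the handle now holds its own reference.
 * example_bo_create() is the hypothetical constructor sketched above.
 */
#if 0
static int example_dumb_create(struct drm_file *file_priv,
                               struct drm_device *dev,
                               struct drm_mode_create_dumb *args)
{
        struct drm_gem_object *obj;
        int ret;

        args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        args->size = PAGE_ALIGN((u64)args->pitch * args->height);

        obj = example_bo_create(dev, args->size);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        ret = drm_gem_handle_create(file_priv, obj, &args->handle);
        drm_gem_object_put(obj);

        return ret;
}
#endif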

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
        struct drm_device *dev = obj->dev;

        return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
                                  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
        return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/*
 * Move folios to appropriate lru and release the folios, decrementing the
 * ref count of those folios.
 */
static void drm_gem_check_release_batch(struct folio_batch *fbatch)
{
        check_move_unevictable_folios(fbatch);
        __folio_batch_release(fbatch);
        cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(), but not for those initialized with
 * drm_gem_private_object_init() only.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
        struct address_space *mapping;
        struct page **pages;
        struct folio *folio;
        struct folio_batch fbatch;
        unsigned long i, j, npages;

        if (WARN_ON(!obj->filp))
                return ERR_PTR(-EINVAL);

        /* This is the shared memory object that backs the GEM resource */
        mapping = obj->filp->f_mapping;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        pages = kvmalloc_objs(struct page *, npages);
        if (pages == NULL)
                return ERR_PTR(-ENOMEM);

        mapping_set_unevictable(mapping);

        i = 0;
        while (i < npages) {
                unsigned long nr;

                folio = shmem_read_folio_gfp(mapping, i,
                                             mapping_gfp_mask(mapping));
                if (IS_ERR(folio))
                        goto fail;
                nr = min(npages - i, folio_nr_pages(folio));
                for (j = 0; j < nr; j++, i++)
                        pages[i] = folio_file_page(folio, i);

                /* Make sure shmem keeps __GFP_DMA32 allocated pages in the
                 * correct region during swapin. Note that this requires
                 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
                 * so shmem can relocate pages during swapin if required.
                 */
                BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
                       (folio_pfn(folio) >= 0x00100000UL));
        }

        return pages;

fail:
        mapping_clear_unevictable(mapping);
        folio_batch_init(&fbatch);
        j = 0;
        while (j < i) {
                struct folio *f = page_folio(pages[j]);

                if (!folio_batch_add(&fbatch, f))
                        drm_gem_check_release_batch(&fbatch);
                j += folio_nr_pages(f);
        }
        if (fbatch.nr)
                drm_gem_check_release_batch(&fbatch);

        kvfree(pages);
        return ERR_CAST(folio);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                       bool dirty, bool accessed)
{
        int i, npages;
        struct address_space *mapping;
        struct folio_batch fbatch;

        mapping = file_inode(obj->filp)->i_mapping;
        mapping_clear_unevictable(mapping);

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        folio_batch_init(&fbatch);
        for (i = 0; i < npages; i++) {
                struct folio *folio;

                if (!pages[i])
                        continue;
                folio = page_folio(pages[i]);

                if (dirty)
                        folio_mark_dirty(folio);

                if (accessed)
                        folio_mark_accessed(folio);

                /* Undo the reference we took when populating the table */
                if (!folio_batch_add(&fbatch, folio))
                        drm_gem_check_release_batch(&fbatch);
                i += folio_nr_pages(folio) - 1;
        }
        if (folio_batch_count(&fbatch))
                drm_gem_check_release_batch(&fbatch);

        kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
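
/*
 * Usage sketch (illustrative only, not part of this file):
 * drm_gem_get_pages() and drm_gem_put_pages() are used in pairs, bracketing
 * the time during which the driver has the pages mapped into the device.
 */
#if 0
static int example_use_pages(struct drm_gem_object *obj)
{
        struct page **pages;

        pages = drm_gem_get_pages(obj);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        /* ... map the pages into the device and use them ... */

        drm_gem_put_pages(obj, pages, true, true);

        return 0;
}
#endif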

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
                          struct drm_gem_object **objs)
{
        int i;
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        for (i = 0; i < count; i++) {
                /* Check if we currently have a reference on the object */
                obj = idr_find(&filp->object_idr, handle[i]);
                if (!obj)
                        goto err;

                drm_gem_object_get(obj);
                objs[i] = obj;
        }

        spin_unlock(&filp->table_lock);
        return 0;

err:
        spin_unlock(&filp->table_lock);

        while (i--)
                drm_gem_object_put(objs[i]);

        return -ENOENT;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 * 0 on success, with @objs_out filled in with GEM object pointers. The
 * returned GEM objects need to be released with drm_gem_object_put().
 * -ENOENT is returned on a lookup failure.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
                           int count, struct drm_gem_object ***objs_out)
{
        struct drm_gem_object **objs;
        u32 *handles;
        int ret;

        *objs_out = NULL;

        if (!count)
                return 0;

        objs = kvmalloc_objs(*objs, count);
        if (!objs)
                return -ENOMEM;

        handles = vmemdup_array_user(bo_handles, count, sizeof(u32));
        if (IS_ERR(handles)) {
                ret = PTR_ERR(handles);
                goto err_free_objs;
        }

        ret = objects_lookup(filp, handles, count, objs);
        if (ret)
                goto err_free_handles;

        kvfree(handles);
        *objs_out = objs;
        return 0;

err_free_handles:
        kvfree(handles);
err_free_objs:
        kvfree(objs);
        return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 *
 * Returns:
 * A reference to the object named by the handle if such exists on @filp,
 * NULL otherwise.
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
        struct drm_gem_object *obj = NULL;

        objects_lookup(filp, &handle, 1, &obj);
        return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
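
/*
 * Usage sketch (illustrative only, not part of this file): resolving an
 * array of userspace handles in a submit ioctl. Both the objects and the
 * returned array must be released by the caller.
 */
#if 0
static int example_submit(struct drm_file *file, void __user *bo_handles,
                          int count)
{
        struct drm_gem_object **objs;
        int i, ret;

        ret = drm_gem_objects_lookup(file, bo_handles, count, &objs);
        if (ret)
                return ret;

        /* ... use the objects ... */

        for (i = 0; i < count; i++)
                drm_gem_object_put(objs[i]);
        kvfree(objs);

        return 0;
}
#endif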

/**
 * drm_gem_dma_resv_wait - Wait on a GEM object's reservation's shared
 * and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 * 0 on success, -ETIME if the wait timed out, or another negative error code
 * (e.g. -ERESTARTSYS if interrupted) on failure.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
                           bool wait_all, unsigned long timeout)
{
        struct drm_device *dev = filep->minor->dev;
        struct drm_gem_object *obj;
        long ret;

        obj = drm_gem_object_lookup(filep, handle);
        if (!obj) {
                drm_dbg_core(dev, "Failed to look up GEM BO %d\n", handle);
                return -EINVAL;
        }

        ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
                                    true, timeout);
        if (ret == 0)
                ret = -ETIME;
        else if (ret > 0)
                ret = 0;

        drm_gem_object_put(obj);

        return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
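
/*
 * Usage sketch (illustrative only, not part of this file): a driver wait
 * ioctl built on drm_gem_dma_resv_wait(). The argument struct is
 * hypothetical.
 */
#if 0
static int example_wait_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct example_wait_args *args = data;

        return drm_gem_dma_resv_wait(file_priv, args->handle, true,
                                     usecs_to_jiffies(args->timeout_us));
}
#endif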

int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_close *args = data;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        return drm_gem_handle_delete(file_priv, args->handle);
}

int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_flink *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        obj = drm_gem_object_lookup(file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;

        mutex_lock(&dev->object_name_lock);
        /* prevent races with concurrent gem_close. */
        if (obj->handle_count == 0) {
                ret = -ENOENT;
                goto err;
        }

        if (!obj->name) {
                ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
                if (ret < 0)
                        goto err;

                obj->name = ret;
        }

        args->name = (uint64_t) obj->name;
        ret = 0;

err:
        mutex_unlock(&dev->object_name_lock);
        drm_gem_object_put(obj);
        return ret;
}

int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file_priv)
{
        struct drm_gem_open *args = data;
        struct drm_gem_object *obj;
        int ret;
        u32 handle;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        mutex_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, (int) args->name);
        if (obj) {
                drm_gem_object_get(obj);
        } else {
                mutex_unlock(&dev->object_name_lock);
                return -ENOENT;
        }

        /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
        ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
        if (ret)
                goto err;

        args->handle = handle;
        args->size = obj->size;

err:
        drm_gem_object_put(obj);
        return ret;
}

int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct drm_gem_change_handle *args = data;
        struct drm_gem_object *obj;
        int handle, ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        /* idr_alloc() limitation. */
        if (args->new_handle > INT_MAX)
                return -EINVAL;
        handle = args->new_handle;

        obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!obj)
                return -ENOENT;

        if (args->handle == handle) {
                ret = 0;
                goto out;
        }

        mutex_lock(&file_priv->prime.lock);

        spin_lock(&file_priv->table_lock);
        ret = idr_alloc(&file_priv->object_idr, obj, handle, handle + 1,
                        GFP_NOWAIT);
        spin_unlock(&file_priv->table_lock);

        if (ret < 0)
                goto out_unlock;

        if (obj->dma_buf) {
                ret = drm_prime_add_buf_handle(&file_priv->prime, obj->dma_buf,
                                               handle);
                if (ret < 0) {
                        spin_lock(&file_priv->table_lock);
                        idr_remove(&file_priv->object_idr, handle);
                        spin_unlock(&file_priv->table_lock);
                        goto out_unlock;
                }

                drm_prime_remove_buf_handle(&file_priv->prime, args->handle);
        }

        ret = 0;

        spin_lock(&file_priv->table_lock);
        idr_remove(&file_priv->object_idr, args->handle);
        spin_unlock(&file_priv->table_lock);

out_unlock:
        mutex_unlock(&file_priv->prime.lock);
out:
        drm_gem_object_put(obj);

        return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
        idr_init_base(&file_private->object_idr, 1);
        spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
        idr_for_each(&file_private->object_idr,
                     &drm_gem_object_release_handle, file_private);
        idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse
 * of drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
        if (obj->filp)
                fput(obj->filp);

        drm_gem_private_object_fini(obj);

        drm_gem_free_mmap_offset(obj);
        drm_gem_lru_remove(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
        struct drm_gem_object *obj =
                container_of(kref, struct drm_gem_object, refcount);

        if (WARN_ON(!obj->funcs->free))
                return;

        obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the GEM object's
 * vm_ops. Depending on their requirements, GEM objects can either
 * provide a fault handler in their vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation,
 * when the GEM object is not looked up based on its fake offset. To implement
 * the DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success, or -EINVAL if the object size is smaller than the
 * VMA size or no vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
                     struct vm_area_struct *vma)
{
        int ret;

        /* Check for valid size. */
        if (obj_size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        /* Take a ref for this mapping of the object, so that the fault
         * handler can dereference the mmap offset's pointer to the object.
         * This reference is cleaned up by the corresponding vm_close
         * (which should happen whether the vma was created by this call, or
         * by a vm_open due to mremap or partial unmap or whatever).
         */
        drm_gem_object_get(obj);

        vma->vm_private_data = obj;
        vma->vm_ops = obj->funcs->vm_ops;

        if (obj->funcs->mmap) {
                ret = obj->funcs->mmap(obj, vma);
                if (ret)
                        goto err_drm_gem_object_put;
                WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
        } else {
                if (!vma->vm_ops) {
                        ret = -EINVAL;
                        goto err_drm_gem_object_put;
                }

                vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
                vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
                vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
        }

        return 0;

err_drm_gem_object_put:
        drm_gem_object_put(obj);
        return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
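
/*
 * Usage sketch (illustrative only, not part of this file): a
 * &drm_gem_object_funcs.mmap style use of drm_gem_mmap_obj() for dma-buf
 * mmap, where no fake-offset lookup has been performed. Access checks are
 * the exporter's responsibility here.
 */
#if 0
static int example_prime_mmap(struct drm_gem_object *obj,
                              struct vm_area_struct *vma)
{
        /* dma-buf offsets start at zero, so map the whole object. */
        return drm_gem_mmap_obj(obj, obj->size, vma);
}
#endif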

/*
 * Look up a GEM object in offset space based on the exact start address. The
 * caller must be granted access to the object. Returns a GEM object on
 * success or a negative error code on failure. The returned GEM object needs
 * to be released with drm_gem_object_put().
 */
static struct drm_gem_object *
drm_gem_object_lookup_at_offset(struct file *filp, unsigned long start,
                                unsigned long pages)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_gem_object *obj = NULL;
        struct drm_vma_offset_node *node;

        if (drm_dev_is_unplugged(dev))
                return ERR_PTR(-ENODEV);

        drm_vma_offset_lock_lookup(dev->vma_offset_manager);
        node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
                                                  start, pages);
        if (likely(node)) {
                obj = container_of(node, struct drm_gem_object, vma_node);
                /*
                 * When the object is being freed, after it hits 0-refcnt it
                 * proceeds to tear down the object. In the process it will
                 * attempt to remove the VMA offset and so acquire this
                 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
                 * that matches our range, we know it is in the process of being
                 * destroyed and will be freed as soon as we release the lock -
                 * so we have to check for the 0-refcnted object and treat it as
                 * invalid.
                 */
                if (!kref_get_unless_zero(&obj->refcount))
                        obj = NULL;
        }
        drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

        if (!obj)
                return ERR_PTR(-EINVAL);

        if (!drm_vma_node_is_allowed(node, priv)) {
                drm_gem_object_put(obj);
                return ERR_PTR(-EACCES);
        }

        return obj;
}

#ifdef CONFIG_MMU
/**
 * drm_gem_get_unmapped_area - get memory mapping region routine for GEM objects
 * @filp: DRM file pointer
 * @uaddr: User address hint
 * @len: Mapping length
 * @pgoff: Offset (in pages)
 * @flags: Mapping flags
 *
 * If a driver supports GEM object mapping, before ending up in drm_gem_mmap(),
 * mmap calls on the DRM file descriptor will first try to find a free linear
 * address space large enough for a mapping. Since GEM objects are backed by
 * shmem buffers, this should preferably be handled by the shmem virtual memory
 * filesystem, which can appropriately align addresses to huge page sizes when
 * needed.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created) and call shmem_get_unmapped_area() with
 * the right file pointer.
 *
 * If a GEM object is not available at the given offset or if the caller is not
 * granted access to it, fall back to mm_get_unmapped_area().
 */
unsigned long drm_gem_get_unmapped_area(struct file *filp, unsigned long uaddr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags)
{
        struct drm_gem_object *obj;
        unsigned long ret;

        obj = drm_gem_object_lookup_at_offset(filp, pgoff, len >> PAGE_SHIFT);
        if (IS_ERR(obj))
                obj = NULL;

        if (!obj || !obj->filp || !obj->filp->f_op->get_unmapped_area)
                ret = mm_get_unmapped_area(filp, uaddr, len, 0, flags);
        else
                ret = obj->filp->f_op->get_unmapped_area(obj->filp, uaddr, len,
                                                         0, flags);

        drm_gem_object_put(obj);

        return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_get_unmapped_area);
#endif

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created) and map it with a call to
 * drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup_at_offset(filp, vma->vm_pgoff,
                                              vma_pages(vma));
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        ret = drm_gem_mmap_obj(obj,
                               drm_vma_node_size(&obj->vma_node) << PAGE_SHIFT,
                               vma);

        drm_gem_object_put(obj);

        return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
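
/*
 * Usage sketch (illustrative only, not part of this file): drm_gem_mmap()
 * is normally wired up as the file_operations.mmap callback of the DRM
 * device node, most conveniently through the DEFINE_DRM_GEM_FOPS() helper.
 */
#if 0
DEFINE_DRM_GEM_FOPS(example_fops);

static const struct drm_driver example_driver = {
        .fops = &example_fops,
        /* ... */
};
#endif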

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
                        const struct drm_gem_object *obj)
{
        drm_printf_indent(p, indent, "name=%d\n", obj->name);
        drm_printf_indent(p, indent, "refcount=%u\n",
                          kref_read(&obj->refcount));
        drm_printf_indent(p, indent, "start=%08lx\n",
                          drm_vma_node_start(&obj->vma_node));
        drm_printf_indent(p, indent, "size=%zu\n", obj->size);
        drm_printf_indent(p, indent, "imported=%s\n",
                          str_yes_no(drm_gem_is_imported(obj)));

        if (obj->funcs->print_info)
                obj->funcs->print_info(p, indent, obj);
}

int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
        int ret;

        dma_resv_assert_held(obj->resv);

        if (!obj->funcs->vmap)
                return -EOPNOTSUPP;

        ret = obj->funcs->vmap(obj, map);
        if (ret)
                return ret;
        else if (iosys_map_is_null(map))
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL(drm_gem_vmap_locked);

void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
        dma_resv_assert_held(obj->resv);

        if (iosys_map_is_null(map))
                return;

        if (obj->funcs->vunmap)
                obj->funcs->vunmap(obj, map);

        /* Always set the mapping to NULL. Callers may rely on this. */
        iosys_map_clear(map);
}
EXPORT_SYMBOL(drm_gem_vunmap_locked);

void drm_gem_lock(struct drm_gem_object *obj)
{
        dma_resv_lock(obj->resv, NULL);
}
EXPORT_SYMBOL(drm_gem_lock);

void drm_gem_unlock(struct drm_gem_object *obj)
{
        dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_unlock);

int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
        int ret;

        dma_resv_lock(obj->resv, NULL);
        ret = drm_gem_vmap_locked(obj, map);
        dma_resv_unlock(obj->resv);

        return ret;
}
EXPORT_SYMBOL(drm_gem_vmap);

void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
        dma_resv_lock(obj->resv, NULL);
        drm_gem_vunmap_locked(obj, map);
        dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_vunmap);
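
/*
 * Usage sketch (illustrative only, not part of this file): CPU access to a
 * buffer through a temporary kernel mapping. drm_gem_vmap() takes the
 * reservation lock itself; use the _locked variants when it is already held.
 */
#if 0
static int example_cpu_clear(struct drm_gem_object *obj)
{
        struct iosys_map map;
        int ret;

        ret = drm_gem_vmap(obj, &map);
        if (ret)
                return ret;

        iosys_map_memset(&map, 0, 0, obj->size);

        drm_gem_vunmap(obj, &map);

        return 0;
}
#endif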

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
                          struct ww_acquire_ctx *acquire_ctx)
{
        int contended = -1;
        int i, ret;

        ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
        if (contended != -1) {
                struct drm_gem_object *obj = objs[contended];

                ret = dma_resv_lock_slow_interruptible(obj->resv,
                                                       acquire_ctx);
                if (ret) {
                        ww_acquire_fini(acquire_ctx);
                        return ret;
                }
        }

        for (i = 0; i < count; i++) {
                if (i == contended)
                        continue;

                ret = dma_resv_lock_interruptible(objs[i]->resv,
                                                  acquire_ctx);
                if (ret) {
                        int j;

                        for (j = 0; j < i; j++)
                                dma_resv_unlock(objs[j]->resv);

                        if (contended != -1 && contended >= i)
                                dma_resv_unlock(objs[contended]->resv);

                        if (ret == -EDEADLK) {
                                contended = i;
                                goto retry;
                        }

                        ww_acquire_fini(acquire_ctx);
                        return ret;
                }
        }

        ww_acquire_done(acquire_ctx);

        return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
                            struct ww_acquire_ctx *acquire_ctx)
{
        int i;

        for (i = 0; i < count; i++)
                dma_resv_unlock(objs[i]->resv);

        ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);

/**
 * drm_gem_lru_init - initialize a LRU
 *
 * @lru: The LRU to initialize
 * @lock: The lock protecting the LRU
 */
void
drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock)
{
        lru->lock = lock;
        lru->count = 0;
        INIT_LIST_HEAD(&lru->list);
}
EXPORT_SYMBOL(drm_gem_lru_init);

static void
drm_gem_lru_remove_locked(struct drm_gem_object *obj)
{
        obj->lru->count -= obj->size >> PAGE_SHIFT;
        WARN_ON(obj->lru->count < 0);
        list_del(&obj->lru_node);
        obj->lru = NULL;
}

/**
 * drm_gem_lru_remove - remove object from whatever LRU it is in
 *
 * If the object is currently in any LRU, remove it.
 *
 * @obj: The GEM object to remove from current LRU
 */
void
drm_gem_lru_remove(struct drm_gem_object *obj)
{
        struct drm_gem_lru *lru = obj->lru;

        if (!lru)
                return;

        mutex_lock(lru->lock);
        drm_gem_lru_remove_locked(obj);
        mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_remove);

/**
 * drm_gem_lru_move_tail_locked - move the object to the tail of the LRU
 *
 * Like &drm_gem_lru_move_tail but lru lock must be held
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
        lockdep_assert_held_once(lru->lock);

        if (obj->lru)
                drm_gem_lru_remove_locked(obj);

        lru->count += obj->size >> PAGE_SHIFT;
        list_add_tail(&obj->lru_node, &lru->list);
        obj->lru = lru;
}
EXPORT_SYMBOL(drm_gem_lru_move_tail_locked);

/**
 * drm_gem_lru_move_tail - move the object to the tail of the LRU
 *
 * If the object is already in this LRU it will be moved to the
 * tail. Otherwise it will be removed from whichever other LRU
 * it is in (if any) and moved into this LRU.
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
        mutex_lock(lru->lock);
        drm_gem_lru_move_tail_locked(lru, obj);
        mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_move_tail);

/**
 * drm_gem_lru_scan - helper to implement shrinker.scan_objects
 *
 * If the shrink callback succeeds, it is expected that the driver
 * move the object out of this LRU.
 *
 * If the LRU possibly contains active buffers, it is the responsibility
 * of the shrink callback to check for this (ie. dma_resv_test_signaled())
 * or if necessary block until the buffer becomes idle.
 *
 * @lru: The LRU to scan
 * @nr_to_scan: The number of pages to try to reclaim
 * @remaining: The number of pages left to reclaim, should be initialized by caller
 * @shrink: Callback to try to shrink/reclaim the object.
 * @ticket: Optional ww_acquire_ctx context to use for locking
 */
unsigned long
drm_gem_lru_scan(struct drm_gem_lru *lru,
                 unsigned int nr_to_scan,
                 unsigned long *remaining,
                 bool (*shrink)(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket),
                 struct ww_acquire_ctx *ticket)
{
        struct drm_gem_lru still_in_lru;
        struct drm_gem_object *obj;
        unsigned freed = 0;

        drm_gem_lru_init(&still_in_lru, lru->lock);

        mutex_lock(lru->lock);

        while (freed < nr_to_scan) {
                obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);

                if (!obj)
                        break;

                drm_gem_lru_move_tail_locked(&still_in_lru, obj);

                /*
                 * If it's in the process of being freed, gem_object->free()
                 * may be blocked on lock waiting to remove it. So just
                 * skip it.
                 */
                if (!kref_get_unless_zero(&obj->refcount))
                        continue;

                /*
                 * Now that we own a reference, we can drop the lock for the
                 * rest of the loop body, to reduce contention with other
                 * code paths that need the LRU lock.
                 */
                mutex_unlock(lru->lock);

                if (ticket)
                        ww_acquire_init(ticket, &reservation_ww_class);

                /*
                 * Note that this still needs to be a trylock, since we can
                 * hit the shrinker in response to trying to get backing pages
                 * for this obj (ie. while its lock is already held).
                 */
                if (!ww_mutex_trylock(&obj->resv->lock, ticket)) {
                        *remaining += obj->size >> PAGE_SHIFT;
                        goto tail;
                }

                if (shrink(obj, ticket)) {
                        freed += obj->size >> PAGE_SHIFT;

                        /*
                         * If we succeeded in releasing the object's backing
                         * pages, we expect the driver to have moved the object
                         * out of this LRU.
                         */
                        WARN_ON(obj->lru == &still_in_lru);
                        WARN_ON(obj->lru == lru);
                }

                dma_resv_unlock(obj->resv);

                if (ticket)
                        ww_acquire_fini(ticket);

tail:
                drm_gem_object_put(obj);
                mutex_lock(lru->lock);
        }

        /*
         * Move objects we've skipped over out of the temporary still_in_lru
         * back into this LRU.
         */
        list_for_each_entry (obj, &still_in_lru.list, lru_node)
                obj->lru = lru;
        list_splice_tail(&still_in_lru.list, &lru->list);
        lru->count += still_in_lru.count;

        mutex_unlock(lru->lock);

        return freed;
}
EXPORT_SYMBOL(drm_gem_lru_scan);

/**
 * drm_gem_evict_locked - helper to evict backing pages for a GEM object
 * @obj: obj in question
 */
int drm_gem_evict_locked(struct drm_gem_object *obj)
{
        dma_resv_assert_held(obj->resv);

        if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
                return -EBUSY;

        if (obj->funcs->evict)
                return obj->funcs->evict(obj);

        return 0;
}
EXPORT_SYMBOL(drm_gem_evict_locked);
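
/*
 * Usage sketch (illustrative only, not part of this file): a shrinker
 * scan_objects callback built on drm_gem_lru_scan(). "example_device" and
 * the purge callback are hypothetical; the callback is expected to move
 * objects it reclaims out of the scanned LRU. This assumes the
 * shrinker_alloc() API, where driver data hangs off shrinker->private_data.
 */
#if 0
static unsigned long example_shrinker_scan(struct shrinker *shrinker,
                                           struct shrink_control *sc)
{
        struct example_device *edev = shrinker->private_data;
        unsigned long remaining = 0;
        unsigned long freed;

        freed = drm_gem_lru_scan(&edev->lru, sc->nr_to_scan, &remaining,
                                 example_purge, NULL);

        return freed ?: SHRINK_STOP;
}
#endif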