/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2009-2022 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * While no substantial code is shared, the prime code is inspired by
 * drm_prime.c, with
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 */
/** @file ttm_object.c
 *
 * Base- and reference object implementation for the various
 * ttm objects. Implements reference counting, minimal security checks
 * and release on file close.
 */


#define pr_fmt(fmt) "[TTM] " fmt

#include "ttm_object.h"
#include "vmwgfx_drv.h"

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/hashtable.h>

MODULE_IMPORT_NS(DMA_BUF);

#define VMW_TTM_OBJECT_REF_HT_ORDER 10

/**
 * struct ttm_object_file
 *
 * @tdev: Pointer to the ttm_object_device.
 *
 * @lock: Lock that protects the ref_list list and the
 * ref_hash hash table.
 *
 * @ref_list: List of ttm_ref_objects to be destroyed at
 * file release.
 *
 * @ref_hash: Hash table of ref objects, for fast lookup of
 * a ref object given a base object handle.
 *
 * @refcount: reference/usage count
 */
struct ttm_object_file {
        struct ttm_object_device *tdev;
        spinlock_t lock;
        struct list_head ref_list;
        DECLARE_HASHTABLE(ref_hash, VMW_TTM_OBJECT_REF_HT_ORDER);
        struct kref refcount;
};

/*
 * struct ttm_object_device
 *
 * @object_lock: lock that protects the idr.
 *
 * @object_count: Per device object count.
 *
 * @ops: The dma_buf_ops used for the dma-bufs we export.
 *
 * @dmabuf_release: The driver-provided dma_buf release callback,
 * which we wrap with ttm_prime_dmabuf_release().
 *
 * @idr: Maps object handles to base objects.
 *
 * This is the per-device data structure needed for ttm object management.
 */

struct ttm_object_device {
        spinlock_t object_lock;
        atomic_t object_count;
        struct dma_buf_ops ops;
        void (*dmabuf_release)(struct dma_buf *dma_buf);
        struct idr idr;
};

/*
 * struct ttm_ref_object
 *
 * @rcu_head: Used to defer freeing until an RCU grace period has elapsed.
 *
 * @hash: Hash entry for the per-file object reference hash.
 *
 * @head: List entry for the per-file list of ref-objects.
 *
 * @kref: Ref count.
 *
 * @obj: Base object this ref object is referencing.
 *
 * @tfile: The ttm_object_file holding this reference.
 *
 * This is similar to an idr object, but it also has a hash table entry
 * that allows lookup with the handle of the referenced base object as a
 * key. In that way, one can easily detect whether a base object is
 * referenced by a particular ttm_object_file. It also carries a ref count
 * to avoid creating multiple ref objects if a ttm_object_file references
 * the same base object more than once.
 */

struct ttm_ref_object {
        struct rcu_head rcu_head;
        struct vmwgfx_hash_item hash;
        struct list_head head;
        struct kref kref;
        struct ttm_base_object *obj;
        struct ttm_object_file *tfile;
};
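
/*
 * Illustrative sketch (not part of the driver): with
 * VMW_TTM_OBJECT_REF_HT_ORDER == 10, DECLARE_HASHTABLE() above gives each
 * file 1 << 10 buckets. A ref object is found by hashing the 64-bit base
 * object handle into a bucket and walking that bucket's collision chain,
 * roughly:
 *
 *      struct vmwgfx_hash_item *hash;
 *      uint64_t key = base->handle;
 *
 *      hash_for_each_possible_rcu(tfile->ref_hash, hash, head, key)
 *              if (hash->key == key)
 *                      break;  // hash now points at the matching entry
 *
 * ttm_tfile_find_ref_rcu() below implements exactly this walk.
 */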

static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);

static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
        kref_get(&tfile->refcount);
        return tfile;
}

static int ttm_tfile_find_ref_rcu(struct ttm_object_file *tfile,
                                  uint64_t key,
                                  struct vmwgfx_hash_item **p_hash)
{
        struct vmwgfx_hash_item *hash;

        hash_for_each_possible_rcu(tfile->ref_hash, hash, head, key) {
                if (hash->key == key) {
                        *p_hash = hash;
                        return 0;
                }
        }
        return -EINVAL;
}

static int ttm_tfile_find_ref(struct ttm_object_file *tfile,
                              uint64_t key,
                              struct vmwgfx_hash_item **p_hash)
{
        struct vmwgfx_hash_item *hash;

        hash_for_each_possible(tfile->ref_hash, hash, head, key) {
                if (hash->key == key) {
                        *p_hash = hash;
                        return 0;
                }
        }
        return -EINVAL;
}

static void ttm_object_file_destroy(struct kref *kref)
{
        struct ttm_object_file *tfile =
                container_of(kref, struct ttm_object_file, refcount);

        kfree(tfile);
}


static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
        struct ttm_object_file *tfile = *p_tfile;

        *p_tfile = NULL;
        kref_put(&tfile->refcount, ttm_object_file_destroy);
}


int ttm_base_object_init(struct ttm_object_file *tfile,
                         struct ttm_base_object *base,
                         bool shareable,
                         enum ttm_object_type object_type,
                         void (*refcount_release) (struct ttm_base_object **))
{
        struct ttm_object_device *tdev = tfile->tdev;
        int ret;

        base->shareable = shareable;
        base->tfile = ttm_object_file_ref(tfile);
        base->refcount_release = refcount_release;
        base->object_type = object_type;
        kref_init(&base->refcount);
        idr_preload(GFP_KERNEL);
        spin_lock(&tdev->object_lock);
        ret = idr_alloc(&tdev->idr, base, 1, 0, GFP_NOWAIT);
        spin_unlock(&tdev->object_lock);
        idr_preload_end();
        if (ret < 0)
                return ret;

        base->handle = ret;
        ret = ttm_ref_object_add(tfile, base, NULL, false);
        if (unlikely(ret != 0))
                goto out_err1;

        ttm_base_object_unref(&base);

        return 0;
out_err1:
        spin_lock(&tdev->object_lock);
        idr_remove(&tdev->idr, base->handle);
        spin_unlock(&tdev->object_lock);
        return ret;
}

static void ttm_release_base(struct kref *kref)
{
        struct ttm_base_object *base =
                container_of(kref, struct ttm_base_object, refcount);
        struct ttm_object_device *tdev = base->tfile->tdev;

        spin_lock(&tdev->object_lock);
        idr_remove(&tdev->idr, base->handle);
        spin_unlock(&tdev->object_lock);

        /*
         * Note: We don't use synchronize_rcu() here because it's far
         * too slow. It's up to the user to free the object using
         * call_rcu() or ttm_base_object_kfree().
         */

        ttm_object_file_unref(&base->tfile);
        if (base->refcount_release)
                base->refcount_release(&base);
}

void ttm_base_object_unref(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;

        *p_base = NULL;

        kref_put(&base->refcount, ttm_release_base);
}
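
/*
 * Usage sketch (illustrative, not from the driver): after a successful
 * ttm_base_object_init() the initial reference has been handed over to the
 * ref object registered with @tfile, so the object is subsequently reached
 * by handle. "my_obj", "my_object_type" and "my_release" are hypothetical:
 *
 *      ret = ttm_base_object_init(tfile, &my_obj->base, true,
 *                                 my_object_type, my_release);
 *      if (ret)
 *              return ret;
 *      handle = my_obj->base.handle;   // copy this to user space
 *
 *      base = ttm_base_object_lookup(tfile, handle);   // +1 reference
 *      ...
 *      ttm_base_object_unref(&base);   // -1, and base is set to NULL
 */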

/**
 * ttm_base_object_noref_lookup - look up a base object without reference
 * @tfile: The struct ttm_object_file the object is registered with.
 * @key: The object handle.
 *
 * This function looks up a ttm base object and returns a pointer to it
 * without refcounting the pointer. The returned pointer is only valid
 * until ttm_base_object_noref_release() is called, and the object
 * pointed to by the returned pointer may be doomed. Any persistent usage
 * of the object requires a refcount to be taken using
 * kref_get_unless_zero(). Iff this function returns successfully it
 * needs to be paired with ttm_base_object_noref_release(), and no
 * sleeping or scheduling functions may be called in between these
 * function calls.
 *
 * Return: A pointer to the object if successful or NULL otherwise.
 */
struct ttm_base_object *
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint64_t key)
{
        struct vmwgfx_hash_item *hash;
        int ret;

        rcu_read_lock();
        ret = ttm_tfile_find_ref_rcu(tfile, key, &hash);
        if (ret) {
                rcu_read_unlock();
                return NULL;
        }

        __release(RCU);
        return hlist_entry(hash, struct ttm_ref_object, hash)->obj;
}
EXPORT_SYMBOL(ttm_base_object_noref_lookup);

struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
                                               uint64_t key)
{
        struct ttm_base_object *base = NULL;
        struct vmwgfx_hash_item *hash;
        int ret;

        rcu_read_lock();
        ret = ttm_tfile_find_ref_rcu(tfile, key, &hash);

        if (likely(ret == 0)) {
                base = hlist_entry(hash, struct ttm_ref_object, hash)->obj;
                if (!kref_get_unless_zero(&base->refcount))
                        base = NULL;
        }
        rcu_read_unlock();

        return base;
}

struct ttm_base_object *
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint64_t key)
{
        struct ttm_base_object *base;

        rcu_read_lock();
        base = idr_find(&tdev->idr, key);

        if (base && !kref_get_unless_zero(&base->refcount))
                base = NULL;
        rcu_read_unlock();

        return base;
}

int ttm_ref_object_add(struct ttm_object_file *tfile,
                       struct ttm_base_object *base,
                       bool *existed,
                       bool require_existed)
{
        struct ttm_ref_object *ref;
        struct vmwgfx_hash_item *hash;
        int ret = -EINVAL;

        if (base->tfile != tfile && !base->shareable)
                return -EPERM;

        if (existed != NULL)
                *existed = true;

        while (ret == -EINVAL) {
                rcu_read_lock();
                ret = ttm_tfile_find_ref_rcu(tfile, base->handle, &hash);

                if (ret == 0) {
                        ref = hlist_entry(hash, struct ttm_ref_object, hash);
                        if (kref_get_unless_zero(&ref->kref)) {
                                rcu_read_unlock();
                                break;
                        }
                }

                rcu_read_unlock();
                if (require_existed)
                        return -EPERM;

                ref = kmalloc(sizeof(*ref), GFP_KERNEL);
                if (unlikely(ref == NULL))
                        return -ENOMEM;

                ref->hash.key = base->handle;
                ref->obj = base;
                ref->tfile = tfile;
                kref_init(&ref->kref);

                spin_lock(&tfile->lock);
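
/*
 * Usage sketch (illustrative only): an ioctl that wants the calling file
 * to hold a reference on a looked-up base object typically does something
 * like the following; the error label is hypothetical:
 *
 *      bool existed;
 *
 *      ret = ttm_ref_object_add(tfile, base, &existed, false);
 *      if (ret)
 *              goto out_err;
 *
 * On return, existed tells whether the file already referenced the object
 * (in which case only the existing ref object's kref was bumped). Passing
 * require_existed == true instead turns the call into a check that refuses
 * (-EPERM) to create a first-time reference.
 */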
                hash_add_rcu(tfile->ref_hash, &ref->hash.head, ref->hash.key);
                ret = 0;

                list_add_tail(&ref->head, &tfile->ref_list);
                kref_get(&base->refcount);
                spin_unlock(&tfile->lock);
                if (existed != NULL)
                        *existed = false;
        }

        return ret;
}

static void __releases(tfile->lock) __acquires(tfile->lock)
ttm_ref_object_release(struct kref *kref)
{
        struct ttm_ref_object *ref =
                container_of(kref, struct ttm_ref_object, kref);
        struct ttm_object_file *tfile = ref->tfile;

        hash_del_rcu(&ref->hash.head);
        list_del(&ref->head);
        spin_unlock(&tfile->lock);

        ttm_base_object_unref(&ref->obj);
        kfree_rcu(ref, rcu_head);
        spin_lock(&tfile->lock);
}

int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
                              unsigned long key)
{
        struct ttm_ref_object *ref;
        struct vmwgfx_hash_item *hash;
        int ret;

        spin_lock(&tfile->lock);
        ret = ttm_tfile_find_ref(tfile, key, &hash);
        if (unlikely(ret != 0)) {
                spin_unlock(&tfile->lock);
                return -EINVAL;
        }
        ref = hlist_entry(hash, struct ttm_ref_object, hash);
        kref_put(&ref->kref, ttm_ref_object_release);
        spin_unlock(&tfile->lock);
        return 0;
}

void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
        struct ttm_ref_object *ref;
        struct list_head *list;
        struct ttm_object_file *tfile = *p_tfile;

        *p_tfile = NULL;
        spin_lock(&tfile->lock);

        /*
         * Since we release the lock within the loop, we have to
         * restart it from the beginning each time.
         */

        while (!list_empty(&tfile->ref_list)) {
                list = tfile->ref_list.next;
                ref = list_entry(list, struct ttm_ref_object, head);
                ttm_ref_object_release(&ref->kref);
        }

        spin_unlock(&tfile->lock);

        ttm_object_file_unref(&tfile);
}

struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev)
{
        struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);

        if (unlikely(tfile == NULL))
                return NULL;

        spin_lock_init(&tfile->lock);
        tfile->tdev = tdev;
        kref_init(&tfile->refcount);
        INIT_LIST_HEAD(&tfile->ref_list);

        hash_init(tfile->ref_hash);

        return tfile;
}

struct ttm_object_device *
ttm_object_device_init(const struct dma_buf_ops *ops)
{
        struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);

        if (unlikely(tdev == NULL))
                return NULL;

        spin_lock_init(&tdev->object_lock);
        atomic_set(&tdev->object_count, 0);

        /*
         * Our base is at VMWGFX_NUM_MOB + 1 because we want to create
         * a separate namespace for GEM handles (which are
         * 1..VMWGFX_NUM_MOB) and the surface handles. Some ioctls
         * can take either handle as an argument so we want to
         * easily be able to tell whether the handle refers to a
         * GEM buffer or a surface.
         */
        idr_init_base(&tdev->idr, VMWGFX_NUM_MOB + 1);
        tdev->ops = *ops;
        tdev->dmabuf_release = tdev->ops.release;
        tdev->ops.release = ttm_prime_dmabuf_release;
        return tdev;
}

void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
        struct ttm_object_device *tdev = *p_tdev;

        *p_tdev = NULL;

        WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
        idr_destroy(&tdev->idr);

        kfree(tdev);
}
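
/*
 * Lifetime sketch (illustrative, not from the driver): a driver creates
 * one device-wide ttm_object_device and one ttm_object_file per open file,
 * and tears them down in the reverse order. "my_dmabuf_ops" is a
 * hypothetical struct dma_buf_ops:
 *
 *      tdev = ttm_object_device_init(&my_dmabuf_ops);  // at driver load
 *      ...
 *      tfile = ttm_object_file_init(tdev);             // at file open
 *      ...
 *      ttm_object_file_release(&tfile);                // at file close;
 *                                                      // drops all refs
 *                                                      // held by the file
 *      ...
 *      ttm_object_device_release(&tdev);               // at driver unload
 */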

/**
 * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
 *
 * @dmabuf: Non-refcounted pointer to a struct dma_buf.
 *
 * Obtain a file reference from a lookup structure that doesn't refcount
 * the file, but synchronizes with its release method to make sure it has
 * not been freed yet. See for example kref_get_unless_zero documentation.
 * Returns true if refcounting succeeds, false otherwise.
 *
 * Nobody really wants this as a public API yet, so let it mature here
 * for some time...
 */
static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
{
        return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
}

/**
 * ttm_prime_refcount_release - refcount release method for a prime object.
 *
 * @p_base: Pointer to ttm_base_object pointer.
 *
 * This is a wrapper that calls the refcount_release function of the
 * underlying object. At the same time it cleans up the prime object.
 * This function is called when all references to the base object we
 * derive from are gone.
 */
static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct ttm_prime_object *prime;

        *p_base = NULL;
        prime = container_of(base, struct ttm_prime_object, base);
        BUG_ON(prime->dma_buf != NULL);
        mutex_destroy(&prime->mutex);
        if (prime->refcount_release)
                prime->refcount_release(&base);
}

/**
 * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
 *
 * @dma_buf: The dma-buf being released.
 *
 * This function first calls the dma_buf release method the driver
 * provides. Then it cleans up our dma_buf pointer used for lookup,
 * and finally releases the reference the dma_buf has on our base
 * object.
 */
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
{
        struct ttm_prime_object *prime =
                (struct ttm_prime_object *) dma_buf->priv;
        struct ttm_base_object *base = &prime->base;
        struct ttm_object_device *tdev = base->tfile->tdev;

        if (tdev->dmabuf_release)
                tdev->dmabuf_release(dma_buf);
        mutex_lock(&prime->mutex);
        if (prime->dma_buf == dma_buf)
                prime->dma_buf = NULL;
        mutex_unlock(&prime->mutex);
        ttm_base_object_unref(&base);
}

/**
 * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @fd: The prime / dmabuf fd.
 * @handle: The returned handle.
 *
 * This function returns a handle to an object that previously exported
 * a dma-buf. Note that we don't handle imports yet, because we simply
 * have no consumers of that implementation.
 */
int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
                           int fd, u32 *handle)
{
        struct ttm_object_device *tdev = tfile->tdev;
        struct dma_buf *dma_buf;
        struct ttm_prime_object *prime;
        struct ttm_base_object *base;
        int ret;

        dma_buf = dma_buf_get(fd);
        if (IS_ERR(dma_buf))
                return PTR_ERR(dma_buf);

        if (dma_buf->ops != &tdev->ops) {
                /* Not one of ours; drop the reference dma_buf_get() took. */
                dma_buf_put(dma_buf);
                return -ENOSYS;
        }

        prime = (struct ttm_prime_object *) dma_buf->priv;
        base = &prime->base;
        *handle = base->handle;
        ret = ttm_ref_object_add(tfile, base, NULL, false);

        dma_buf_put(dma_buf);

        return ret;
}
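
/*
 * Usage sketch (illustrative only): a PRIME import ioctl handler is
 * expected to be a thin wrapper around ttm_prime_fd_to_handle(); the
 * "args" structure here is hypothetical:
 *
 *      u32 handle;
 *      int ret;
 *
 *      ret = ttm_prime_fd_to_handle(tfile, args->fd, &handle);
 *      if (ret)
 *              return ret;
 *      args->handle = handle;
 *      return 0;
 */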

/**
 * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
 *
 * @tfile: Struct ttm_object_file identifying the caller.
 * @handle: Handle to the object we're exporting from.
 * @flags: flags for dma-buf creation. We just pass them on.
 * @prime_fd: The returned file descriptor.
 *
 */
int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
                           uint32_t handle, uint32_t flags,
                           int *prime_fd)
{
        struct ttm_object_device *tdev = tfile->tdev;
        struct ttm_base_object *base;
        struct dma_buf *dma_buf;
        struct ttm_prime_object *prime;
        int ret;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL ||
                     base->object_type != ttm_prime_type)) {
                ret = -ENOENT;
                goto out_unref;
        }

        prime = container_of(base, struct ttm_prime_object, base);
        if (unlikely(!base->shareable)) {
                ret = -EPERM;
                goto out_unref;
        }

        ret = mutex_lock_interruptible(&prime->mutex);
        if (unlikely(ret != 0)) {
                ret = -ERESTARTSYS;
                goto out_unref;
        }

        dma_buf = prime->dma_buf;
        if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
                DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
                exp_info.ops = &tdev->ops;
                exp_info.size = prime->size;
                exp_info.flags = flags;
                exp_info.priv = prime;

                /*
                 * Need to create a new dma_buf
                 */

                dma_buf = dma_buf_export(&exp_info);
                if (IS_ERR(dma_buf)) {
                        ret = PTR_ERR(dma_buf);
                        mutex_unlock(&prime->mutex);
                        goto out_unref;
                }

                /*
                 * dma_buf has taken the base object reference
                 */
                base = NULL;
                prime->dma_buf = dma_buf;
        }
        mutex_unlock(&prime->mutex);

        ret = dma_buf_fd(dma_buf, flags);
        if (ret >= 0) {
                *prime_fd = ret;
                ret = 0;
        } else
                dma_buf_put(dma_buf);

out_unref:
        if (base)
                ttm_base_object_unref(&base);
        return ret;
}

/**
 * ttm_prime_object_init - Initialize a ttm_prime_object
 *
 * @tfile: struct ttm_object_file identifying the caller
 * @size: The size of the dma_bufs we export.
 * @prime: The object to be initialized.
 * @shareable: See ttm_base_object_init
 * @type: See ttm_base_object_init
 * @refcount_release: See ttm_base_object_init
 *
 * Initializes an object which is compatible with the drm_prime model
 * for data sharing between processes and devices.
 */
int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
                          struct ttm_prime_object *prime, bool shareable,
                          enum ttm_object_type type,
                          void (*refcount_release) (struct ttm_base_object **))
{
        mutex_init(&prime->mutex);
        prime->size = PAGE_ALIGN(size);
        prime->real_type = type;
        prime->dma_buf = NULL;
        prime->refcount_release = refcount_release;
        return ttm_base_object_init(tfile, &prime->base, shareable,
                                    ttm_prime_type,
                                    ttm_prime_refcount_release);
}
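
/*
 * Export sketch (illustrative, not from the driver): a prime-capable
 * object is initialized with ttm_prime_object_init() and later turned
 * into a dma-buf fd with ttm_prime_handle_to_fd(). "my_prime",
 * "my_object_type" and "my_release" are hypothetical:
 *
 *      ret = ttm_prime_object_init(tfile, size, &my_prime, true,
 *                                  my_object_type, my_release);
 *      ...
 *      ret = ttm_prime_handle_to_fd(tfile, my_prime.base.handle,
 *                                   DRM_CLOEXEC | DRM_RDWR, &fd);
 *
 * Note that ttm_prime_object_init() registers the base object as
 * ttm_prime_type and keeps the caller's @type in prime->real_type, so
 * lookups can distinguish real object types from the prime wrapper.
 */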