/*
 * Copyright (c) 2016, Mellanox Technologies inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/atomic.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/lockdep.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <linux/rcupdate.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/rdma_user_ioctl.h>
#include "uverbs.h"
#include "core_priv.h"
#include "rdma_core.h"

/*
 * uverbs_uobject_get is called in order to increase the reference count on
 * a uobject. Every get must eventually be paired with uverbs_uobject_put.
 */
void uverbs_uobject_get(struct ib_uobject *uobject)
{
	kref_get(&uobject->ref);
}
EXPORT_SYMBOL(uverbs_uobject_get);

static void uverbs_uobject_free(struct kref *ref)
{
	kfree_rcu(container_of(ref, struct ib_uobject, ref), rcu);
}

/*
 * In order to indicate we no longer need this uobject, uverbs_uobject_put
 * is called. When the reference count is decreased, the uobject is freed.
 * For example, this is used when attaching a completion channel to a CQ.
 */
void uverbs_uobject_put(struct ib_uobject *uobject)
{
	kref_put(&uobject->ref, uverbs_uobject_free);
}
EXPORT_SYMBOL(uverbs_uobject_put);

static int uverbs_try_lock_object(struct ib_uobject *uobj,
				  enum rdma_lookup_mode mode)
{
	/*
	 * When a shared access is required, we use a positive counter. Each
	 * shared access request checks that the value != -1 and increments
	 * it. Exclusive access is required for operations like write or
	 * destroy. In exclusive access mode, we check that the counter is
	 * zero (nobody claimed this object) and we set it to -1. Releasing
	 * a shared access lock is done simply by decreasing the counter. As
	 * for exclusive access locks, since only a single one of them is
	 * allowed concurrently, setting the counter to zero is enough for
	 * releasing this lock.
	 */
	switch (mode) {
	case UVERBS_LOOKUP_READ:
		return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ?
			-EBUSY : 0;
	case UVERBS_LOOKUP_WRITE:
		/* lock is exclusive */
		return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
	case UVERBS_LOOKUP_DESTROY:
		return 0;
	}
	return 0;
}
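/*
 * Illustrative sketch (not part of the uverbs API): usecnt behaves like a
 * non-sleeping read/write try-lock. A hypothetical caller pairs
 * uverbs_try_lock_object() with the matching release that
 * rdma_lookup_put_uobject() performs further below:
 *
 *	if (!uverbs_try_lock_object(uobj, UVERBS_LOOKUP_READ)) {
 *		... usecnt > 0 keeps exclusive users out ...
 *		atomic_dec(&uobj->usecnt);	releases the shared lock
 *	}
 *
 *	if (!uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE)) {
 *		... usecnt == -1 blocks all other lookups ...
 *		atomic_set(&uobj->usecnt, 0);	releases the exclusive lock
 *	}
 */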
static void assert_uverbs_usecnt(struct ib_uobject *uobj,
				 enum rdma_lookup_mode mode)
{
#ifdef CONFIG_LOCKDEP
	switch (mode) {
	case UVERBS_LOOKUP_READ:
		WARN_ON(atomic_read(&uobj->usecnt) <= 0);
		break;
	case UVERBS_LOOKUP_WRITE:
		WARN_ON(atomic_read(&uobj->usecnt) != -1);
		break;
	case UVERBS_LOOKUP_DESTROY:
		break;
	}
#endif
}

/*
 * This must be called with the hw_destroy_rwsem locked for read or write,
 * also the uobject itself must be locked for write.
 *
 * Upon return the HW object is guaranteed to be destroyed.
 *
 * For RDMA_REMOVE_ABORT, the hw_destroy_rwsem is not required to be held,
 * however the type's alloc_commit function cannot have been called and the
 * uobject cannot be on the ufile->uobjects list.
 *
 * For RDMA_REMOVE_DESTROY the caller should be holding a kref (e.g. via
 * rdma_lookup_get_uobject) and the object is left in a state where the
 * caller needs to call rdma_lookup_put_uobject.
 *
 * For all other destroy modes this function internally unlocks the uobject
 * and consumes the kref on the uobj.
 */
static int uverbs_destroy_uobject(struct ib_uobject *uobj,
				  enum rdma_remove_reason reason,
				  struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_file *ufile = attrs->ufile;
	unsigned long flags;
	int ret;

	lockdep_assert_held(&ufile->hw_destroy_rwsem);
	assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE);

	if (reason == RDMA_REMOVE_ABORT) {
		WARN_ON(!list_empty(&uobj->list));
		WARN_ON(!uobj->context);
		uobj->uapi_object->type_class->alloc_abort(uobj);
	} else if (uobj->object) {
		ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason,
								attrs);
		if (ret) {
			if (ib_is_destroy_retryable(ret, reason, uobj))
				return ret;

			/* Nothing to be done, dangle the memory and move on */
			WARN(true,
			     "ib_uverbs: failed to remove uobject id %d, driver err=%d",
			     uobj->id, ret);
		}

		uobj->object = NULL;
	}

	uobj->context = NULL;

	/*
	 * For DESTROY the usecnt is held write locked, the caller is
	 * expected to unlock it and put the object when done with it. Only
	 * DESTROY can remove the IDR handle.
	 */
	if (reason != RDMA_REMOVE_DESTROY)
		atomic_set(&uobj->usecnt, 0);
	else
		uobj->uapi_object->type_class->remove_handle(uobj);

	if (!list_empty(&uobj->list)) {
		spin_lock_irqsave(&ufile->uobjects_lock, flags);
		list_del_init(&uobj->list);
		spin_unlock_irqrestore(&ufile->uobjects_lock, flags);

		/*
		 * Pairs with the get in rdma_alloc_commit_uobject(), could
		 * destroy uobj.
		 */
		uverbs_uobject_put(uobj);
	}

	/*
	 * When aborting the stack kref remains owned by the core code, and is
	 * not transferred into the type. Pairs with the get in alloc_uobj
	 */
	if (reason == RDMA_REMOVE_ABORT)
		uverbs_uobject_put(uobj);

	return 0;
}
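/*
 * Quick reference for the contract above (a restatement of the comments on
 * uverbs_destroy_uobject(), not new behavior):
 *
 *	reason			lock on return		kref consumed?
 *	RDMA_REMOVE_DESTROY	still write locked	no, caller must put
 *	RDMA_REMOVE_ABORT	unlocked (usecnt = 0)	yes, plus alloc kref
 *	all other reasons	unlocked (usecnt = 0)	yes, the list kref
 */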
/*
 * This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY
 * sequence. It should only be used from command callbacks. On success the
 * caller must pair this with rdma_lookup_put_uobject(LOOKUP_WRITE). This
 * version requires the caller to have already obtained an
 * LOOKUP_DESTROY uobject kref.
 */
int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_file *ufile = attrs->ufile;
	int ret;

	down_read(&ufile->hw_destroy_rwsem);

	ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE);
	if (ret)
		goto out_unlock;

	ret = uverbs_destroy_uobject(uobj, RDMA_REMOVE_DESTROY, attrs);
	if (ret) {
		atomic_set(&uobj->usecnt, 0);
		goto out_unlock;
	}

out_unlock:
	up_read(&ufile->hw_destroy_rwsem);
	return ret;
}

/*
 * uobj_get_destroy destroys the HW object and returns a handle to the uobj
 * with a NULL object pointer. The caller must pair this with
 * uobj_put_destroy().
 */
struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
				      u32 id, struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj;
	int ret;

	uobj = rdma_lookup_get_uobject(obj, attrs->ufile, id,
				       UVERBS_LOOKUP_DESTROY, attrs);
	if (IS_ERR(uobj))
		return uobj;

	ret = uobj_destroy(uobj, attrs);
	if (ret) {
		rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
		return ERR_PTR(ret);
	}

	return uobj;
}

/*
 * Does both uobj_get_destroy() and uobj_put_destroy(). Returns 0 on success
 * (negative errno on failure). For use by callers that do not need the
 * uobj.
 */
int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id,
			   struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj;

	uobj = __uobj_get_destroy(obj, id, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
	return 0;
}

/* alloc_uobj must be undone by uverbs_destroy_uobject() */
static struct ib_uobject *alloc_uobj(struct uverbs_attr_bundle *attrs,
				     const struct uverbs_api_object *obj)
{
	struct ib_uverbs_file *ufile = attrs->ufile;
	struct ib_uobject *uobj;

	if (!attrs->context) {
		struct ib_ucontext *ucontext =
			ib_uverbs_get_ucontext_file(ufile);

		if (IS_ERR(ucontext))
			return ERR_CAST(ucontext);
		attrs->context = ucontext;
	}

	uobj = kzalloc(obj->type_attrs->obj_size, GFP_KERNEL);
	if (!uobj)
		return ERR_PTR(-ENOMEM);
	/*
	 * user_handle should be filled by the handler. The object is added
	 * to the list in the commit stage.
	 */
	uobj->ufile = ufile;
	uobj->context = attrs->context;
	INIT_LIST_HEAD(&uobj->list);
	uobj->uapi_object = obj;
	/*
	 * Allocated objects start out as write locked to deny any other
	 * syscalls from accessing them until they are committed. See
	 * rdma_alloc_commit_uobject
	 */
	atomic_set(&uobj->usecnt, -1);
	kref_init(&uobj->ref);

	return uobj;
}

static int idr_add_uobj(struct ib_uobject *uobj)
{
	/*
	 * We start with allocating an idr pointing to NULL. This represents an
	 * object which isn't initialized yet. We'll replace it later on with
	 * the real object once we commit.
	 */
	return xa_alloc(&uobj->ufile->idr, &uobj->id, NULL, xa_limit_32b,
			GFP_KERNEL);
}
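/*
 * Lifecycle of an id in ufile->idr, summarizing the flow implemented in
 * this file (orientation only, not additional API):
 *
 *	xa_alloc(&ufile->idr, &uobj->id, NULL, ...);	reserve, idr_add_uobj()
 *	xa_store(&ufile->idr, uobj->id, uobj, ...);	publish, alloc_commit_idr_uobject()
 *	xa_erase(&ufile->idr, uobj->id);		drop, remove_handle/alloc_abort
 */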
/*
 * Returns the ib_uobject or an error. The caller should check for IS_ERR.
 */
static struct ib_uobject *
lookup_get_idr_uobject(const struct uverbs_api_object *obj,
		       struct ib_uverbs_file *ufile, s64 id,
		       enum rdma_lookup_mode mode)
{
	struct ib_uobject *uobj;

	if (id < 0 || id > ULONG_MAX)
		return ERR_PTR(-EINVAL);

	rcu_read_lock();
	/*
	 * The xa_load is guaranteed to return a pointer to something that
	 * isn't freed yet, or NULL, as the free after xa_erase goes through
	 * kfree_rcu(). However the object may still have been released and
	 * kfree() could be called at any time.
	 */
	uobj = xa_load(&ufile->idr, id);
	if (!uobj || !kref_get_unless_zero(&uobj->ref))
		uobj = ERR_PTR(-ENOENT);
	rcu_read_unlock();
	return uobj;
}

static struct ib_uobject *
lookup_get_fd_uobject(const struct uverbs_api_object *obj,
		      struct ib_uverbs_file *ufile, s64 id,
		      enum rdma_lookup_mode mode)
{
	const struct uverbs_obj_fd_type *fd_type;
	struct file *f;
	struct ib_uobject *uobject;
	int fdno = id;

	if (fdno != id)
		return ERR_PTR(-EINVAL);

	if (mode != UVERBS_LOOKUP_READ)
		return ERR_PTR(-EOPNOTSUPP);

	if (!obj->type_attrs)
		return ERR_PTR(-EIO);
	fd_type =
		container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);

	f = fget(fdno);
	if (!f)
		return ERR_PTR(-EBADF);

	uobject = f->private_data;
	/*
	 * fget(id) ensures we are not currently running
	 * uverbs_uobject_fd_release(), and the caller is expected to ensure
	 * that release is never done while a call to lookup is possible.
	 */
	if (f->f_op != fd_type->fops) {
		fput(f);
		return ERR_PTR(-EBADF);
	}

	uverbs_uobject_get(uobject);
	return uobject;
}
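/*
 * A sketch of how a handler typically consumes the lookup API below (the
 * handler body is hypothetical; the calls are the ones defined in this
 * file). Every successful lookup must be paired with a put using the same
 * mode:
 *
 *	uobj = rdma_lookup_get_uobject(obj, attrs->ufile, id,
 *				       UVERBS_LOOKUP_READ, attrs);
 *	if (IS_ERR(uobj))
 *		return PTR_ERR(uobj);
 *	... read-only use of uobj->object ...
 *	rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ);
 */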
struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj,
					   struct ib_uverbs_file *ufile, s64 id,
					   enum rdma_lookup_mode mode,
					   struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj;
	int ret;

	if (obj == ERR_PTR(-ENOMSG)) {
		/* must be UVERBS_IDR_ANY_OBJECT, see uapi_get_object() */
		uobj = lookup_get_idr_uobject(NULL, ufile, id, mode);
		if (IS_ERR(uobj))
			return uobj;
	} else {
		if (IS_ERR(obj))
			return ERR_PTR(-EINVAL);

		uobj = obj->type_class->lookup_get(obj, ufile, id, mode);
		if (IS_ERR(uobj))
			return uobj;

		if (uobj->uapi_object != obj) {
			ret = -EINVAL;
			goto free;
		}
	}

	/*
	 * If we have been disassociated block every command except for
	 * DESTROY based commands.
	 */
	if (mode != UVERBS_LOOKUP_DESTROY &&
	    !srcu_dereference(ufile->device->ib_dev,
			      &ufile->device->disassociate_srcu)) {
		ret = -EIO;
		goto free;
	}

	ret = uverbs_try_lock_object(uobj, mode);
	if (ret)
		goto free;
	if (attrs)
		attrs->context = uobj->context;

	return uobj;
free:
	uobj->uapi_object->type_class->lookup_put(uobj, mode);
	uverbs_uobject_put(uobj);
	return ERR_PTR(ret);
}

static struct ib_uobject *
alloc_begin_idr_uobject(const struct uverbs_api_object *obj,
			struct uverbs_attr_bundle *attrs)
{
	int ret;
	struct ib_uobject *uobj;

	uobj = alloc_uobj(attrs, obj);
	if (IS_ERR(uobj))
		return uobj;

	ret = idr_add_uobj(uobj);
	if (ret)
		goto uobj_put;

	ret = ib_rdmacg_try_charge(&uobj->cg_obj, uobj->context->device,
				   RDMACG_RESOURCE_HCA_OBJECT);
	if (ret)
		goto remove;

	return uobj;

remove:
	xa_erase(&attrs->ufile->idr, uobj->id);
uobj_put:
	uverbs_uobject_put(uobj);
	return ERR_PTR(ret);
}

static struct ib_uobject *
alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
		       struct uverbs_attr_bundle *attrs)
{
	const struct uverbs_obj_fd_type *fd_type =
		container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
	int new_fd;
	struct ib_uobject *uobj;
	struct file *filp;

	if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release))
		return ERR_PTR(-EINVAL);

	new_fd = get_unused_fd_flags(O_CLOEXEC);
	if (new_fd < 0)
		return ERR_PTR(new_fd);

	uobj = alloc_uobj(attrs, obj);
	if (IS_ERR(uobj))
		goto err_fd;

	/* Note that uverbs_uobject_fd_release() is called during abort */
	filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL,
				  fd_type->flags);
	if (IS_ERR(filp)) {
		/* Drop the kref from alloc_uobj() before returning the error */
		uverbs_uobject_put(uobj);
		uobj = ERR_CAST(filp);
		goto err_fd;
	}
	uobj->object = filp;

	uobj->id = new_fd;
	return uobj;

err_fd:
	put_unused_fd(new_fd);
	return uobj;
}

struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
					    struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_file *ufile = attrs->ufile;
	struct ib_uobject *ret;

	if (IS_ERR(obj))
		return ERR_PTR(-EINVAL);

	/*
	 * The hw_destroy_rwsem is held across the entire object creation and
	 * released during rdma_alloc_commit_uobject or
	 * rdma_alloc_abort_uobject
	 */
	if (!down_read_trylock(&ufile->hw_destroy_rwsem))
		return ERR_PTR(-EIO);

	ret = obj->type_class->alloc_begin(obj, attrs);
	if (IS_ERR(ret)) {
		up_read(&ufile->hw_destroy_rwsem);
		return ret;
	}
	return ret;
}
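/*
 * A sketch of the overall creation flow from a handler's point of view
 * (the handler and create_hw_object() are hypothetical; the uverbs calls
 * are the ones in this file). Each rdma_alloc_begin_uobject() must be
 * finished by exactly one of commit or abort, which also releases the
 * hw_destroy_rwsem taken above:
 *
 *	uobj = rdma_alloc_begin_uobject(obj, attrs);
 *	if (IS_ERR(uobj))
 *		return PTR_ERR(uobj);
 *	ret = create_hw_object(attrs, uobj);	fills uobj->object
 *	if (ret) {
 *		rdma_alloc_abort_uobject(uobj, attrs);
 *		return ret;
 *	}
 *	rdma_alloc_commit_uobject(uobj, attrs);
 *	return 0;
 */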
static void alloc_abort_idr_uobject(struct ib_uobject *uobj)
{
	ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
			   RDMACG_RESOURCE_HCA_OBJECT);
	xa_erase(&uobj->ufile->idr, uobj->id);
}

static int __must_check destroy_hw_idr_uobject(struct ib_uobject *uobj,
					       enum rdma_remove_reason why,
					       struct uverbs_attr_bundle *attrs)
{
	const struct uverbs_obj_idr_type *idr_type =
		container_of(uobj->uapi_object->type_attrs,
			     struct uverbs_obj_idr_type, type);
	int ret = idr_type->destroy_object(uobj, why, attrs);

	/*
	 * We can only fail gracefully if the user requested to destroy the
	 * object or when a retry may be called upon an error.
	 * In the rest of the cases, just remove whatever you can.
	 */
	if (ib_is_destroy_retryable(ret, why, uobj))
		return ret;

	if (why == RDMA_REMOVE_ABORT)
		return 0;

	/* Matches the charge in alloc_begin_idr_uobject */
	ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
			   RDMACG_RESOURCE_HCA_OBJECT);

	return 0;
}

static void remove_handle_idr_uobject(struct ib_uobject *uobj)
{
	xa_erase(&uobj->ufile->idr, uobj->id);
	/* Matches the kref in alloc_commit_idr_uobject */
	uverbs_uobject_put(uobj);
}

static void alloc_abort_fd_uobject(struct ib_uobject *uobj)
{
	struct file *filp = uobj->object;

	fput(filp);
	put_unused_fd(uobj->id);
}

static int __must_check destroy_hw_fd_uobject(struct ib_uobject *uobj,
					      enum rdma_remove_reason why,
					      struct uverbs_attr_bundle *attrs)
{
	const struct uverbs_obj_fd_type *fd_type = container_of(
		uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type);
	int ret = fd_type->destroy_object(uobj, why);

	if (ib_is_destroy_retryable(ret, why, uobj))
		return ret;

	return 0;
}

static void remove_handle_fd_uobject(struct ib_uobject *uobj)
{
}

static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
{
	struct ib_uverbs_file *ufile = uobj->ufile;
	void *old;

	/*
	 * We already allocated this IDR with a NULL object, so
	 * this shouldn't fail.
	 *
	 * NOTE: Storing the uobj transfers our kref on uobj to the XArray.
	 * It will be put by remove_handle_idr_uobject()
	 */
	old = xa_store(&ufile->idr, uobj->id, uobj, GFP_KERNEL);
	WARN_ON(old != NULL);
}

static void alloc_commit_fd_uobject(struct ib_uobject *uobj)
{
	int fd = uobj->id;
	struct file *filp = uobj->object;

	/* Matching put will be done in uverbs_uobject_fd_release() */
	kref_get(&uobj->ufile->ref);

	/* This shouldn't be used anymore. Use the file object instead */
	uobj->id = 0;

	/*
	 * NOTE: Once we install the file we lose ownership of our kref on
	 * uobj. It will be put by uverbs_uobject_fd_release()
	 */
	filp->private_data = uobj;
	fd_install(fd, filp);
}

/*
 * In all cases rdma_alloc_commit_uobject() consumes the kref to uobj and the
 * caller can no longer assume uobj is valid.
 */
void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
			       struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_file *ufile = attrs->ufile;

	/* alloc_commit consumes the uobj kref */
	uobj->uapi_object->type_class->alloc_commit(uobj);

	/* kref is held so long as the uobj is on the uobj list. */
	uverbs_uobject_get(uobj);
	spin_lock_irq(&ufile->uobjects_lock);
	list_add(&uobj->list, &ufile->uobjects);
	spin_unlock_irq(&ufile->uobjects_lock);

	/* matches atomic_set(-1) in alloc_uobj */
	atomic_set(&uobj->usecnt, 0);

	/* Matches the down_read in rdma_alloc_begin_uobject */
	up_read(&ufile->hw_destroy_rwsem);
}
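/*
 * Reference balance after a successful commit, summarizing the notes above
 * (orientation only, no new behavior): the kref from alloc_uobj() now
 * belongs to the XArray entry (IDR types) or the installed file (FD types),
 * and a second kref pins the uobject while it sits on the ufile->uobjects
 * list. Lookups take their own transient kref on top of these.
 */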
/*
 * This consumes the kref for uobj. It is up to the caller to unwind the HW
 * object and anything else connected to uobj before calling this.
 */
void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
			      struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_file *ufile = uobj->ufile;

	uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT, attrs);

	/* Matches the down_read in rdma_alloc_begin_uobject */
	up_read(&ufile->hw_destroy_rwsem);
}

static void lookup_put_idr_uobject(struct ib_uobject *uobj,
				   enum rdma_lookup_mode mode)
{
}

static void lookup_put_fd_uobject(struct ib_uobject *uobj,
				  enum rdma_lookup_mode mode)
{
	struct file *filp = uobj->object;

	WARN_ON(mode != UVERBS_LOOKUP_READ);
	/*
	 * This indirectly calls uverbs_uobject_fd_release() and frees the
	 * object.
	 */
	fput(filp);
}

void rdma_lookup_put_uobject(struct ib_uobject *uobj,
			     enum rdma_lookup_mode mode)
{
	assert_uverbs_usecnt(uobj, mode);
	uobj->uapi_object->type_class->lookup_put(uobj, mode);
	/*
	 * In order to unlock an object, either decrease its usecnt for
	 * read access or zero it in case of exclusive access. See
	 * uverbs_try_lock_object for locking schema information.
	 */
	switch (mode) {
	case UVERBS_LOOKUP_READ:
		atomic_dec(&uobj->usecnt);
		break;
	case UVERBS_LOOKUP_WRITE:
		atomic_set(&uobj->usecnt, 0);
		break;
	case UVERBS_LOOKUP_DESTROY:
		break;
	}

	/* Pairs with the kref obtained by type->lookup_get */
	uverbs_uobject_put(uobj);
}

void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile)
{
	xa_init_flags(&ufile->idr, XA_FLAGS_ALLOC);
}

void release_ufile_idr_uobject(struct ib_uverbs_file *ufile)
{
	struct ib_uobject *entry;
	unsigned long id;

	/*
	 * At this point uverbs_cleanup_ufile() is guaranteed to have run, and
	 * there are no HW objects left, however the xarray is still populated
	 * with anything that has not been cleaned up by userspace. Since the
	 * kref on ufile is 0, nothing is allowed to call lookup_get.
	 *
	 * This is an optimized equivalent to remove_handle_idr_uobject
	 */
	xa_for_each(&ufile->idr, id, entry) {
		WARN_ON(entry->object);
		uverbs_uobject_put(entry);
	}

	xa_destroy(&ufile->idr);
}

const struct uverbs_obj_type_class uverbs_idr_class = {
	.alloc_begin = alloc_begin_idr_uobject,
	.lookup_get = lookup_get_idr_uobject,
	.alloc_commit = alloc_commit_idr_uobject,
	.alloc_abort = alloc_abort_idr_uobject,
	.lookup_put = lookup_put_idr_uobject,
	.destroy_hw = destroy_hw_idr_uobject,
	.remove_handle = remove_handle_idr_uobject,
};
EXPORT_SYMBOL(uverbs_idr_class);

/*
 * Users of UVERBS_TYPE_ALLOC_FD should set this function as the struct
 * file_operations release method.
 */
int uverbs_uobject_fd_release(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_file *ufile;
	struct ib_uobject *uobj;

	/*
	 * This can only happen if the fput came from alloc_abort_fd_uobject()
	 */
	if (!filp->private_data)
		return 0;
	uobj = filp->private_data;
	ufile = uobj->ufile;

	if (down_read_trylock(&ufile->hw_destroy_rwsem)) {
		struct uverbs_attr_bundle attrs = {
			.context = uobj->context,
			.ufile = ufile,
		};

		/*
		 * lookup_get_fd_uobject holds the kref on the struct file any
		 * time a FD uobj is locked, which prevents this release
		 * method from being invoked. Meaning we can always get the
		 * write lock here, or we have a kernel bug.
		 */
		WARN_ON(uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE));
		uverbs_destroy_uobject(uobj, RDMA_REMOVE_CLOSE, &attrs);
		up_read(&ufile->hw_destroy_rwsem);
	}

	/* Matches the get in alloc_commit_fd_uobject() */
	kref_put(&ufile->ref, ib_uverbs_release_file);

	/* Pairs with filp->private_data in alloc_begin_fd_uobject */
	uverbs_uobject_put(uobj);
	return 0;
}
EXPORT_SYMBOL(uverbs_uobject_fd_release);
/*
 * Drop the ucontext off the ufile and completely disconnect it from the
 * ib_device
 */
static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
				   enum rdma_remove_reason reason)
{
	struct ib_ucontext *ucontext = ufile->ucontext;
	struct ib_device *ib_dev = ucontext->device;

	/*
	 * If we are closing the FD then the user mmap VMAs must have
	 * already been destroyed as they hold on to the filep, otherwise
	 * they need to be zap'd.
	 */
	if (reason == RDMA_REMOVE_DRIVER_REMOVE) {
		uverbs_user_mmap_disassociate(ufile);
		if (ib_dev->disassociate_ucontext)
			ib_dev->disassociate_ucontext(ucontext);
	}

	ib_dev->dealloc_ucontext(ucontext);
	WARN_ON(!xa_empty(&ucontext->mmap_xa));
	kfree(ucontext);

	ufile->ucontext = NULL;
}

static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
				  enum rdma_remove_reason reason)
{
	struct ib_uobject *obj, *next_obj;
	int ret = -EINVAL;
	struct uverbs_attr_bundle attrs = { .ufile = ufile };

	/*
	 * This shouldn't run while executing other commands on this
	 * context. Thus, the only thing we should take care of is
	 * releasing a FD while traversing this list. The FD could be
	 * closed and released from the _release fop of this FD.
	 * In order to mitigate this, we add a lock.
	 * We take and release the lock per traversal in order to give
	 * other threads (which might still use the FDs) a chance to run.
	 */
	list_for_each_entry_safe(obj, next_obj, &ufile->uobjects, list) {
		attrs.context = obj->context;
		/*
		 * if we hit this WARN_ON, that means we are
		 * racing with a lookup_get.
		 */
		WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE));
		if (!uverbs_destroy_uobject(obj, reason, &attrs))
			ret = 0;
		else
			atomic_set(&obj->usecnt, 0);
	}
	return ret;
}

/*
 * Destroy the ucontext and every uobject associated with it.
 *
 * This is internally locked and can be called in parallel from multiple
 * contexts.
 */
void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile,
			     enum rdma_remove_reason reason)
{
	down_write(&ufile->hw_destroy_rwsem);

	/*
	 * If a ucontext was never created then we can't have any uobjects to
	 * cleanup, nothing to do.
	 */
	if (!ufile->ucontext)
		goto done;

	ufile->ucontext->closing = true;
	ufile->ucontext->cleanup_retryable = true;
	while (!list_empty(&ufile->uobjects))
		if (__uverbs_cleanup_ufile(ufile, reason)) {
			/*
			 * No entry was cleaned-up successfully during this
			 * iteration
			 */
			break;
		}

	ufile->ucontext->cleanup_retryable = false;
	if (!list_empty(&ufile->uobjects))
		__uverbs_cleanup_ufile(ufile, reason);

	ufile_destroy_ucontext(ufile, reason);

done:
	up_write(&ufile->hw_destroy_rwsem);
}
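/*
 * Cleanup above runs in two phases (a restatement of the loop for
 * orientation): while cleanup_retryable is set, __uverbs_cleanup_ufile()
 * is repeated so objects whose destruction returned a retryable error can
 * be destroyed after the objects that still referenced them; once an
 * iteration makes no progress, cleanup_retryable is cleared and a final
 * pass removes whatever remains unconditionally.
 */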
const struct uverbs_obj_type_class uverbs_fd_class = {
	.alloc_begin = alloc_begin_fd_uobject,
	.lookup_get = lookup_get_fd_uobject,
	.alloc_commit = alloc_commit_fd_uobject,
	.alloc_abort = alloc_abort_fd_uobject,
	.lookup_put = lookup_put_fd_uobject,
	.destroy_hw = destroy_hw_fd_uobject,
	.remove_handle = remove_handle_fd_uobject,
};
EXPORT_SYMBOL(uverbs_fd_class);

struct ib_uobject *
uverbs_get_uobject_from_file(u16 object_id, enum uverbs_obj_access access,
			     s64 id, struct uverbs_attr_bundle *attrs)
{
	const struct uverbs_api_object *obj =
		uapi_get_object(attrs->ufile->device->uapi, object_id);

	switch (access) {
	case UVERBS_ACCESS_READ:
		return rdma_lookup_get_uobject(obj, attrs->ufile, id,
					       UVERBS_LOOKUP_READ, attrs);
	case UVERBS_ACCESS_DESTROY:
		/* Actual destruction is done inside uverbs_handle_method */
		return rdma_lookup_get_uobject(obj, attrs->ufile, id,
					       UVERBS_LOOKUP_DESTROY, attrs);
	case UVERBS_ACCESS_WRITE:
		return rdma_lookup_get_uobject(obj, attrs->ufile, id,
					       UVERBS_LOOKUP_WRITE, attrs);
	case UVERBS_ACCESS_NEW:
		return rdma_alloc_begin_uobject(obj, attrs);
	default:
		WARN_ON(true);
		return ERR_PTR(-EOPNOTSUPP);
	}
}

void uverbs_finalize_object(struct ib_uobject *uobj,
			    enum uverbs_obj_access access, bool commit,
			    struct uverbs_attr_bundle *attrs)
{
	/*
	 * refcounts should be handled at the object level and not at the
	 * uobject level. Refcounts of the objects themselves are done in
	 * handlers.
	 */

	switch (access) {
	case UVERBS_ACCESS_READ:
		rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ);
		break;
	case UVERBS_ACCESS_WRITE:
		rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
		break;
	case UVERBS_ACCESS_DESTROY:
		if (uobj)
			rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
		break;
	case UVERBS_ACCESS_NEW:
		if (commit)
			rdma_alloc_commit_uobject(uobj, attrs);
		else
			rdma_alloc_abort_uobject(uobj, attrs);
		break;
	default:
		WARN_ON(true);
	}
}