1 /* 2 * Copyright (c) 2016, Mellanox Technologies inc. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/file.h> 34 #include <linux/anon_inodes.h> 35 #include <linux/sched/mm.h> 36 #include <rdma/ib_verbs.h> 37 #include <rdma/uverbs_types.h> 38 #include <linux/rcupdate.h> 39 #include <rdma/uverbs_ioctl.h> 40 #include <rdma/rdma_user_ioctl.h> 41 #include "uverbs.h" 42 #include "core_priv.h" 43 #include "rdma_core.h" 44 45 static void uverbs_uobject_free(struct kref *ref) 46 { 47 kfree_rcu(container_of(ref, struct ib_uobject, ref), rcu); 48 } 49 50 /* 51 * In order to indicate we no longer needs this uobject, uverbs_uobject_put 52 * is called. When the reference count is decreased, the uobject is freed. 53 * For example, this is used when attaching a completion channel to a CQ. 54 */ 55 void uverbs_uobject_put(struct ib_uobject *uobject) 56 { 57 kref_put(&uobject->ref, uverbs_uobject_free); 58 } 59 EXPORT_SYMBOL(uverbs_uobject_put); 60 61 int uverbs_try_lock_object(struct ib_uobject *uobj, 62 enum rdma_lookup_mode mode) 63 { 64 /* 65 * When a shared access is required, we use a positive counter. Each 66 * shared access request checks that the value != -1 and increment it. 67 * Exclusive access is required for operations like write or destroy. 68 * In exclusive access mode, we check that the counter is zero (nobody 69 * claimed this object) and we set it to -1. Releasing a shared access 70 * lock is done simply by decreasing the counter. As for exclusive 71 * access locks, since only a single one of them is allowed 72 * concurrently, setting the counter to zero is enough for releasing 73 * this lock. 74 */ 75 switch (mode) { 76 case UVERBS_LOOKUP_READ: 77 return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ? 78 -EBUSY : 0; 79 case UVERBS_LOOKUP_WRITE: 80 /* lock is exclusive */ 81 return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY; 82 case UVERBS_LOOKUP_DESTROY: 83 return 0; 84 } 85 return 0; 86 } 87 EXPORT_SYMBOL(uverbs_try_lock_object); 88 89 static void assert_uverbs_usecnt(struct ib_uobject *uobj, 90 enum rdma_lookup_mode mode) 91 { 92 #ifdef CONFIG_LOCKDEP 93 switch (mode) { 94 case UVERBS_LOOKUP_READ: 95 WARN_ON(atomic_read(&uobj->usecnt) <= 0); 96 break; 97 case UVERBS_LOOKUP_WRITE: 98 WARN_ON(atomic_read(&uobj->usecnt) != -1); 99 break; 100 case UVERBS_LOOKUP_DESTROY: 101 break; 102 } 103 #endif 104 } 105 106 /* 107 * This must be called with the hw_destroy_rwsem locked for read or write, 108 * also the uobject itself must be locked for write. 109 * 110 * Upon return the HW object is guaranteed to be destroyed. 111 * 112 * For RDMA_REMOVE_ABORT, the hw_destroy_rwsem is not required to be held, 113 * however the type's allocat_commit function cannot have been called and the 114 * uobject cannot be on the uobjects_lists 115 * 116 * For RDMA_REMOVE_DESTROY the caller should be holding a kref (eg via 117 * rdma_lookup_get_uobject) and the object is left in a state where the caller 118 * needs to call rdma_lookup_put_uobject. 119 * 120 * For all other destroy modes this function internally unlocks the uobject 121 * and consumes the kref on the uobj. 122 */ 123 static int uverbs_destroy_uobject(struct ib_uobject *uobj, 124 enum rdma_remove_reason reason, 125 struct uverbs_attr_bundle *attrs) 126 { 127 struct ib_uverbs_file *ufile = attrs->ufile; 128 unsigned long flags; 129 int ret; 130 131 lockdep_assert_held(&ufile->hw_destroy_rwsem); 132 assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE); 133 134 if (reason == RDMA_REMOVE_ABORT) { 135 WARN_ON(!list_empty(&uobj->list)); 136 WARN_ON(!uobj->context); 137 uobj->uapi_object->type_class->alloc_abort(uobj); 138 } else if (uobj->object) { 139 ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason, 140 attrs); 141 if (ret) 142 /* Nothing to be done, wait till ucontext will clean it */ 143 return ret; 144 145 uobj->object = NULL; 146 } 147 148 uobj->context = NULL; 149 150 /* 151 * For DESTROY the usecnt is not changed, the caller is expected to 152 * manage it via uobj_put_destroy(). Only DESTROY can remove the IDR 153 * handle. 154 */ 155 if (reason != RDMA_REMOVE_DESTROY) 156 atomic_set(&uobj->usecnt, 0); 157 else 158 uobj->uapi_object->type_class->remove_handle(uobj); 159 160 if (!list_empty(&uobj->list)) { 161 spin_lock_irqsave(&ufile->uobjects_lock, flags); 162 list_del_init(&uobj->list); 163 spin_unlock_irqrestore(&ufile->uobjects_lock, flags); 164 165 /* 166 * Pairs with the get in rdma_alloc_commit_uobject(), could 167 * destroy uobj. 168 */ 169 uverbs_uobject_put(uobj); 170 } 171 172 /* 173 * When aborting the stack kref remains owned by the core code, and is 174 * not transferred into the type. Pairs with the get in alloc_uobj 175 */ 176 if (reason == RDMA_REMOVE_ABORT) 177 uverbs_uobject_put(uobj); 178 179 return 0; 180 } 181 182 /* 183 * This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY 184 * sequence. It should only be used from command callbacks. On success the 185 * caller must pair this with uobj_put_destroy(). This 186 * version requires the caller to have already obtained an 187 * LOOKUP_DESTROY uobject kref. 188 */ 189 int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs) 190 { 191 struct ib_uverbs_file *ufile = attrs->ufile; 192 int ret; 193 194 down_read(&ufile->hw_destroy_rwsem); 195 196 /* 197 * Once the uobject is destroyed by RDMA_REMOVE_DESTROY then it is left 198 * write locked as the callers put it back with UVERBS_LOOKUP_DESTROY. 199 * This is because any other concurrent thread can still see the object 200 * in the xarray due to RCU. Leaving it locked ensures nothing else will 201 * touch it. 202 */ 203 ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE); 204 if (ret) 205 goto out_unlock; 206 207 ret = uverbs_destroy_uobject(uobj, RDMA_REMOVE_DESTROY, attrs); 208 if (ret) { 209 atomic_set(&uobj->usecnt, 0); 210 goto out_unlock; 211 } 212 213 out_unlock: 214 up_read(&ufile->hw_destroy_rwsem); 215 return ret; 216 } 217 218 /* 219 * uobj_get_destroy destroys the HW object and returns a handle to the uobj 220 * with a NULL object pointer. The caller must pair this with 221 * uobj_put_destroy(). 222 */ 223 struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj, 224 u32 id, struct uverbs_attr_bundle *attrs) 225 { 226 struct ib_uobject *uobj; 227 int ret; 228 229 uobj = rdma_lookup_get_uobject(obj, attrs->ufile, id, 230 UVERBS_LOOKUP_DESTROY, attrs); 231 if (IS_ERR(uobj)) 232 return uobj; 233 234 ret = uobj_destroy(uobj, attrs); 235 if (ret) { 236 rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY); 237 return ERR_PTR(ret); 238 } 239 240 return uobj; 241 } 242 243 /* 244 * Does both uobj_get_destroy() and uobj_put_destroy(). Returns 0 on success 245 * (negative errno on failure). For use by callers that do not need the uobj. 246 */ 247 int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id, 248 struct uverbs_attr_bundle *attrs) 249 { 250 struct ib_uobject *uobj; 251 252 uobj = __uobj_get_destroy(obj, id, attrs); 253 if (IS_ERR(uobj)) 254 return PTR_ERR(uobj); 255 uobj_put_destroy(uobj); 256 return 0; 257 } 258 259 /* alloc_uobj must be undone by uverbs_destroy_uobject() */ 260 static struct ib_uobject *alloc_uobj(struct uverbs_attr_bundle *attrs, 261 const struct uverbs_api_object *obj) 262 { 263 struct ib_uverbs_file *ufile = attrs->ufile; 264 struct ib_uobject *uobj; 265 266 if (!attrs->context) { 267 struct ib_ucontext *ucontext = 268 ib_uverbs_get_ucontext_file(ufile); 269 270 if (IS_ERR(ucontext)) 271 return ERR_CAST(ucontext); 272 attrs->context = ucontext; 273 } 274 275 uobj = kzalloc(obj->type_attrs->obj_size, GFP_KERNEL); 276 if (!uobj) 277 return ERR_PTR(-ENOMEM); 278 /* 279 * user_handle should be filled by the handler, 280 * The object is added to the list in the commit stage. 281 */ 282 uobj->ufile = ufile; 283 uobj->context = attrs->context; 284 INIT_LIST_HEAD(&uobj->list); 285 uobj->uapi_object = obj; 286 /* 287 * Allocated objects start out as write locked to deny any other 288 * syscalls from accessing them until they are committed. See 289 * rdma_alloc_commit_uobject 290 */ 291 atomic_set(&uobj->usecnt, -1); 292 kref_init(&uobj->ref); 293 294 return uobj; 295 } 296 297 static int idr_add_uobj(struct ib_uobject *uobj) 298 { 299 /* 300 * We start with allocating an idr pointing to NULL. This represents an 301 * object which isn't initialized yet. We'll replace it later on with 302 * the real object once we commit. 303 */ 304 return xa_alloc(&uobj->ufile->idr, &uobj->id, NULL, xa_limit_32b, 305 GFP_KERNEL); 306 } 307 308 /* Returns the ib_uobject or an error. The caller should check for IS_ERR. */ 309 static struct ib_uobject * 310 lookup_get_idr_uobject(const struct uverbs_api_object *obj, 311 struct ib_uverbs_file *ufile, s64 id, 312 enum rdma_lookup_mode mode) 313 { 314 struct ib_uobject *uobj; 315 316 if (id < 0 || id > ULONG_MAX) 317 return ERR_PTR(-EINVAL); 318 319 rcu_read_lock(); 320 /* 321 * The idr_find is guaranteed to return a pointer to something that 322 * isn't freed yet, or NULL, as the free after idr_remove goes through 323 * kfree_rcu(). However the object may still have been released and 324 * kfree() could be called at any time. 325 */ 326 uobj = xa_load(&ufile->idr, id); 327 if (!uobj || !kref_get_unless_zero(&uobj->ref)) 328 uobj = ERR_PTR(-ENOENT); 329 rcu_read_unlock(); 330 return uobj; 331 } 332 333 static struct ib_uobject * 334 lookup_get_fd_uobject(const struct uverbs_api_object *obj, 335 struct ib_uverbs_file *ufile, s64 id, 336 enum rdma_lookup_mode mode) 337 { 338 const struct uverbs_obj_fd_type *fd_type; 339 struct file *f; 340 struct ib_uobject *uobject; 341 int fdno = id; 342 343 if (fdno != id) 344 return ERR_PTR(-EINVAL); 345 346 if (mode != UVERBS_LOOKUP_READ) 347 return ERR_PTR(-EOPNOTSUPP); 348 349 if (!obj->type_attrs) 350 return ERR_PTR(-EIO); 351 fd_type = 352 container_of(obj->type_attrs, struct uverbs_obj_fd_type, type); 353 354 f = fget(fdno); 355 if (!f) 356 return ERR_PTR(-EBADF); 357 358 uobject = f->private_data; 359 /* 360 * fget(id) ensures we are not currently running 361 * uverbs_uobject_fd_release(), and the caller is expected to ensure 362 * that release is never done while a call to lookup is possible. 363 */ 364 if (f->f_op != fd_type->fops || uobject->ufile != ufile) { 365 fput(f); 366 return ERR_PTR(-EBADF); 367 } 368 369 uverbs_uobject_get(uobject); 370 return uobject; 371 } 372 373 struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj, 374 struct ib_uverbs_file *ufile, s64 id, 375 enum rdma_lookup_mode mode, 376 struct uverbs_attr_bundle *attrs) 377 { 378 struct ib_uobject *uobj; 379 int ret; 380 381 if (obj == ERR_PTR(-ENOMSG)) { 382 /* must be UVERBS_IDR_ANY_OBJECT, see uapi_get_object() */ 383 uobj = lookup_get_idr_uobject(NULL, ufile, id, mode); 384 if (IS_ERR(uobj)) 385 return uobj; 386 } else { 387 if (IS_ERR(obj)) 388 return ERR_PTR(-EINVAL); 389 390 uobj = obj->type_class->lookup_get(obj, ufile, id, mode); 391 if (IS_ERR(uobj)) 392 return uobj; 393 394 if (uobj->uapi_object != obj) { 395 ret = -EINVAL; 396 goto free; 397 } 398 } 399 400 /* 401 * If we have been disassociated block every command except for 402 * DESTROY based commands. 403 */ 404 if (mode != UVERBS_LOOKUP_DESTROY && 405 !srcu_dereference(ufile->device->ib_dev, 406 &ufile->device->disassociate_srcu)) { 407 ret = -EIO; 408 goto free; 409 } 410 411 ret = uverbs_try_lock_object(uobj, mode); 412 if (ret) 413 goto free; 414 if (attrs) 415 attrs->context = uobj->context; 416 417 return uobj; 418 free: 419 uobj->uapi_object->type_class->lookup_put(uobj, mode); 420 uverbs_uobject_put(uobj); 421 return ERR_PTR(ret); 422 } 423 424 static struct ib_uobject * 425 alloc_begin_idr_uobject(const struct uverbs_api_object *obj, 426 struct uverbs_attr_bundle *attrs) 427 { 428 int ret; 429 struct ib_uobject *uobj; 430 431 uobj = alloc_uobj(attrs, obj); 432 if (IS_ERR(uobj)) 433 return uobj; 434 435 ret = idr_add_uobj(uobj); 436 if (ret) 437 goto uobj_put; 438 439 ret = ib_rdmacg_try_charge(&uobj->cg_obj, uobj->context->device, 440 RDMACG_RESOURCE_HCA_OBJECT); 441 if (ret) 442 goto remove; 443 444 return uobj; 445 446 remove: 447 xa_erase(&attrs->ufile->idr, uobj->id); 448 uobj_put: 449 uverbs_uobject_put(uobj); 450 return ERR_PTR(ret); 451 } 452 453 static struct ib_uobject * 454 alloc_begin_fd_uobject(const struct uverbs_api_object *obj, 455 struct uverbs_attr_bundle *attrs) 456 { 457 const struct uverbs_obj_fd_type *fd_type; 458 int new_fd; 459 struct ib_uobject *uobj, *ret; 460 struct file *filp; 461 462 uobj = alloc_uobj(attrs, obj); 463 if (IS_ERR(uobj)) 464 return uobj; 465 466 fd_type = 467 container_of(obj->type_attrs, struct uverbs_obj_fd_type, type); 468 if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release && 469 fd_type->fops->release != &uverbs_async_event_release)) { 470 ret = ERR_PTR(-EINVAL); 471 goto err_fd; 472 } 473 474 new_fd = get_unused_fd_flags(O_CLOEXEC); 475 if (new_fd < 0) { 476 ret = ERR_PTR(new_fd); 477 goto err_fd; 478 } 479 480 /* Note that uverbs_uobject_fd_release() is called during abort */ 481 filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL, 482 fd_type->flags); 483 if (IS_ERR(filp)) { 484 ret = ERR_CAST(filp); 485 goto err_getfile; 486 } 487 uobj->object = filp; 488 489 uobj->id = new_fd; 490 return uobj; 491 492 err_getfile: 493 put_unused_fd(new_fd); 494 err_fd: 495 uverbs_uobject_put(uobj); 496 return ret; 497 } 498 499 struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj, 500 struct uverbs_attr_bundle *attrs) 501 { 502 struct ib_uverbs_file *ufile = attrs->ufile; 503 struct ib_uobject *ret; 504 505 if (IS_ERR(obj)) 506 return ERR_PTR(-EINVAL); 507 508 /* 509 * The hw_destroy_rwsem is held across the entire object creation and 510 * released during rdma_alloc_commit_uobject or 511 * rdma_alloc_abort_uobject 512 */ 513 if (!down_read_trylock(&ufile->hw_destroy_rwsem)) 514 return ERR_PTR(-EIO); 515 516 ret = obj->type_class->alloc_begin(obj, attrs); 517 if (IS_ERR(ret)) { 518 up_read(&ufile->hw_destroy_rwsem); 519 return ret; 520 } 521 return ret; 522 } 523 524 static void alloc_abort_idr_uobject(struct ib_uobject *uobj) 525 { 526 ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device, 527 RDMACG_RESOURCE_HCA_OBJECT); 528 529 xa_erase(&uobj->ufile->idr, uobj->id); 530 } 531 532 static int __must_check destroy_hw_idr_uobject(struct ib_uobject *uobj, 533 enum rdma_remove_reason why, 534 struct uverbs_attr_bundle *attrs) 535 { 536 const struct uverbs_obj_idr_type *idr_type = 537 container_of(uobj->uapi_object->type_attrs, 538 struct uverbs_obj_idr_type, type); 539 int ret = idr_type->destroy_object(uobj, why, attrs); 540 541 if (ret) 542 return ret; 543 544 if (why == RDMA_REMOVE_ABORT) 545 return 0; 546 547 ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device, 548 RDMACG_RESOURCE_HCA_OBJECT); 549 550 return 0; 551 } 552 553 static void remove_handle_idr_uobject(struct ib_uobject *uobj) 554 { 555 xa_erase(&uobj->ufile->idr, uobj->id); 556 /* Matches the kref in alloc_commit_idr_uobject */ 557 uverbs_uobject_put(uobj); 558 } 559 560 static void alloc_abort_fd_uobject(struct ib_uobject *uobj) 561 { 562 struct file *filp = uobj->object; 563 564 fput(filp); 565 put_unused_fd(uobj->id); 566 } 567 568 static int __must_check destroy_hw_fd_uobject(struct ib_uobject *uobj, 569 enum rdma_remove_reason why, 570 struct uverbs_attr_bundle *attrs) 571 { 572 const struct uverbs_obj_fd_type *fd_type = container_of( 573 uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type); 574 575 fd_type->destroy_object(uobj, why); 576 return 0; 577 } 578 579 static void remove_handle_fd_uobject(struct ib_uobject *uobj) 580 { 581 } 582 583 static void alloc_commit_idr_uobject(struct ib_uobject *uobj) 584 { 585 struct ib_uverbs_file *ufile = uobj->ufile; 586 void *old; 587 588 /* 589 * We already allocated this IDR with a NULL object, so 590 * this shouldn't fail. 591 * 592 * NOTE: Storing the uobj transfers our kref on uobj to the XArray. 593 * It will be put by remove_commit_idr_uobject() 594 */ 595 old = xa_store(&ufile->idr, uobj->id, uobj, GFP_KERNEL); 596 WARN_ON(old != NULL); 597 } 598 599 static void swap_idr_uobjects(struct ib_uobject *obj_old, 600 struct ib_uobject *obj_new) 601 { 602 struct ib_uverbs_file *ufile = obj_old->ufile; 603 void *old; 604 605 /* 606 * New must be an object that been allocated but not yet committed, this 607 * moves the pre-committed state to obj_old, new still must be comitted. 608 */ 609 old = xa_cmpxchg(&ufile->idr, obj_old->id, obj_old, XA_ZERO_ENTRY, 610 GFP_KERNEL); 611 if (WARN_ON(old != obj_old)) 612 return; 613 614 swap(obj_old->id, obj_new->id); 615 616 old = xa_cmpxchg(&ufile->idr, obj_old->id, NULL, obj_old, GFP_KERNEL); 617 WARN_ON(old != NULL); 618 } 619 620 static void alloc_commit_fd_uobject(struct ib_uobject *uobj) 621 { 622 int fd = uobj->id; 623 struct file *filp = uobj->object; 624 625 /* Matching put will be done in uverbs_uobject_fd_release() */ 626 kref_get(&uobj->ufile->ref); 627 628 /* This shouldn't be used anymore. Use the file object instead */ 629 uobj->id = 0; 630 631 /* 632 * NOTE: Once we install the file we loose ownership of our kref on 633 * uobj. It will be put by uverbs_uobject_fd_release() 634 */ 635 filp->private_data = uobj; 636 fd_install(fd, filp); 637 } 638 639 /* 640 * In all cases rdma_alloc_commit_uobject() consumes the kref to uobj and the 641 * caller can no longer assume uobj is valid. If this function fails it 642 * destroys the uboject, including the attached HW object. 643 */ 644 void rdma_alloc_commit_uobject(struct ib_uobject *uobj, 645 struct uverbs_attr_bundle *attrs) 646 { 647 struct ib_uverbs_file *ufile = attrs->ufile; 648 649 /* kref is held so long as the uobj is on the uobj list. */ 650 uverbs_uobject_get(uobj); 651 spin_lock_irq(&ufile->uobjects_lock); 652 list_add(&uobj->list, &ufile->uobjects); 653 spin_unlock_irq(&ufile->uobjects_lock); 654 655 /* matches atomic_set(-1) in alloc_uobj */ 656 atomic_set(&uobj->usecnt, 0); 657 658 /* alloc_commit consumes the uobj kref */ 659 uobj->uapi_object->type_class->alloc_commit(uobj); 660 661 /* Matches the down_read in rdma_alloc_begin_uobject */ 662 up_read(&ufile->hw_destroy_rwsem); 663 } 664 665 /* 666 * new_uobj will be assigned to the handle currently used by to_uobj, and 667 * to_uobj will be destroyed. 668 * 669 * Upon return the caller must do: 670 * rdma_alloc_commit_uobject(new_uobj) 671 * uobj_put_destroy(to_uobj) 672 * 673 * to_uobj must have a write get but the put mode switches to destroy once 674 * this is called. 675 */ 676 void rdma_assign_uobject(struct ib_uobject *to_uobj, struct ib_uobject *new_uobj, 677 struct uverbs_attr_bundle *attrs) 678 { 679 assert_uverbs_usecnt(new_uobj, UVERBS_LOOKUP_WRITE); 680 681 if (WARN_ON(to_uobj->uapi_object != new_uobj->uapi_object || 682 !to_uobj->uapi_object->type_class->swap_uobjects)) 683 return; 684 685 to_uobj->uapi_object->type_class->swap_uobjects(to_uobj, new_uobj); 686 687 /* 688 * If this fails then the uobject is still completely valid (though with 689 * a new ID) and we leak it until context close. 690 */ 691 uverbs_destroy_uobject(to_uobj, RDMA_REMOVE_DESTROY, attrs); 692 } 693 694 /* 695 * This consumes the kref for uobj. It is up to the caller to unwind the HW 696 * object and anything else connected to uobj before calling this. 697 */ 698 void rdma_alloc_abort_uobject(struct ib_uobject *uobj, 699 struct uverbs_attr_bundle *attrs, 700 bool hw_obj_valid) 701 { 702 struct ib_uverbs_file *ufile = uobj->ufile; 703 int ret; 704 705 if (hw_obj_valid) { 706 ret = uobj->uapi_object->type_class->destroy_hw( 707 uobj, RDMA_REMOVE_ABORT, attrs); 708 /* 709 * If the driver couldn't destroy the object then go ahead and 710 * commit it. Leaking objects that can't be destroyed is only 711 * done during FD close after the driver has a few more tries to 712 * destroy it. 713 */ 714 if (WARN_ON(ret)) 715 return rdma_alloc_commit_uobject(uobj, attrs); 716 } 717 718 uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT, attrs); 719 720 /* Matches the down_read in rdma_alloc_begin_uobject */ 721 up_read(&ufile->hw_destroy_rwsem); 722 } 723 724 static void lookup_put_idr_uobject(struct ib_uobject *uobj, 725 enum rdma_lookup_mode mode) 726 { 727 } 728 729 static void lookup_put_fd_uobject(struct ib_uobject *uobj, 730 enum rdma_lookup_mode mode) 731 { 732 struct file *filp = uobj->object; 733 734 WARN_ON(mode != UVERBS_LOOKUP_READ); 735 /* 736 * This indirectly calls uverbs_uobject_fd_release() and free the 737 * object 738 */ 739 fput(filp); 740 } 741 742 void rdma_lookup_put_uobject(struct ib_uobject *uobj, 743 enum rdma_lookup_mode mode) 744 { 745 assert_uverbs_usecnt(uobj, mode); 746 /* 747 * In order to unlock an object, either decrease its usecnt for 748 * read access or zero it in case of exclusive access. See 749 * uverbs_try_lock_object for locking schema information. 750 */ 751 switch (mode) { 752 case UVERBS_LOOKUP_READ: 753 atomic_dec(&uobj->usecnt); 754 break; 755 case UVERBS_LOOKUP_WRITE: 756 atomic_set(&uobj->usecnt, 0); 757 break; 758 case UVERBS_LOOKUP_DESTROY: 759 break; 760 } 761 762 uobj->uapi_object->type_class->lookup_put(uobj, mode); 763 /* Pairs with the kref obtained by type->lookup_get */ 764 uverbs_uobject_put(uobj); 765 } 766 767 void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile) 768 { 769 xa_init_flags(&ufile->idr, XA_FLAGS_ALLOC); 770 } 771 772 void release_ufile_idr_uobject(struct ib_uverbs_file *ufile) 773 { 774 struct ib_uobject *entry; 775 unsigned long id; 776 777 /* 778 * At this point uverbs_cleanup_ufile() is guaranteed to have run, and 779 * there are no HW objects left, however the xarray is still populated 780 * with anything that has not been cleaned up by userspace. Since the 781 * kref on ufile is 0, nothing is allowed to call lookup_get. 782 * 783 * This is an optimized equivalent to remove_handle_idr_uobject 784 */ 785 xa_for_each(&ufile->idr, id, entry) { 786 WARN_ON(entry->object); 787 uverbs_uobject_put(entry); 788 } 789 790 xa_destroy(&ufile->idr); 791 } 792 793 const struct uverbs_obj_type_class uverbs_idr_class = { 794 .alloc_begin = alloc_begin_idr_uobject, 795 .lookup_get = lookup_get_idr_uobject, 796 .alloc_commit = alloc_commit_idr_uobject, 797 .alloc_abort = alloc_abort_idr_uobject, 798 .lookup_put = lookup_put_idr_uobject, 799 .destroy_hw = destroy_hw_idr_uobject, 800 .remove_handle = remove_handle_idr_uobject, 801 .swap_uobjects = swap_idr_uobjects, 802 }; 803 EXPORT_SYMBOL(uverbs_idr_class); 804 805 /* 806 * Users of UVERBS_TYPE_ALLOC_FD should set this function as the struct 807 * file_operations release method. 808 */ 809 int uverbs_uobject_fd_release(struct inode *inode, struct file *filp) 810 { 811 struct ib_uverbs_file *ufile; 812 struct ib_uobject *uobj; 813 814 /* 815 * This can only happen if the fput came from alloc_abort_fd_uobject() 816 */ 817 if (!filp->private_data) 818 return 0; 819 uobj = filp->private_data; 820 ufile = uobj->ufile; 821 822 if (down_read_trylock(&ufile->hw_destroy_rwsem)) { 823 struct uverbs_attr_bundle attrs = { 824 .context = uobj->context, 825 .ufile = ufile, 826 }; 827 828 /* 829 * lookup_get_fd_uobject holds the kref on the struct file any 830 * time a FD uobj is locked, which prevents this release 831 * method from being invoked. Meaning we can always get the 832 * write lock here, or we have a kernel bug. 833 */ 834 WARN_ON(uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE)); 835 uverbs_destroy_uobject(uobj, RDMA_REMOVE_CLOSE, &attrs); 836 up_read(&ufile->hw_destroy_rwsem); 837 } 838 839 /* Matches the get in alloc_commit_fd_uobject() */ 840 kref_put(&ufile->ref, ib_uverbs_release_file); 841 842 /* Pairs with filp->private_data in alloc_begin_fd_uobject */ 843 uverbs_uobject_put(uobj); 844 return 0; 845 } 846 EXPORT_SYMBOL(uverbs_uobject_fd_release); 847 848 /* 849 * Drop the ucontext off the ufile and completely disconnect it from the 850 * ib_device 851 */ 852 static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile, 853 enum rdma_remove_reason reason) 854 { 855 struct ib_ucontext *ucontext = ufile->ucontext; 856 struct ib_device *ib_dev = ucontext->device; 857 858 /* 859 * If we are closing the FD then the user mmap VMAs must have 860 * already been destroyed as they hold on to the filep, otherwise 861 * they need to be zap'd. 862 */ 863 if (reason == RDMA_REMOVE_DRIVER_REMOVE) { 864 uverbs_user_mmap_disassociate(ufile); 865 if (ib_dev->ops.disassociate_ucontext) 866 ib_dev->ops.disassociate_ucontext(ucontext); 867 } 868 869 ib_rdmacg_uncharge(&ucontext->cg_obj, ib_dev, 870 RDMACG_RESOURCE_HCA_HANDLE); 871 872 rdma_restrack_del(&ucontext->res); 873 874 ib_dev->ops.dealloc_ucontext(ucontext); 875 WARN_ON(!xa_empty(&ucontext->mmap_xa)); 876 kfree(ucontext); 877 878 ufile->ucontext = NULL; 879 } 880 881 static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile, 882 enum rdma_remove_reason reason) 883 { 884 struct uverbs_attr_bundle attrs = { .ufile = ufile }; 885 struct ib_ucontext *ucontext = ufile->ucontext; 886 struct ib_device *ib_dev = ucontext->device; 887 struct ib_uobject *obj, *next_obj; 888 int ret = -EINVAL; 889 890 if (ib_dev->ops.ufile_hw_cleanup) 891 ib_dev->ops.ufile_hw_cleanup(ufile); 892 893 /* 894 * This shouldn't run while executing other commands on this 895 * context. Thus, the only thing we should take care of is 896 * releasing a FD while traversing this list. The FD could be 897 * closed and released from the _release fop of this FD. 898 * In order to mitigate this, we add a lock. 899 * We take and release the lock per traversal in order to let 900 * other threads (which might still use the FDs) chance to run. 901 */ 902 list_for_each_entry_safe(obj, next_obj, &ufile->uobjects, list) { 903 attrs.context = obj->context; 904 /* 905 * if we hit this WARN_ON, that means we are 906 * racing with a lookup_get. 907 */ 908 WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE)); 909 if (reason == RDMA_REMOVE_DRIVER_FAILURE) 910 obj->object = NULL; 911 if (!uverbs_destroy_uobject(obj, reason, &attrs)) 912 ret = 0; 913 else 914 atomic_set(&obj->usecnt, 0); 915 } 916 917 if (reason == RDMA_REMOVE_DRIVER_FAILURE) { 918 WARN_ON(!list_empty(&ufile->uobjects)); 919 return 0; 920 } 921 return ret; 922 } 923 924 /* 925 * Destroy the ucontext and every uobject associated with it. 926 * 927 * This is internally locked and can be called in parallel from multiple 928 * contexts. 929 */ 930 void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile, 931 enum rdma_remove_reason reason) 932 { 933 down_write(&ufile->hw_destroy_rwsem); 934 935 /* 936 * If a ucontext was never created then we can't have any uobjects to 937 * cleanup, nothing to do. 938 */ 939 if (!ufile->ucontext) 940 goto done; 941 942 while (!list_empty(&ufile->uobjects) && 943 !__uverbs_cleanup_ufile(ufile, reason)) { 944 } 945 946 if (WARN_ON(!list_empty(&ufile->uobjects))) 947 __uverbs_cleanup_ufile(ufile, RDMA_REMOVE_DRIVER_FAILURE); 948 ufile_destroy_ucontext(ufile, reason); 949 950 done: 951 up_write(&ufile->hw_destroy_rwsem); 952 } 953 954 const struct uverbs_obj_type_class uverbs_fd_class = { 955 .alloc_begin = alloc_begin_fd_uobject, 956 .lookup_get = lookup_get_fd_uobject, 957 .alloc_commit = alloc_commit_fd_uobject, 958 .alloc_abort = alloc_abort_fd_uobject, 959 .lookup_put = lookup_put_fd_uobject, 960 .destroy_hw = destroy_hw_fd_uobject, 961 .remove_handle = remove_handle_fd_uobject, 962 }; 963 EXPORT_SYMBOL(uverbs_fd_class); 964 965 struct ib_uobject * 966 uverbs_get_uobject_from_file(u16 object_id, enum uverbs_obj_access access, 967 s64 id, struct uverbs_attr_bundle *attrs) 968 { 969 const struct uverbs_api_object *obj = 970 uapi_get_object(attrs->ufile->device->uapi, object_id); 971 972 switch (access) { 973 case UVERBS_ACCESS_READ: 974 return rdma_lookup_get_uobject(obj, attrs->ufile, id, 975 UVERBS_LOOKUP_READ, attrs); 976 case UVERBS_ACCESS_DESTROY: 977 /* Actual destruction is done inside uverbs_handle_method */ 978 return rdma_lookup_get_uobject(obj, attrs->ufile, id, 979 UVERBS_LOOKUP_DESTROY, attrs); 980 case UVERBS_ACCESS_WRITE: 981 return rdma_lookup_get_uobject(obj, attrs->ufile, id, 982 UVERBS_LOOKUP_WRITE, attrs); 983 case UVERBS_ACCESS_NEW: 984 return rdma_alloc_begin_uobject(obj, attrs); 985 default: 986 WARN_ON(true); 987 return ERR_PTR(-EOPNOTSUPP); 988 } 989 } 990 991 void uverbs_finalize_object(struct ib_uobject *uobj, 992 enum uverbs_obj_access access, bool hw_obj_valid, 993 bool commit, struct uverbs_attr_bundle *attrs) 994 { 995 /* 996 * refcounts should be handled at the object level and not at the 997 * uobject level. Refcounts of the objects themselves are done in 998 * handlers. 999 */ 1000 1001 switch (access) { 1002 case UVERBS_ACCESS_READ: 1003 rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ); 1004 break; 1005 case UVERBS_ACCESS_WRITE: 1006 rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE); 1007 break; 1008 case UVERBS_ACCESS_DESTROY: 1009 if (uobj) 1010 rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY); 1011 break; 1012 case UVERBS_ACCESS_NEW: 1013 if (commit) 1014 rdma_alloc_commit_uobject(uobj, attrs); 1015 else 1016 rdma_alloc_abort_uobject(uobj, attrs, hw_obj_valid); 1017 break; 1018 default: 1019 WARN_ON(true); 1020 } 1021 } 1022