1 /* 2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. 4 * Copyright (c) 2005 PathScale, Inc. All rights reserved. 5 * Copyright (c) 2006 Mellanox Technologies. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 
34 */ 35 36 #include <linux/file.h> 37 #include <linux/fs.h> 38 #include <linux/slab.h> 39 40 #include <asm/uaccess.h> 41 42 #include "uverbs.h" 43 44 static struct lock_class_key pd_lock_key; 45 static struct lock_class_key mr_lock_key; 46 static struct lock_class_key cq_lock_key; 47 static struct lock_class_key qp_lock_key; 48 static struct lock_class_key ah_lock_key; 49 static struct lock_class_key srq_lock_key; 50 static struct lock_class_key xrcd_lock_key; 51 52 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ 53 do { \ 54 (udata)->inbuf = (void __user *) (ibuf); \ 55 (udata)->outbuf = (void __user *) (obuf); \ 56 (udata)->inlen = (ilen); \ 57 (udata)->outlen = (olen); \ 58 } while (0) 59 60 /* 61 * The ib_uobject locking scheme is as follows: 62 * 63 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it 64 * needs to be held during all idr operations. When an object is 65 * looked up, a reference must be taken on the object's kref before 66 * dropping this lock. 67 * 68 * - Each object also has an rwsem. This rwsem must be held for 69 * reading while an operation that uses the object is performed. 70 * For example, while registering an MR, the associated PD's 71 * uobject.mutex must be held for reading. The rwsem must be held 72 * for writing while initializing or destroying an object. 73 * 74 * - In addition, each object has a "live" flag. If this flag is not 75 * set, then lookups of the object will fail even if it is found in 76 * the idr. This handles a reader that blocks and does not acquire 77 * the rwsem until after the object is destroyed. The destroy 78 * operation will set the live flag to 0 and then drop the rwsem; 79 * this will allow the reader to acquire the rwsem, see that the 80 * live flag is 0, and then drop the rwsem and its reference to 81 * object. The underlying storage will not be freed until the last 82 * reference to the object is dropped. 
83 */ 84 85 static void init_uobj(struct ib_uobject *uobj, u64 user_handle, 86 struct ib_ucontext *context, struct lock_class_key *key) 87 { 88 uobj->user_handle = user_handle; 89 uobj->context = context; 90 kref_init(&uobj->ref); 91 init_rwsem(&uobj->mutex); 92 lockdep_set_class(&uobj->mutex, key); 93 uobj->live = 0; 94 } 95 96 static void release_uobj(struct kref *kref) 97 { 98 kfree(container_of(kref, struct ib_uobject, ref)); 99 } 100 101 static void put_uobj(struct ib_uobject *uobj) 102 { 103 kref_put(&uobj->ref, release_uobj); 104 } 105 106 static void put_uobj_read(struct ib_uobject *uobj) 107 { 108 up_read(&uobj->mutex); 109 put_uobj(uobj); 110 } 111 112 static void put_uobj_write(struct ib_uobject *uobj) 113 { 114 up_write(&uobj->mutex); 115 put_uobj(uobj); 116 } 117 118 static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj) 119 { 120 int ret; 121 122 retry: 123 if (!idr_pre_get(idr, GFP_KERNEL)) 124 return -ENOMEM; 125 126 spin_lock(&ib_uverbs_idr_lock); 127 ret = idr_get_new(idr, uobj, &uobj->id); 128 spin_unlock(&ib_uverbs_idr_lock); 129 130 if (ret == -EAGAIN) 131 goto retry; 132 133 return ret; 134 } 135 136 void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj) 137 { 138 spin_lock(&ib_uverbs_idr_lock); 139 idr_remove(idr, uobj->id); 140 spin_unlock(&ib_uverbs_idr_lock); 141 } 142 143 static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id, 144 struct ib_ucontext *context) 145 { 146 struct ib_uobject *uobj; 147 148 spin_lock(&ib_uverbs_idr_lock); 149 uobj = idr_find(idr, id); 150 if (uobj) { 151 if (uobj->context == context) 152 kref_get(&uobj->ref); 153 else 154 uobj = NULL; 155 } 156 spin_unlock(&ib_uverbs_idr_lock); 157 158 return uobj; 159 } 160 161 static struct ib_uobject *idr_read_uobj(struct idr *idr, int id, 162 struct ib_ucontext *context, int nested) 163 { 164 struct ib_uobject *uobj; 165 166 uobj = __idr_get_uobj(idr, id, context); 167 if (!uobj) 168 return NULL; 169 170 if (nested) 171 
down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING); 172 else 173 down_read(&uobj->mutex); 174 if (!uobj->live) { 175 put_uobj_read(uobj); 176 return NULL; 177 } 178 179 return uobj; 180 } 181 182 static struct ib_uobject *idr_write_uobj(struct idr *idr, int id, 183 struct ib_ucontext *context) 184 { 185 struct ib_uobject *uobj; 186 187 uobj = __idr_get_uobj(idr, id, context); 188 if (!uobj) 189 return NULL; 190 191 down_write(&uobj->mutex); 192 if (!uobj->live) { 193 put_uobj_write(uobj); 194 return NULL; 195 } 196 197 return uobj; 198 } 199 200 static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context, 201 int nested) 202 { 203 struct ib_uobject *uobj; 204 205 uobj = idr_read_uobj(idr, id, context, nested); 206 return uobj ? uobj->object : NULL; 207 } 208 209 static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context) 210 { 211 return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0); 212 } 213 214 static void put_pd_read(struct ib_pd *pd) 215 { 216 put_uobj_read(pd->uobject); 217 } 218 219 static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested) 220 { 221 return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested); 222 } 223 224 static void put_cq_read(struct ib_cq *cq) 225 { 226 put_uobj_read(cq->uobject); 227 } 228 229 static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context) 230 { 231 return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0); 232 } 233 234 static void put_ah_read(struct ib_ah *ah) 235 { 236 put_uobj_read(ah->uobject); 237 } 238 239 static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context) 240 { 241 return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0); 242 } 243 244 static void put_qp_read(struct ib_qp *qp) 245 { 246 put_uobj_read(qp->uobject); 247 } 248 249 static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context) 250 { 251 return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0); 
252 } 253 254 static void put_srq_read(struct ib_srq *srq) 255 { 256 put_uobj_read(srq->uobject); 257 } 258 259 static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context, 260 struct ib_uobject **uobj) 261 { 262 *uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0); 263 return *uobj ? (*uobj)->object : NULL; 264 } 265 266 static void put_xrcd_read(struct ib_uobject *uobj) 267 { 268 put_uobj_read(uobj); 269 } 270 271 ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, 272 const char __user *buf, 273 int in_len, int out_len) 274 { 275 struct ib_uverbs_get_context cmd; 276 struct ib_uverbs_get_context_resp resp; 277 struct ib_udata udata; 278 struct ib_device *ibdev = file->device->ib_dev; 279 struct ib_ucontext *ucontext; 280 struct file *filp; 281 int ret; 282 283 if (out_len < sizeof resp) 284 return -ENOSPC; 285 286 if (copy_from_user(&cmd, buf, sizeof cmd)) 287 return -EFAULT; 288 289 mutex_lock(&file->mutex); 290 291 if (file->ucontext) { 292 ret = -EINVAL; 293 goto err; 294 } 295 296 INIT_UDATA(&udata, buf + sizeof cmd, 297 (unsigned long) cmd.response + sizeof resp, 298 in_len - sizeof cmd, out_len - sizeof resp); 299 300 ucontext = ibdev->alloc_ucontext(ibdev, &udata); 301 if (IS_ERR(ucontext)) { 302 ret = PTR_ERR(ucontext); 303 goto err; 304 } 305 306 ucontext->device = ibdev; 307 INIT_LIST_HEAD(&ucontext->pd_list); 308 INIT_LIST_HEAD(&ucontext->mr_list); 309 INIT_LIST_HEAD(&ucontext->mw_list); 310 INIT_LIST_HEAD(&ucontext->cq_list); 311 INIT_LIST_HEAD(&ucontext->qp_list); 312 INIT_LIST_HEAD(&ucontext->srq_list); 313 INIT_LIST_HEAD(&ucontext->ah_list); 314 INIT_LIST_HEAD(&ucontext->xrcd_list); 315 ucontext->closing = 0; 316 317 resp.num_comp_vectors = file->device->num_comp_vectors; 318 319 ret = get_unused_fd(); 320 if (ret < 0) 321 goto err_free; 322 resp.async_fd = ret; 323 324 filp = ib_uverbs_alloc_event_file(file, 1); 325 if (IS_ERR(filp)) { 326 ret = PTR_ERR(filp); 327 goto err_fd; 328 } 329 330 if 
(copy_to_user((void __user *) (unsigned long) cmd.response, 331 &resp, sizeof resp)) { 332 ret = -EFAULT; 333 goto err_file; 334 } 335 336 file->async_file = filp->private_data; 337 338 INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev, 339 ib_uverbs_event_handler); 340 ret = ib_register_event_handler(&file->event_handler); 341 if (ret) 342 goto err_file; 343 344 kref_get(&file->async_file->ref); 345 kref_get(&file->ref); 346 file->ucontext = ucontext; 347 348 fd_install(resp.async_fd, filp); 349 350 mutex_unlock(&file->mutex); 351 352 return in_len; 353 354 err_file: 355 fput(filp); 356 357 err_fd: 358 put_unused_fd(resp.async_fd); 359 360 err_free: 361 ibdev->dealloc_ucontext(ucontext); 362 363 err: 364 mutex_unlock(&file->mutex); 365 return ret; 366 } 367 368 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, 369 const char __user *buf, 370 int in_len, int out_len) 371 { 372 struct ib_uverbs_query_device cmd; 373 struct ib_uverbs_query_device_resp resp; 374 struct ib_device_attr attr; 375 int ret; 376 377 if (out_len < sizeof resp) 378 return -ENOSPC; 379 380 if (copy_from_user(&cmd, buf, sizeof cmd)) 381 return -EFAULT; 382 383 ret = ib_query_device(file->device->ib_dev, &attr); 384 if (ret) 385 return ret; 386 387 memset(&resp, 0, sizeof resp); 388 389 resp.fw_ver = attr.fw_ver; 390 resp.node_guid = file->device->ib_dev->node_guid; 391 resp.sys_image_guid = attr.sys_image_guid; 392 resp.max_mr_size = attr.max_mr_size; 393 resp.page_size_cap = attr.page_size_cap; 394 resp.vendor_id = attr.vendor_id; 395 resp.vendor_part_id = attr.vendor_part_id; 396 resp.hw_ver = attr.hw_ver; 397 resp.max_qp = attr.max_qp; 398 resp.max_qp_wr = attr.max_qp_wr; 399 resp.device_cap_flags = attr.device_cap_flags; 400 resp.max_sge = attr.max_sge; 401 resp.max_sge_rd = attr.max_sge_rd; 402 resp.max_cq = attr.max_cq; 403 resp.max_cqe = attr.max_cqe; 404 resp.max_mr = attr.max_mr; 405 resp.max_pd = attr.max_pd; 406 resp.max_qp_rd_atom = attr.max_qp_rd_atom; 407 
resp.max_ee_rd_atom = attr.max_ee_rd_atom; 408 resp.max_res_rd_atom = attr.max_res_rd_atom; 409 resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom; 410 resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom; 411 resp.atomic_cap = attr.atomic_cap; 412 resp.max_ee = attr.max_ee; 413 resp.max_rdd = attr.max_rdd; 414 resp.max_mw = attr.max_mw; 415 resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp; 416 resp.max_raw_ethy_qp = attr.max_raw_ethy_qp; 417 resp.max_mcast_grp = attr.max_mcast_grp; 418 resp.max_mcast_qp_attach = attr.max_mcast_qp_attach; 419 resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach; 420 resp.max_ah = attr.max_ah; 421 resp.max_fmr = attr.max_fmr; 422 resp.max_map_per_fmr = attr.max_map_per_fmr; 423 resp.max_srq = attr.max_srq; 424 resp.max_srq_wr = attr.max_srq_wr; 425 resp.max_srq_sge = attr.max_srq_sge; 426 resp.max_pkeys = attr.max_pkeys; 427 resp.local_ca_ack_delay = attr.local_ca_ack_delay; 428 resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt; 429 430 if (copy_to_user((void __user *) (unsigned long) cmd.response, 431 &resp, sizeof resp)) 432 return -EFAULT; 433 434 return in_len; 435 } 436 437 ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file, 438 const char __user *buf, 439 int in_len, int out_len) 440 { 441 struct ib_uverbs_query_port cmd; 442 struct ib_uverbs_query_port_resp resp; 443 struct ib_port_attr attr; 444 int ret; 445 446 if (out_len < sizeof resp) 447 return -ENOSPC; 448 449 if (copy_from_user(&cmd, buf, sizeof cmd)) 450 return -EFAULT; 451 452 ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr); 453 if (ret) 454 return ret; 455 456 memset(&resp, 0, sizeof resp); 457 458 resp.state = attr.state; 459 resp.max_mtu = attr.max_mtu; 460 resp.active_mtu = attr.active_mtu; 461 resp.gid_tbl_len = attr.gid_tbl_len; 462 resp.port_cap_flags = attr.port_cap_flags; 463 resp.max_msg_sz = attr.max_msg_sz; 464 resp.bad_pkey_cntr = attr.bad_pkey_cntr; 465 resp.qkey_viol_cntr = attr.qkey_viol_cntr; 466 resp.pkey_tbl_len = 
attr.pkey_tbl_len; 467 resp.lid = attr.lid; 468 resp.sm_lid = attr.sm_lid; 469 resp.lmc = attr.lmc; 470 resp.max_vl_num = attr.max_vl_num; 471 resp.sm_sl = attr.sm_sl; 472 resp.subnet_timeout = attr.subnet_timeout; 473 resp.init_type_reply = attr.init_type_reply; 474 resp.active_width = attr.active_width; 475 resp.active_speed = attr.active_speed; 476 resp.phys_state = attr.phys_state; 477 resp.link_layer = rdma_port_get_link_layer(file->device->ib_dev, 478 cmd.port_num); 479 480 if (copy_to_user((void __user *) (unsigned long) cmd.response, 481 &resp, sizeof resp)) 482 return -EFAULT; 483 484 return in_len; 485 } 486 487 ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file, 488 const char __user *buf, 489 int in_len, int out_len) 490 { 491 struct ib_uverbs_alloc_pd cmd; 492 struct ib_uverbs_alloc_pd_resp resp; 493 struct ib_udata udata; 494 struct ib_uobject *uobj; 495 struct ib_pd *pd; 496 int ret; 497 498 if (out_len < sizeof resp) 499 return -ENOSPC; 500 501 if (copy_from_user(&cmd, buf, sizeof cmd)) 502 return -EFAULT; 503 504 INIT_UDATA(&udata, buf + sizeof cmd, 505 (unsigned long) cmd.response + sizeof resp, 506 in_len - sizeof cmd, out_len - sizeof resp); 507 508 uobj = kmalloc(sizeof *uobj, GFP_KERNEL); 509 if (!uobj) 510 return -ENOMEM; 511 512 init_uobj(uobj, 0, file->ucontext, &pd_lock_key); 513 down_write(&uobj->mutex); 514 515 pd = file->device->ib_dev->alloc_pd(file->device->ib_dev, 516 file->ucontext, &udata); 517 if (IS_ERR(pd)) { 518 ret = PTR_ERR(pd); 519 goto err; 520 } 521 522 pd->device = file->device->ib_dev; 523 pd->uobject = uobj; 524 atomic_set(&pd->usecnt, 0); 525 526 uobj->object = pd; 527 ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj); 528 if (ret) 529 goto err_idr; 530 531 memset(&resp, 0, sizeof resp); 532 resp.pd_handle = uobj->id; 533 534 if (copy_to_user((void __user *) (unsigned long) cmd.response, 535 &resp, sizeof resp)) { 536 ret = -EFAULT; 537 goto err_copy; 538 } 539 540 mutex_lock(&file->mutex); 541 list_add_tail(&uobj->list, 
&file->ucontext->pd_list); 542 mutex_unlock(&file->mutex); 543 544 uobj->live = 1; 545 546 up_write(&uobj->mutex); 547 548 return in_len; 549 550 err_copy: 551 idr_remove_uobj(&ib_uverbs_pd_idr, uobj); 552 553 err_idr: 554 ib_dealloc_pd(pd); 555 556 err: 557 put_uobj_write(uobj); 558 return ret; 559 } 560 561 ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file, 562 const char __user *buf, 563 int in_len, int out_len) 564 { 565 struct ib_uverbs_dealloc_pd cmd; 566 struct ib_uobject *uobj; 567 int ret; 568 569 if (copy_from_user(&cmd, buf, sizeof cmd)) 570 return -EFAULT; 571 572 uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext); 573 if (!uobj) 574 return -EINVAL; 575 576 ret = ib_dealloc_pd(uobj->object); 577 if (!ret) 578 uobj->live = 0; 579 580 put_uobj_write(uobj); 581 582 if (ret) 583 return ret; 584 585 idr_remove_uobj(&ib_uverbs_pd_idr, uobj); 586 587 mutex_lock(&file->mutex); 588 list_del(&uobj->list); 589 mutex_unlock(&file->mutex); 590 591 put_uobj(uobj); 592 593 return in_len; 594 } 595 596 struct xrcd_table_entry { 597 struct rb_node node; 598 struct ib_xrcd *xrcd; 599 struct inode *inode; 600 }; 601 602 static int xrcd_table_insert(struct ib_uverbs_device *dev, 603 struct inode *inode, 604 struct ib_xrcd *xrcd) 605 { 606 struct xrcd_table_entry *entry, *scan; 607 struct rb_node **p = &dev->xrcd_tree.rb_node; 608 struct rb_node *parent = NULL; 609 610 entry = kmalloc(sizeof *entry, GFP_KERNEL); 611 if (!entry) 612 return -ENOMEM; 613 614 entry->xrcd = xrcd; 615 entry->inode = inode; 616 617 while (*p) { 618 parent = *p; 619 scan = rb_entry(parent, struct xrcd_table_entry, node); 620 621 if (inode < scan->inode) { 622 p = &(*p)->rb_left; 623 } else if (inode > scan->inode) { 624 p = &(*p)->rb_right; 625 } else { 626 kfree(entry); 627 return -EEXIST; 628 } 629 } 630 631 rb_link_node(&entry->node, parent, p); 632 rb_insert_color(&entry->node, &dev->xrcd_tree); 633 igrab(inode); 634 return 0; 635 } 636 637 static struct 
xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev, 638 struct inode *inode) 639 { 640 struct xrcd_table_entry *entry; 641 struct rb_node *p = dev->xrcd_tree.rb_node; 642 643 while (p) { 644 entry = rb_entry(p, struct xrcd_table_entry, node); 645 646 if (inode < entry->inode) 647 p = p->rb_left; 648 else if (inode > entry->inode) 649 p = p->rb_right; 650 else 651 return entry; 652 } 653 654 return NULL; 655 } 656 657 static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode) 658 { 659 struct xrcd_table_entry *entry; 660 661 entry = xrcd_table_search(dev, inode); 662 if (!entry) 663 return NULL; 664 665 return entry->xrcd; 666 } 667 668 static void xrcd_table_delete(struct ib_uverbs_device *dev, 669 struct inode *inode) 670 { 671 struct xrcd_table_entry *entry; 672 673 entry = xrcd_table_search(dev, inode); 674 if (entry) { 675 iput(inode); 676 rb_erase(&entry->node, &dev->xrcd_tree); 677 kfree(entry); 678 } 679 } 680 681 ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file, 682 const char __user *buf, int in_len, 683 int out_len) 684 { 685 struct ib_uverbs_open_xrcd cmd; 686 struct ib_uverbs_open_xrcd_resp resp; 687 struct ib_udata udata; 688 struct ib_uxrcd_object *obj; 689 struct ib_xrcd *xrcd = NULL; 690 struct file *f = NULL; 691 struct inode *inode = NULL; 692 int ret = 0; 693 int new_xrcd = 0; 694 695 if (out_len < sizeof resp) 696 return -ENOSPC; 697 698 if (copy_from_user(&cmd, buf, sizeof cmd)) 699 return -EFAULT; 700 701 INIT_UDATA(&udata, buf + sizeof cmd, 702 (unsigned long) cmd.response + sizeof resp, 703 in_len - sizeof cmd, out_len - sizeof resp); 704 705 mutex_lock(&file->device->xrcd_tree_mutex); 706 707 if (cmd.fd != -1) { 708 /* search for file descriptor */ 709 f = fget(cmd.fd); 710 if (!f) { 711 ret = -EBADF; 712 goto err_tree_mutex_unlock; 713 } 714 715 inode = f->f_dentry->d_inode; 716 if (!inode) { 717 ret = -EBADF; 718 goto err_tree_mutex_unlock; 719 } 720 721 xrcd = find_xrcd(file->device, inode); 
722 if (!xrcd && !(cmd.oflags & O_CREAT)) { 723 /* no file descriptor. Need CREATE flag */ 724 ret = -EAGAIN; 725 goto err_tree_mutex_unlock; 726 } 727 728 if (xrcd && cmd.oflags & O_EXCL) { 729 ret = -EINVAL; 730 goto err_tree_mutex_unlock; 731 } 732 } 733 734 obj = kmalloc(sizeof *obj, GFP_KERNEL); 735 if (!obj) { 736 ret = -ENOMEM; 737 goto err_tree_mutex_unlock; 738 } 739 740 init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_key); 741 742 down_write(&obj->uobject.mutex); 743 744 if (!xrcd) { 745 xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev, 746 file->ucontext, &udata); 747 if (IS_ERR(xrcd)) { 748 ret = PTR_ERR(xrcd); 749 goto err; 750 } 751 752 xrcd->inode = inode; 753 xrcd->device = file->device->ib_dev; 754 atomic_set(&xrcd->usecnt, 0); 755 mutex_init(&xrcd->tgt_qp_mutex); 756 INIT_LIST_HEAD(&xrcd->tgt_qp_list); 757 new_xrcd = 1; 758 } 759 760 atomic_set(&obj->refcnt, 0); 761 obj->uobject.object = xrcd; 762 ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject); 763 if (ret) 764 goto err_idr; 765 766 memset(&resp, 0, sizeof resp); 767 resp.xrcd_handle = obj->uobject.id; 768 769 if (inode) { 770 if (new_xrcd) { 771 /* create new inode/xrcd table entry */ 772 ret = xrcd_table_insert(file->device, inode, xrcd); 773 if (ret) 774 goto err_insert_xrcd; 775 } 776 atomic_inc(&xrcd->usecnt); 777 } 778 779 if (copy_to_user((void __user *) (unsigned long) cmd.response, 780 &resp, sizeof resp)) { 781 ret = -EFAULT; 782 goto err_copy; 783 } 784 785 if (f) 786 fput(f); 787 788 mutex_lock(&file->mutex); 789 list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list); 790 mutex_unlock(&file->mutex); 791 792 obj->uobject.live = 1; 793 up_write(&obj->uobject.mutex); 794 795 mutex_unlock(&file->device->xrcd_tree_mutex); 796 return in_len; 797 798 err_copy: 799 if (inode) { 800 if (new_xrcd) 801 xrcd_table_delete(file->device, inode); 802 atomic_dec(&xrcd->usecnt); 803 } 804 805 err_insert_xrcd: 806 idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject); 807 
808 err_idr: 809 ib_dealloc_xrcd(xrcd); 810 811 err: 812 put_uobj_write(&obj->uobject); 813 814 err_tree_mutex_unlock: 815 if (f) 816 fput(f); 817 818 mutex_unlock(&file->device->xrcd_tree_mutex); 819 820 return ret; 821 } 822 823 ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file, 824 const char __user *buf, int in_len, 825 int out_len) 826 { 827 struct ib_uverbs_close_xrcd cmd; 828 struct ib_uobject *uobj; 829 struct ib_xrcd *xrcd = NULL; 830 struct inode *inode = NULL; 831 struct ib_uxrcd_object *obj; 832 int live; 833 int ret = 0; 834 835 if (copy_from_user(&cmd, buf, sizeof cmd)) 836 return -EFAULT; 837 838 mutex_lock(&file->device->xrcd_tree_mutex); 839 uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext); 840 if (!uobj) { 841 ret = -EINVAL; 842 goto out; 843 } 844 845 xrcd = uobj->object; 846 inode = xrcd->inode; 847 obj = container_of(uobj, struct ib_uxrcd_object, uobject); 848 if (atomic_read(&obj->refcnt)) { 849 put_uobj_write(uobj); 850 ret = -EBUSY; 851 goto out; 852 } 853 854 if (!inode || atomic_dec_and_test(&xrcd->usecnt)) { 855 ret = ib_dealloc_xrcd(uobj->object); 856 if (!ret) 857 uobj->live = 0; 858 } 859 860 live = uobj->live; 861 if (inode && ret) 862 atomic_inc(&xrcd->usecnt); 863 864 put_uobj_write(uobj); 865 866 if (ret) 867 goto out; 868 869 if (inode && !live) 870 xrcd_table_delete(file->device, inode); 871 872 idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj); 873 mutex_lock(&file->mutex); 874 list_del(&uobj->list); 875 mutex_unlock(&file->mutex); 876 877 put_uobj(uobj); 878 ret = in_len; 879 880 out: 881 mutex_unlock(&file->device->xrcd_tree_mutex); 882 return ret; 883 } 884 885 void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev, 886 struct ib_xrcd *xrcd) 887 { 888 struct inode *inode; 889 890 inode = xrcd->inode; 891 if (inode && !atomic_dec_and_test(&xrcd->usecnt)) 892 return; 893 894 ib_dealloc_xrcd(xrcd); 895 896 if (inode) 897 xrcd_table_delete(dev, inode); 898 } 899 900 ssize_t ib_uverbs_reg_mr(struct 
ib_uverbs_file *file, 901 const char __user *buf, int in_len, 902 int out_len) 903 { 904 struct ib_uverbs_reg_mr cmd; 905 struct ib_uverbs_reg_mr_resp resp; 906 struct ib_udata udata; 907 struct ib_uobject *uobj; 908 struct ib_pd *pd; 909 struct ib_mr *mr; 910 int ret; 911 912 if (out_len < sizeof resp) 913 return -ENOSPC; 914 915 if (copy_from_user(&cmd, buf, sizeof cmd)) 916 return -EFAULT; 917 918 INIT_UDATA(&udata, buf + sizeof cmd, 919 (unsigned long) cmd.response + sizeof resp, 920 in_len - sizeof cmd, out_len - sizeof resp); 921 922 if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)) 923 return -EINVAL; 924 925 /* 926 * Local write permission is required if remote write or 927 * remote atomic permission is also requested. 928 */ 929 if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) && 930 !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE)) 931 return -EINVAL; 932 933 uobj = kmalloc(sizeof *uobj, GFP_KERNEL); 934 if (!uobj) 935 return -ENOMEM; 936 937 init_uobj(uobj, 0, file->ucontext, &mr_lock_key); 938 down_write(&uobj->mutex); 939 940 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 941 if (!pd) { 942 ret = -EINVAL; 943 goto err_free; 944 } 945 946 mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va, 947 cmd.access_flags, &udata); 948 if (IS_ERR(mr)) { 949 ret = PTR_ERR(mr); 950 goto err_put; 951 } 952 953 mr->device = pd->device; 954 mr->pd = pd; 955 mr->uobject = uobj; 956 atomic_inc(&pd->usecnt); 957 atomic_set(&mr->usecnt, 0); 958 959 uobj->object = mr; 960 ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj); 961 if (ret) 962 goto err_unreg; 963 964 memset(&resp, 0, sizeof resp); 965 resp.lkey = mr->lkey; 966 resp.rkey = mr->rkey; 967 resp.mr_handle = uobj->id; 968 969 if (copy_to_user((void __user *) (unsigned long) cmd.response, 970 &resp, sizeof resp)) { 971 ret = -EFAULT; 972 goto err_copy; 973 } 974 975 put_pd_read(pd); 976 977 mutex_lock(&file->mutex); 978 list_add_tail(&uobj->list, &file->ucontext->mr_list); 979 
mutex_unlock(&file->mutex); 980 981 uobj->live = 1; 982 983 up_write(&uobj->mutex); 984 985 return in_len; 986 987 err_copy: 988 idr_remove_uobj(&ib_uverbs_mr_idr, uobj); 989 990 err_unreg: 991 ib_dereg_mr(mr); 992 993 err_put: 994 put_pd_read(pd); 995 996 err_free: 997 put_uobj_write(uobj); 998 return ret; 999 } 1000 1001 ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file, 1002 const char __user *buf, int in_len, 1003 int out_len) 1004 { 1005 struct ib_uverbs_dereg_mr cmd; 1006 struct ib_mr *mr; 1007 struct ib_uobject *uobj; 1008 int ret = -EINVAL; 1009 1010 if (copy_from_user(&cmd, buf, sizeof cmd)) 1011 return -EFAULT; 1012 1013 uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext); 1014 if (!uobj) 1015 return -EINVAL; 1016 1017 mr = uobj->object; 1018 1019 ret = ib_dereg_mr(mr); 1020 if (!ret) 1021 uobj->live = 0; 1022 1023 put_uobj_write(uobj); 1024 1025 if (ret) 1026 return ret; 1027 1028 idr_remove_uobj(&ib_uverbs_mr_idr, uobj); 1029 1030 mutex_lock(&file->mutex); 1031 list_del(&uobj->list); 1032 mutex_unlock(&file->mutex); 1033 1034 put_uobj(uobj); 1035 1036 return in_len; 1037 } 1038 1039 ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file, 1040 const char __user *buf, int in_len, 1041 int out_len) 1042 { 1043 struct ib_uverbs_create_comp_channel cmd; 1044 struct ib_uverbs_create_comp_channel_resp resp; 1045 struct file *filp; 1046 int ret; 1047 1048 if (out_len < sizeof resp) 1049 return -ENOSPC; 1050 1051 if (copy_from_user(&cmd, buf, sizeof cmd)) 1052 return -EFAULT; 1053 1054 ret = get_unused_fd(); 1055 if (ret < 0) 1056 return ret; 1057 resp.fd = ret; 1058 1059 filp = ib_uverbs_alloc_event_file(file, 0); 1060 if (IS_ERR(filp)) { 1061 put_unused_fd(resp.fd); 1062 return PTR_ERR(filp); 1063 } 1064 1065 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1066 &resp, sizeof resp)) { 1067 put_unused_fd(resp.fd); 1068 fput(filp); 1069 return -EFAULT; 1070 } 1071 1072 fd_install(resp.fd, filp); 1073 return 
in_len; 1074 } 1075 1076 ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file, 1077 const char __user *buf, int in_len, 1078 int out_len) 1079 { 1080 struct ib_uverbs_create_cq cmd; 1081 struct ib_uverbs_create_cq_resp resp; 1082 struct ib_udata udata; 1083 struct ib_ucq_object *obj; 1084 struct ib_uverbs_event_file *ev_file = NULL; 1085 struct ib_cq *cq; 1086 int ret; 1087 1088 if (out_len < sizeof resp) 1089 return -ENOSPC; 1090 1091 if (copy_from_user(&cmd, buf, sizeof cmd)) 1092 return -EFAULT; 1093 1094 INIT_UDATA(&udata, buf + sizeof cmd, 1095 (unsigned long) cmd.response + sizeof resp, 1096 in_len - sizeof cmd, out_len - sizeof resp); 1097 1098 if (cmd.comp_vector >= file->device->num_comp_vectors) 1099 return -EINVAL; 1100 1101 obj = kmalloc(sizeof *obj, GFP_KERNEL); 1102 if (!obj) 1103 return -ENOMEM; 1104 1105 init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_key); 1106 down_write(&obj->uobject.mutex); 1107 1108 if (cmd.comp_channel >= 0) { 1109 ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel); 1110 if (!ev_file) { 1111 ret = -EINVAL; 1112 goto err; 1113 } 1114 } 1115 1116 obj->uverbs_file = file; 1117 obj->comp_events_reported = 0; 1118 obj->async_events_reported = 0; 1119 INIT_LIST_HEAD(&obj->comp_list); 1120 INIT_LIST_HEAD(&obj->async_list); 1121 1122 cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe, 1123 cmd.comp_vector, 1124 file->ucontext, &udata); 1125 if (IS_ERR(cq)) { 1126 ret = PTR_ERR(cq); 1127 goto err_file; 1128 } 1129 1130 cq->device = file->device->ib_dev; 1131 cq->uobject = &obj->uobject; 1132 cq->comp_handler = ib_uverbs_comp_handler; 1133 cq->event_handler = ib_uverbs_cq_event_handler; 1134 cq->cq_context = ev_file; 1135 atomic_set(&cq->usecnt, 0); 1136 1137 obj->uobject.object = cq; 1138 ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject); 1139 if (ret) 1140 goto err_free; 1141 1142 memset(&resp, 0, sizeof resp); 1143 resp.cq_handle = obj->uobject.id; 1144 resp.cqe = cq->cqe; 1145 1146 if 
(copy_to_user((void __user *) (unsigned long) cmd.response, 1147 &resp, sizeof resp)) { 1148 ret = -EFAULT; 1149 goto err_copy; 1150 } 1151 1152 mutex_lock(&file->mutex); 1153 list_add_tail(&obj->uobject.list, &file->ucontext->cq_list); 1154 mutex_unlock(&file->mutex); 1155 1156 obj->uobject.live = 1; 1157 1158 up_write(&obj->uobject.mutex); 1159 1160 return in_len; 1161 1162 err_copy: 1163 idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject); 1164 1165 err_free: 1166 ib_destroy_cq(cq); 1167 1168 err_file: 1169 if (ev_file) 1170 ib_uverbs_release_ucq(file, ev_file, obj); 1171 1172 err: 1173 put_uobj_write(&obj->uobject); 1174 return ret; 1175 } 1176 1177 ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file, 1178 const char __user *buf, int in_len, 1179 int out_len) 1180 { 1181 struct ib_uverbs_resize_cq cmd; 1182 struct ib_uverbs_resize_cq_resp resp; 1183 struct ib_udata udata; 1184 struct ib_cq *cq; 1185 int ret = -EINVAL; 1186 1187 if (copy_from_user(&cmd, buf, sizeof cmd)) 1188 return -EFAULT; 1189 1190 INIT_UDATA(&udata, buf + sizeof cmd, 1191 (unsigned long) cmd.response + sizeof resp, 1192 in_len - sizeof cmd, out_len - sizeof resp); 1193 1194 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0); 1195 if (!cq) 1196 return -EINVAL; 1197 1198 ret = cq->device->resize_cq(cq, cmd.cqe, &udata); 1199 if (ret) 1200 goto out; 1201 1202 resp.cqe = cq->cqe; 1203 1204 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1205 &resp, sizeof resp.cqe)) 1206 ret = -EFAULT; 1207 1208 out: 1209 put_cq_read(cq); 1210 1211 return ret ? 
	ret : in_len;
}

/*
 * Copy one completion (struct ib_wc) into the on-the-wire ABI layout
 * (struct ib_uverbs_wc) and write it to user memory at @dest.
 * Returns 0 on success or -EFAULT if the user copy fails.
 */
static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id		= wc->wr_id;
	tmp.status		= wc->status;
	tmp.opcode		= wc->opcode;
	tmp.vendor_err		= wc->vendor_err;
	tmp.byte_len		= wc->byte_len;
	/* imm_data is __be32 in the kernel wc; the ABI field is __u32 */
	tmp.ex.imm_data		= (__u32 __force) wc->ex.imm_data;
	tmp.qp_num		= wc->qp->qp_num;
	tmp.src_qp		= wc->src_qp;
	tmp.wc_flags		= wc->wc_flags;
	tmp.pkey_index		= wc->pkey_index;
	tmp.slid		= wc->slid;
	tmp.sl			= wc->sl;
	tmp.dlid_path_bits	= wc->dlid_path_bits;
	tmp.port_num		= wc->port_num;
	tmp.reserved		= 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

/*
 * Poll up to cmd.ne completions off the CQ identified by cmd.cq_handle
 * and stream them to user space: the response buffer holds a
 * struct ib_uverbs_poll_cq_resp header followed by resp.count
 * struct ib_uverbs_wc entries.  Returns in_len on success.
 */
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq       cmd;
	struct ib_uverbs_poll_cq_resp  resp;
	u8 __user                     *header_ptr;
	u8 __user                     *data_ptr;
	struct ib_cq                  *cq;
	struct ib_wc                   wc;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		/* poll one entry at a time so each can be copied out as we go */
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	/* header (the final count) is written last, after all entries */
	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	put_cq_read(cq);
	return ret;
}

/*
 * Arm the completion-notification event for a CQ: solicited-only or
 * next-completion depending on cmd.solicited_only.  Returns in_len on
 * success.
 */
ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq                  *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	put_cq_read(cq);

	return in_len;
}

/*
 * Destroy the CQ identified by cmd.cq_handle and report to user space
 * how many completion/async events were delivered for it.  Follows the
 * uobject scheme documented at the top of this file: take the uobject
 * for writing, destroy the underlying CQ, clear the live flag, then
 * unlink and drop the final reference.
 */
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq      cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject		*uobj;
	struct ib_cq			*cq;
	struct ib_ucq_object		*obj;
	struct ib_uverbs_event_file	*ev_file;
	int				 ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq      = uobj->object;
	ev_file = cq->cq_context;
	obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		/* dead: further idr lookups of this handle must fail */
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, obj);

	/* read the event counters before dropping our last reference */
	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported  = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

/*
 * Create a QP.  For IB_QPT_XRC_TGT the "pd_handle" field actually
 * carries an XRCD handle and creation goes through ib_create_qp();
 * all other QP types go directly to the device driver's create_qp with
 * the trailing udata, and this function fills in the shared ib_qp
 * fields and takes the PD/CQ/SRQ usecnt references itself.
 * Returns in_len on success.
 */
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp      cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_device	       *device;
	struct ib_pd                   *pd = NULL;
	struct ib_xrcd		       *xrcd = NULL;
	struct ib_uobject	       *uninitialized_var(xrcd_uobj);
	struct ib_cq                   *scq = NULL, *rcq = NULL;
	struct ib_srq                  *srq = NULL;
	struct ib_qp                   *qp;
	struct ib_qp_init_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd.qp_type == IB_QPT_XRC_TGT) {
		/* for XRC target QPs, pd_handle carries the XRCD handle */
		xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
		scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0);
		if (!pd || !scq) {
			ret = -EINVAL;
			goto err_put;
		}

		if (cmd.qp_type == IB_QPT_XRC_INI) {
			/* XRC initiator QPs have no receive queue */
			cmd.max_recv_wr = cmd.max_recv_sge = 0;
		} else {
			if (cmd.is_srq) {
				srq = idr_read_srq(cmd.srq_handle, file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}
			/* same handle => reuse scq rather than taking a 2nd ref */
			rcq = (cmd.recv_cq_handle == cmd.send_cq_handle) ?
			       scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1);
			if (!rcq) {
				ret = -EINVAL;
				goto err_put;
			}
		}
		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.xrcd	   = xrcd;
	attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd.qp_type;
	attr.create_flags  = 0;

	attr.cap.max_send_wr     = cmd.max_send_wr;
	attr.cap.max_recv_wr     = cmd.max_recv_wr;
	attr.cap.max_send_sge    = cmd.max_send_sge;
	attr.cap.max_recv_sge    = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	obj->uevent.events_reported     = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd.qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, &udata);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd.qp_type != IB_QPT_XRC_TGT) {
		/*
		 * The driver's create_qp only allocated the QP; fill in
		 * the common ib_qp fields and take the references that
		 * ib_create_qp() would otherwise have taken.
		 */
		qp->real_qp	  = qp;
		qp->device	  = device;
		qp->pd		  = pd;
		qp->send_cq	  = attr.send_cq;
		qp->recv_cq	  = attr.recv_cq;
		qp->srq		  = attr.srq;
		qp->event_handler = attr.event_handler;
		qp->qp_context	  = attr.qp_context;
		qp->qp_type	  = attr.qp_type;
		atomic_inc(&pd->usecnt);
		atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn             = qp->qp_num;
	resp.qp_handle       = obj->uevent.uobject.id;
	resp.max_recv_sge    = attr.cap.max_recv_sge;
	resp.max_send_sge    = attr.cap.max_send_sge;
	resp.max_recv_wr     = attr.cap.max_recv_wr;
	resp.max_send_wr     = attr.cap.max_send_wr;
	resp.max_inline_data = attr.cap.max_inline_data;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	/* success: drop the lookup references taken above */
	if (xrcd)
		put_xrcd_read(xrcd_uobj);
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	/* only now may concurrent idr lookups see this QP */
	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	if (xrcd)
		put_xrcd_read(xrcd_uobj);
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

/*
 * Open (share) an existing XRC target QP identified by qpn inside the
 * XRCD given by cmd.pd_handle, creating a new user handle for it.
 * Returns in_len on success.
 */
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp        cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_xrcd                 *xrcd;
	struct ib_uobject              *uninitialized_var(xrcd_uobj);
	struct ib_qp                   *qp;
	struct ib_qp_open_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
	down_write(&obj->uevent.uobject.mutex);

	/* cmd.pd_handle carries the XRCD handle for open_qp */
	xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
	if (!xrcd) {
		ret = -EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.qp_num        = cmd.qpn;
	attr.qp_type       = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn       = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_remove;
	}

	put_xrcd_read(xrcd_uobj);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	/* make the new handle visible to concurrent lookups */
	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_remove:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	put_xrcd_read(xrcd_uobj);
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

/*
 * Query QP attributes (ib_query_qp) and marshal both the current
 * attributes and the creation-time capabilities back to user space.
 * attr/init_attr are heap-allocated because the structs are large.
 * Returns in_len on success.
 */
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp      cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp                   *qp;
	struct ib_qp_attr              *attr;
	struct ib_qp_init_attr         *init_attr;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr      = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		/* kfree(NULL) is fine, so a partial allocation is handled */
		ret = -ENOMEM;
		goto out;
	}

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	put_qp_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state            = attr->qp_state;
	resp.cur_qp_state        = attr->cur_qp_state;
	resp.path_mtu            = attr->path_mtu;
	resp.path_mig_state      = attr->path_mig_state;
	resp.qkey                = attr->qkey;
	resp.rq_psn              = attr->rq_psn;
	resp.sq_psn              = attr->sq_psn;
	resp.dest_qp_num         = attr->dest_qp_num;
	resp.qp_access_flags     = attr->qp_access_flags;
	resp.pkey_index          = attr->pkey_index;
	resp.alt_pkey_index      = attr->alt_pkey_index;
	resp.sq_draining         = attr->sq_draining;
	resp.max_rd_atomic       = attr->max_rd_atomic;
	resp.max_dest_rd_atomic  = attr->max_dest_rd_atomic;
	resp.min_rnr_timer       = attr->min_rnr_timer;
	resp.port_num            = attr->port_num;
	resp.timeout             = attr->timeout;
	resp.retry_cnt           = attr->retry_cnt;
	resp.rnr_retry           = attr->rnr_retry;
	resp.alt_port_num        = attr->alt_port_num;
	resp.alt_timeout         = attr->alt_timeout;

	/* primary path address vector */
	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
	resp.dest.flow_label     = attr->ah_attr.grh.flow_label;
	resp.dest.sgid_index     = attr->ah_attr.grh.sgid_index;
	resp.dest.hop_limit      = attr->ah_attr.grh.hop_limit;
	resp.dest.traffic_class  = attr->ah_attr.grh.traffic_class;
	resp.dest.dlid           = attr->ah_attr.dlid;
	resp.dest.sl             = attr->ah_attr.sl;
	resp.dest.src_path_bits  = attr->ah_attr.src_path_bits;
	resp.dest.static_rate    = attr->ah_attr.static_rate;
	resp.dest.is_global      = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
	resp.dest.port_num       = attr->ah_attr.port_num;

	/* alternate path address vector */
	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
	resp.alt_dest.flow_label    = attr->alt_ah_attr.grh.flow_label;
	resp.alt_dest.sgid_index    = attr->alt_ah_attr.grh.sgid_index;
	resp.alt_dest.hop_limit     = attr->alt_ah_attr.grh.hop_limit;
	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
	resp.alt_dest.dlid          = attr->alt_ah_attr.dlid;
	resp.alt_dest.sl            = attr->alt_ah_attr.sl;
	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
	resp.alt_dest.static_rate   = attr->alt_ah_attr.static_rate;
	resp.alt_dest.is_global     = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
	resp.alt_dest.port_num      = attr->alt_ah_attr.port_num;

	resp.max_send_wr     = init_attr->cap.max_send_wr;
	resp.max_recv_wr     = init_attr->cap.max_recv_wr;
	resp.max_send_sge    = init_attr->cap.max_send_sge;
	resp.max_recv_sge    = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all      = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ?
	ret : in_len;
}

/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		/* initiator side has no receive-related responder state */
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		/* target side has no send-related requester state */
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}

/*
 * Modify QP attributes.  The attribute mask is filtered through
 * modify_qp_mask() for XRC QP types.  A QP opened via ib_open_qp()
 * (real_qp != qp) is modified through ib_modify_qp() on the shared
 * real QP; otherwise the driver's modify_qp is called directly so
 * the trailing udata can be passed through.  Returns in_len on success.
 */
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_udata            udata;
	struct ib_qp              *qp;
	struct ib_qp_attr         *attr;
	int                        ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state            = cmd.qp_state;
	attr->cur_qp_state        = cmd.cur_qp_state;
	attr->path_mtu            = cmd.path_mtu;
	attr->path_mig_state      = cmd.path_mig_state;
	attr->qkey                = cmd.qkey;
	attr->rq_psn              = cmd.rq_psn;
	attr->sq_psn              = cmd.sq_psn;
	attr->dest_qp_num         = cmd.dest_qp_num;
	attr->qp_access_flags     = cmd.qp_access_flags;
	attr->pkey_index          = cmd.pkey_index;
	attr->alt_pkey_index      = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic       = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer       = cmd.min_rnr_timer;
	attr->port_num            = cmd.port_num;
	attr->timeout             = cmd.timeout;
	attr->retry_cnt           = cmd.retry_cnt;
	attr->rnr_retry           = cmd.rnr_retry;
	attr->alt_port_num        = cmd.alt_port_num;
	attr->alt_timeout         = cmd.alt_timeout;

	/* primary path address vector */
	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label    = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index    = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit     = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
	attr->ah_attr.dlid              = cmd.dest.dlid;
	attr->ah_attr.sl                = cmd.dest.sl;
	attr->ah_attr.src_path_bits     = cmd.dest.src_path_bits;
	attr->ah_attr.static_rate       = cmd.dest.static_rate;
	attr->ah_attr.ah_flags          = cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num          = cmd.dest.port_num;

	/* alternate path address vector */
	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid              = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl                = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags          = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num          = cmd.alt_dest.port_num;

	if (qp->real_qp == qp) {
		ret = qp->device->modify_qp(qp, attr,
			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
	} else {
		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
	}

	put_qp_read(qp);

	if (ret)
		goto out;

	ret = in_len;

out:
	kfree(attr);

	return ret;
}

/*
 * Destroy the QP identified by cmd.qp_handle and report how many async
 * events were delivered for it.  Fails with -EBUSY while multicast
 * groups are still attached.  Returns in_len on success.
 */
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp      cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject               *uobj;
	struct ib_qp                    *qp;
	struct ib_uqp_object            *obj;
	int                              ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	qp  = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

	/* multicast attachments must be detached before destroy */
	if (!list_empty(&obj->mcast_list)) {
		put_uobj_write(uobj);
		return -EBUSY;
	}

	ret = ib_destroy_qp(qp);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, &obj->uevent);

	resp.events_reported = obj->uevent.events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

/*
 * Post a list of send work requests to a QP.  The input buffer holds
 * the command header, then wr_count work requests of wqe_size bytes
 * each, then the scatter/gather entries for all of them.  On failure
 * of the actual post, resp.bad_wr tells user space (1-based) which WR
 * failed.  Returns in_len on success.
 */
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send      cmd;
	struct ib_uverbs_post_send_resp
 resp;
	struct ib_uverbs_send_wr       *user_wr;
	struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp                   *qp;
	int                             i, sg_ind;
	int				is_ud;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* user-supplied counts must be consistent with the input length */
	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	/* scratch buffer reused for each user WR */
	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		/* one allocation: the WR followed by its aligned sg_list */
		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto out_put;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;
		next->opcode     = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (is_ud) {
			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
						     file->ucontext);
			if (!next->wr.ud.ah) {
				ret = -EINVAL;
				goto out_put;
			}
			next->wr.ud.remote_qpn  = user_wr->wr.ud.remote_qpn;
			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
		} else {
			switch (next->opcode) {
			case IB_WR_RDMA_WRITE_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				/* fall through: also needs the rdma fields */
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
				next->wr.rdma.remote_addr =
					user_wr->wr.rdma.remote_addr;
				next->wr.rdma.rkey        =
					user_wr->wr.rdma.rkey;
				break;
			case IB_WR_SEND_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				next->ex.invalidate_rkey =
					user_wr->ex.invalidate_rkey;
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				next->wr.atomic.remote_addr =
					user_wr->wr.atomic.remote_addr;
				next->wr.atomic.compare_add =
					user_wr->wr.atomic.compare_add;
				next->wr.atomic.swap = user_wr->wr.atomic.swap;
				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
				break;
			default:
				break;
			}
		}

		if (next->num_sge) {
			/* sg_list lives just past the WR in the same allocation */
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		/* count WRs up to and including the one that failed */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	put_qp_read(qp);

	while (wr) {
		/* drop the AH reference taken by idr_read_ah() for UD WRs */
		if (is_ud && wr->wr.ud.ah)
			put_ah_read(wr->wr.ud.ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ?
	ret : in_len;
}

/*
 * Unmarshal a user-space list of receive work requests into a kernel
 * linked list of struct ib_recv_wr.  The buffer layout is wr_count
 * WRs of wqe_size bytes each, followed by the SGEs for all of them.
 * Returns the head of the list, or an ERR_PTR on failure (in which
 * case nothing is left allocated).  The caller frees the list.
 */
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr        *wr = NULL, *last, *next;
	int                       sg_ind;
	int                       i;
	int                       ret;

	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	/* scratch buffer reused for each user WR */
	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		/* one allocation: the WR followed by its aligned sg_list */
		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next    = NULL;
		next->wr_id   = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

/*
 * Post a list of receive work requests to a QP.  On failure of the
 * actual post, resp.bad_wr tells user space (1-based) which WR failed.
 * Returns in_len on success.
 */
ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv      cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr              *wr, *next, *bad_wr;
	struct ib_qp                   *qp;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	put_qp_read(qp);

	if (ret)
		/* count WRs up to and including the one that failed */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ?
	       ret : in_len;
}

/*
 * Post a list of receive work requests to an SRQ.  Same marshalling
 * and bad_wr reporting as ib_uverbs_post_recv(), but targeting a
 * shared receive queue.  Returns in_len on success.
 */
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv      cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr                  *wr, *next, *bad_wr;
	struct ib_srq                      *srq;
	ssize_t                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	put_srq_read(srq);

	if (ret)
		/* count WRs up to and including the one that failed */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ?
	ret : in_len;
}

/*
 * Create an address handle on the given PD from the user-supplied
 * address vector and return its new handle.  Returns in_len on success.
 */
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah      cmd;
	struct ib_uverbs_create_ah_resp resp;
	struct ib_uobject              *uobj;
	struct ib_pd                   *pd;
	struct ib_ah                   *ah;
	struct ib_ah_attr               attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_key);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.dlid              = cmd.attr.dlid;
	attr.sl                = cmd.attr.sl;
	attr.src_path_bits     = cmd.attr.src_path_bits;
	attr.static_rate       = cmd.attr.static_rate;
	attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num          = cmd.attr.port_num;
	attr.grh.flow_label    = cmd.attr.grh.flow_label;
	attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject  = uobj;
	uobj->object = ah;

	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	/* make the new handle visible to concurrent lookups */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
	ib_destroy_ah(ah);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

/*
 * Destroy the address handle identified by cmd.ah_handle.
 * Returns in_len on success.
 */
ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_ah		   *ah;
	struct ib_uobject	   *uobj;
	int			    ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	ah = uobj->object;

	ret = ib_destroy_ah(ah);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

/*
 * Attach a QP to a multicast group (gid, mlid).  The attachment is
 * also recorded on the QP's mcast_list so that destroy_qp can refuse
 * while attachments remain; attaching the same (gid, mlid) twice is a
 * no-op.  Returns in_len on success.
 */
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp                 *qp;
	struct ib_uqp_object         *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* already attached to this group?  succeed without re-attaching */
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	put_qp_read(qp);

	return ret ? ret : in_len;
}

/*
 * Detach a QP from a multicast group (gid, mlid) and drop the matching
 * entry from the QP's mcast_list.  Returns in_len on success.
 */
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object         *obj;
	struct ib_qp                 *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
	if (ret)
		goto out_put;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			break;
		}

out_put:
	put_qp_read(qp);

	return ret ?
	ret : in_len;
}

/*
 * Common SRQ-creation worker shared by the create_srq and create_xsrq
 * command handlers.  For IB_SRQT_XRC the command additionally supplies
 * a CQ and an XRCD; a reference on the containing uxrcd object is held
 * for the lifetime of the SRQ.  Returns 0 on success or a negative
 * errno; the response struct is written to cmd->response.
 */
int __uverbs_create_xsrq(struct ib_uverbs_file *file,
			 struct ib_uverbs_create_xsrq *cmd,
			 struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object           *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	struct ib_uobject               *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr          attr;
	int ret;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_key);
	down_write(&obj->uevent.uobject.mutex);

	pd = idr_read_pd(cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	if (cmd->srq_type == IB_SRQT_XRC) {
		attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
		if (!attr.ext.xrc.cq) {
			ret = -EINVAL;
			goto err_put_pd;
		}

		attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err_put_cq;
		}

		/* pin the uxrcd for as long as this SRQ exists */
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.srq_type       = cmd->srq_type;
	attr.attr.max_wr    = cmd->max_wr;
	attr.attr.max_sge   = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	/* fill in the common ib_srq fields the driver did not set */
	srq->device        = pd->device;
	srq->pd            = pd;
	srq->srq_type	   = cmd->srq_type;
	srq->uobject       = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.cq   = attr.ext.xrc.cq;
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.cq->usecnt);
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	/* success: drop the lookup references taken above */
	if (cmd->srq_type == IB_SRQT_XRC) {
		put_uobj_read(xrcd_uobj);
		put_cq_read(attr.ext.xrc.cq);
	}
	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
	mutex_unlock(&file->mutex);

	/* make the new handle visible to concurrent lookups */
	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_srq(srq);

err_put:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		put_uobj_read(xrcd_uobj);
	}

err_put_cq:
	if (cmd->srq_type == IB_SRQT_XRC)
		put_cq_read(attr.ext.xrc.cq);

err_put_pd:
	put_pd_read(pd);

err:
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

/*
 * Handle the legacy create_srq command by translating it into the
 * extended create_xsrq form and delegating to __uverbs_create_xsrq().
 */
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq      cmd;
	struct ib_uverbs_create_xsrq     xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if
(copy_from_user(&cmd, buf, sizeof cmd)) 2596 return -EFAULT; 2597 2598 xcmd.response = cmd.response; 2599 xcmd.user_handle = cmd.user_handle; 2600 xcmd.srq_type = IB_SRQT_BASIC; 2601 xcmd.pd_handle = cmd.pd_handle; 2602 xcmd.max_wr = cmd.max_wr; 2603 xcmd.max_sge = cmd.max_sge; 2604 xcmd.srq_limit = cmd.srq_limit; 2605 2606 INIT_UDATA(&udata, buf + sizeof cmd, 2607 (unsigned long) cmd.response + sizeof resp, 2608 in_len - sizeof cmd, out_len - sizeof resp); 2609 2610 ret = __uverbs_create_xsrq(file, &xcmd, &udata); 2611 if (ret) 2612 return ret; 2613 2614 return in_len; 2615 } 2616 2617 ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file, 2618 const char __user *buf, int in_len, int out_len) 2619 { 2620 struct ib_uverbs_create_xsrq cmd; 2621 struct ib_uverbs_create_srq_resp resp; 2622 struct ib_udata udata; 2623 int ret; 2624 2625 if (out_len < sizeof resp) 2626 return -ENOSPC; 2627 2628 if (copy_from_user(&cmd, buf, sizeof cmd)) 2629 return -EFAULT; 2630 2631 INIT_UDATA(&udata, buf + sizeof cmd, 2632 (unsigned long) cmd.response + sizeof resp, 2633 in_len - sizeof cmd, out_len - sizeof resp); 2634 2635 ret = __uverbs_create_xsrq(file, &cmd, &udata); 2636 if (ret) 2637 return ret; 2638 2639 return in_len; 2640 } 2641 2642 ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, 2643 const char __user *buf, int in_len, 2644 int out_len) 2645 { 2646 struct ib_uverbs_modify_srq cmd; 2647 struct ib_udata udata; 2648 struct ib_srq *srq; 2649 struct ib_srq_attr attr; 2650 int ret; 2651 2652 if (copy_from_user(&cmd, buf, sizeof cmd)) 2653 return -EFAULT; 2654 2655 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, 2656 out_len); 2657 2658 srq = idr_read_srq(cmd.srq_handle, file->ucontext); 2659 if (!srq) 2660 return -EINVAL; 2661 2662 attr.max_wr = cmd.max_wr; 2663 attr.srq_limit = cmd.srq_limit; 2664 2665 ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata); 2666 2667 put_srq_read(srq); 2668 2669 return ret ? 
ret : in_len; 2670 } 2671 2672 ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file, 2673 const char __user *buf, 2674 int in_len, int out_len) 2675 { 2676 struct ib_uverbs_query_srq cmd; 2677 struct ib_uverbs_query_srq_resp resp; 2678 struct ib_srq_attr attr; 2679 struct ib_srq *srq; 2680 int ret; 2681 2682 if (out_len < sizeof resp) 2683 return -ENOSPC; 2684 2685 if (copy_from_user(&cmd, buf, sizeof cmd)) 2686 return -EFAULT; 2687 2688 srq = idr_read_srq(cmd.srq_handle, file->ucontext); 2689 if (!srq) 2690 return -EINVAL; 2691 2692 ret = ib_query_srq(srq, &attr); 2693 2694 put_srq_read(srq); 2695 2696 if (ret) 2697 return ret; 2698 2699 memset(&resp, 0, sizeof resp); 2700 2701 resp.max_wr = attr.max_wr; 2702 resp.max_sge = attr.max_sge; 2703 resp.srq_limit = attr.srq_limit; 2704 2705 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2706 &resp, sizeof resp)) 2707 return -EFAULT; 2708 2709 return in_len; 2710 } 2711 2712 ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file, 2713 const char __user *buf, int in_len, 2714 int out_len) 2715 { 2716 struct ib_uverbs_destroy_srq cmd; 2717 struct ib_uverbs_destroy_srq_resp resp; 2718 struct ib_uobject *uobj; 2719 struct ib_srq *srq; 2720 struct ib_uevent_object *obj; 2721 int ret = -EINVAL; 2722 2723 if (copy_from_user(&cmd, buf, sizeof cmd)) 2724 return -EFAULT; 2725 2726 uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext); 2727 if (!uobj) 2728 return -EINVAL; 2729 srq = uobj->object; 2730 obj = container_of(uobj, struct ib_uevent_object, uobject); 2731 2732 ret = ib_destroy_srq(srq); 2733 if (!ret) 2734 uobj->live = 0; 2735 2736 put_uobj_write(uobj); 2737 2738 if (ret) 2739 return ret; 2740 2741 idr_remove_uobj(&ib_uverbs_srq_idr, uobj); 2742 2743 mutex_lock(&file->mutex); 2744 list_del(&uobj->list); 2745 mutex_unlock(&file->mutex); 2746 2747 ib_uverbs_release_uevent(file, obj); 2748 2749 memset(&resp, 0, sizeof resp); 2750 resp.events_reported = obj->events_reported; 2751 2752 
put_uobj(uobj); 2753 2754 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2755 &resp, sizeof resp)) 2756 ret = -EFAULT; 2757 2758 return ret ? ret : in_len; 2759 } 2760