1 /* 2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. 4 * Copyright (c) 2005 PathScale, Inc. All rights reserved. 5 * Copyright (c) 2006 Mellanox Technologies. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#include "uverbs.h"

/*
 * One lockdep class per uobject type, so that the lock validator can
 * tell the per-object rwsems of different object types apart (e.g. a
 * PD rwsem held while taking an MR rwsem).
 */
struct uverbs_lock_class {
	struct lock_class_key	key;		/* lockdep key for this object type */
	char			name[16];	/* name shown in lockdep reports */
};

static struct uverbs_lock_class pd_lock_class	= { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class	= { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class	= { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class	= { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class	= { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class	= { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class	= { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };

/* Point an ib_udata at the userspace input/output buffers of a command. */
#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
	do {								\
		(udata)->inbuf  = (void __user *) (ibuf);		\
		(udata)->outbuf = (void __user *) (obuf);		\
		(udata)->inlen  = (ilen);				\
		(udata)->outlen = (olen);				\
	} while (0)

/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr operations.  When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */

/*
 * Initialize a freshly allocated uobject.  The object starts out not
 * "live" (invisible to lookups) and with a single kref held by the
 * caller.
 */
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
	uobj->user_handle = user_handle;
	uobj->context	  = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
	uobj->live	  = 0;
}

/* kref release callback: frees the uobject storage. */
static void release_uobj(struct kref *kref)
{
	kfree(container_of(kref, struct ib_uobject, ref));
}

/* Drop one reference to a uobject, freeing it on the last put. */
static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}

/* Release a read-locked uobject: drop the rwsem, then the reference. */
static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}

/* Release a write-locked uobject: drop the rwsem, then the reference. */
static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}

/*
 * Allocate an id for uobj in the given idr and store it in uobj->id.
 * Uses the old idr_pre_get()/idr_get_new() API, which requires
 * retrying when the preallocated node is consumed by a racing inserter
 * (-EAGAIN).
 */
static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

retry:
	if (!idr_pre_get(idr, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&ib_uverbs_idr_lock);
	ret = idr_get_new(idr, uobj, &uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);

	if (ret == -EAGAIN)
		goto retry;

	return ret;
}

/* Remove uobj's id from the idr; does not drop any reference. */
void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}

/*
 * Look up a uobject by id and take a reference on it, but only if it
 * belongs to the given ucontext (prevents one process from touching
 * another's objects).  Returns NULL on miss or context mismatch.
 */
static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	spin_lock(&ib_uverbs_idr_lock);
	uobj = idr_find(idr, id);
	if (uobj) {
		if (uobj->context == context)
			kref_get(&uobj->ref);
		else
			uobj = NULL;
	}
	spin_unlock(&ib_uverbs_idr_lock);

	return uobj;
}

/*
 * Look up a uobject and return it read-locked, or NULL if it does not
 * exist or is no longer live.  @nested selects down_read_nested() for
 * the case where another uobject rwsem of the same class is already
 * held (see SINGLE_DEPTH_NESTING).
 */
static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}

/*
 * Look up a uobject and return it write-locked (for destroy paths),
 * or NULL if it does not exist or is no longer live.
 */
static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}

/*
 * Convenience wrapper: read-lock a uobject and return the underlying
 * verbs object pointer (uobj->object), or NULL on lookup failure.
 */
static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
			  int nested)
{
	struct ib_uobject *uobj;

	uobj = idr_read_uobj(idr, id, context, nested);
	return uobj ?
uobj->object : NULL;
}

/* Typed read-lock/put helpers for each uobject kind.  Each idr_read_*()
 * returns the verbs object read-locked (or NULL), and the matching
 * put_*_read() releases the lock and reference taken by the lookup. */

static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}

static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

/* Write-locked QP lookup, used by destroy/modify paths. */
static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
	return uobj ? uobj->object : NULL;
}

static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}

static void put_qp_write(struct ib_qp *qp)
{
	put_uobj_write(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}

/* XRCD lookup also hands back the uobject itself via @uobj, since the
 * XRCD put helper below is keyed on the uobject rather than the xrcd. */
static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
				     struct ib_uobject **uobj)
{
	*uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
	return *uobj ?

ib_register_event_handler(&file->event_handler);
	if (ret)
		goto err_file;

	kref_get(&file->async_file->ref);
	kref_get(&file->ref);
	file->ucontext = ucontext;

	/* Make the async event fd visible to userspace only after
	 * everything that can fail has succeeded. */
	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	ibdev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}

/*
 * QUERY_DEVICE command: query the device attributes and copy them,
 * field by field, into the fixed userspace response layout.
 */
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device      cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_device_attr              attr;
	int                                ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_device(file->device->ib_dev, &attr);
	if (ret)
		return ret;

	/* Zero first so reserved/padding bytes never leak kernel stack. */
	memset(&resp, 0, sizeof resp);

	resp.fw_ver		       = attr.fw_ver;
	resp.node_guid		       = file->device->ib_dev->node_guid;
	resp.sys_image_guid	       = attr.sys_image_guid;
	resp.max_mr_size	       = attr.max_mr_size;
	resp.page_size_cap	       = attr.page_size_cap;
	resp.vendor_id		       = attr.vendor_id;
	resp.vendor_part_id	       = attr.vendor_part_id;
	resp.hw_ver		       = attr.hw_ver;
	resp.max_qp		       = attr.max_qp;
	resp.max_qp_wr		       = attr.max_qp_wr;
	resp.device_cap_flags	       = attr.device_cap_flags;
	resp.max_sge		       = attr.max_sge;
	resp.max_sge_rd		       = attr.max_sge_rd;
	resp.max_cq		       = attr.max_cq;
	resp.max_cqe		       = attr.max_cqe;
	resp.max_mr		       = attr.max_mr;
	resp.max_pd		       = attr.max_pd;
	resp.max_qp_rd_atom	       = attr.max_qp_rd_atom;
	resp.max_ee_rd_atom	       = attr.max_ee_rd_atom;
	resp.max_res_rd_atom	       = attr.max_res_rd_atom;
	resp.max_qp_init_rd_atom       = attr.max_qp_init_rd_atom;
	resp.max_ee_init_rd_atom       = attr.max_ee_init_rd_atom;
	resp.atomic_cap		       = attr.atomic_cap;
	resp.max_ee		       = attr.max_ee;
	resp.max_rdd		       = attr.max_rdd;
	resp.max_mw		       = attr.max_mw;
	resp.max_raw_ipv6_qp	       = attr.max_raw_ipv6_qp;
	resp.max_raw_ethy_qp	       = attr.max_raw_ethy_qp;
	resp.max_mcast_grp	       = attr.max_mcast_grp;
	resp.max_mcast_qp_attach       = attr.max_mcast_qp_attach;
	resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
	resp.max_ah		       = attr.max_ah;
	resp.max_fmr		       = attr.max_fmr;
	resp.max_map_per_fmr	       = attr.max_map_per_fmr;
	resp.max_srq		       = attr.max_srq;
	resp.max_srq_wr		       = attr.max_srq_wr;
	resp.max_srq_sge	       = attr.max_srq_sge;
	resp.max_pkeys		       = attr.max_pkeys;
	resp.local_ca_ack_delay	       = attr.local_ca_ack_delay;
	resp.phys_port_cnt	       = file->device->ib_dev->phys_port_cnt;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

/*
 * QUERY_PORT command: query one port's attributes and copy them to
 * userspace.
 */
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port      cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr              attr;
	int                              ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state	     = attr.state;
	resp.max_mtu	     = attr.max_mtu;
	resp.active_mtu	     = attr.active_mtu;
	resp.gid_tbl_len     = attr.gid_tbl_len;
	resp.port_cap_flags  = attr.port_cap_flags;
	resp.max_msg_sz	     = attr.max_msg_sz;
	resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
	resp.pkey_tbl_len    = attr.pkey_tbl_len;
	resp.lid	     = attr.lid;
	resp.sm_lid	     = attr.sm_lid;
	resp.lmc	     = attr.lmc;
	resp.max_vl_num	     = attr.max_vl_num;
	resp.sm_sl	     = attr.sm_sl;
	resp.subnet_timeout  = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width    = attr.active_width;
	resp.active_speed    = attr.active_speed;
	resp.phys_state      = attr.phys_state;
	resp.link_layer      = rdma_port_get_link_layer(file->device->ib_dev,
							cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

/*
 * ALLOC_PD command: allocate a protection domain for this context and
 * return its idr handle.  Follows the standard uobject creation
 * pattern: init_uobj + down_write, create the verbs object, insert in
 * the idr, copy the response, link into the per-context list, then
 * mark live and drop the write lock.
 */
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd      cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata                udata;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	int                            ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
	down_write(&uobj->mutex);

	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
					    file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device  = file->device->ib_dev;
	pd->uobject = uobj;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

/*
 * DEALLOC_PD command: destroy a PD by handle.  Standard uobject
 * destroy pattern: write-lock the uobject, destroy the verbs object,
 * clear "live" only on success, then unlink from the idr and the
 * per-context list and drop the final reference.
 */
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject          *uobj;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	ret = ib_dealloc_pd(uobj->object);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

/*
 * Per-device rb-tree node mapping an inode to the XRCD that was opened
 * through it, so that several processes opening the same file share
 * one XRC domain.
 */
struct xrcd_table_entry {
	struct rb_node  node;
	struct ib_xrcd *xrcd;
	struct inode   *inode;
};

/*
 * Insert an (inode, xrcd) pair into the device's XRCD tree, keyed by
 * inode pointer.  Takes an inode reference (igrab) on success; returns
 * -EEXIST if the inode is already present.  Caller holds
 * dev->xrcd_tree_mutex.
 */
static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd  = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

/* Find the tree entry for @inode, or NULL.  Caller holds xrcd_tree_mutex. */
static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

/* Return the XRCD already associated with @inode, or NULL. */
static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

/*
 * Remove @inode's entry from the XRCD tree (if present) and drop the
 * inode reference taken at insert time.  Caller holds xrcd_tree_mutex.
 */
static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}

/*
 * OPEN_XRCD command: open (or create, with O_CREAT) an XRC domain.
 * If cmd.fd names an existing file, its inode is used as the sharing
 * key in the device's XRCD tree; cmd.fd == -1 creates an anonymous,
 * unshared XRCD.  xrcd->usecnt counts the users of a shared XRCD.
 */
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd      cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata                 udata;
	struct ib_uxrcd_object         *obj;
	struct ib_xrcd                 *xrcd = NULL;
	struct fd                       f = {NULL, 0};
	struct inode                   *inode = NULL;
	int                             ret = 0;
	int                             new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj) {
		ret = -ENOMEM;
		goto err_tree_mutex_unlock;
	}

	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

	down_write(&obj->uobject.mutex);

	if (!xrcd) {
		/* No shared XRCD for this inode yet (or anonymous open):
		 * create a new one. */
		xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev,
							file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode   = inode;
		xrcd->device  = file->device->ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_insert_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;
	up_write(&obj->uobject.mutex);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_insert_xrcd:
	idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
	ib_dealloc_xrcd(xrcd);

err:
put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}

/*
 * CLOSE_XRCD command: drop this process's handle on an XRC domain.
 * The underlying XRCD is destroyed only when it has no remaining users
 * (obj->refcnt == 0 and, for a shared XRCD, usecnt drops to zero); a
 * shared XRCD that still has other users just loses this handle.
 */
ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject          *uobj;
	struct ib_xrcd             *xrcd = NULL;
	struct inode               *inode = NULL;
	struct ib_uxrcd_object     *obj;
	int                         live;
	int                         ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->device->xrcd_tree_mutex);
	uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
	if (!uobj) {
		ret = -EINVAL;
		goto out;
	}

	xrcd  = uobj->object;
	inode = xrcd->inode;
	obj   = container_of(uobj, struct ib_uxrcd_object, uobject);
	if (atomic_read(&obj->refcnt)) {
		/* QPs/SRQs still reference this XRCD through this handle. */
		put_uobj_write(uobj);
		ret = -EBUSY;
		goto out;
	}

	if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
		ret = ib_dealloc_xrcd(uobj->object);
		if (!ret)
			uobj->live = 0;
	}

	live = uobj->live;
	if (inode && ret)
		/* Dealloc failed: restore the usecnt we dropped above. */
		atomic_inc(&xrcd->usecnt);

	put_uobj_write(uobj);

	if (ret)
		goto out;

	if (inode && !live)
		xrcd_table_delete(file->device, inode);

	idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);
	ret = in_len;

out:
	mutex_unlock(&file->device->xrcd_tree_mutex);
	return ret;
}

/*
 * Kernel-side XRCD teardown (e.g. on context cleanup): drop one user
 * of the XRCD and destroy it, removing the inode mapping, once the
 * last user is gone.
 */
void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			    struct ib_xrcd *xrcd)
{
	struct inode *inode;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return;

	ib_dealloc_xrcd(xrcd);

	if (inode)
		xrcd_table_delete(dev, inode);
}

/*
 * REG_MR command: register a userspace memory region against a PD.
 * The PD is held read-locked across the driver call; start and hca_va
 * must share the same offset within a page.
 */
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr      cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata              udata;
	struct ib_uobject           *uobj;
	struct ib_pd                *pd;
	struct ib_mr                *mr;
	int                          ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);
	atomic_set(&mr->usecnt, 0);

	uobj->object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;
up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}

/*
 * DEREG_MR command: deregister an MR by handle using the standard
 * uobject destroy pattern.
 */
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr             *mr;
	struct ib_uobject        *uobj;
	int                       ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

/*
 * ALLOC_MW command: allocate a memory window of the requested type on
 * a PD.  Same creation pattern as ALLOC_PD/REG_MR.
 */
ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw      cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	struct ib_mw                  *mw;
	int                            ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mw = pd->device->alloc_mw(pd, cmd.mw_type);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device  = pd->device;
	mw->pd      = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;
	ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
	if (ret)
		goto err_unalloc;

	memset(&resp, 0, sizeof(resp));
	resp.rkey      = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mw_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
	ib_dealloc_mw(mw);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}

/*
 * DEALLOC_MW command: destroy a memory window by handle using the
 * standard uobject destroy pattern.
 */
ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_mw               *mw;
	struct ib_uobject          *uobj;
	int                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mw = uobj->object;

	ret = ib_dealloc_mw(mw);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

/*
 * CREATE_COMP_CHANNEL command: create a completion event channel and
 * return a file descriptor for it.
 */
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel      cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct file                              *filp;
	int ret;
if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = get_unused_fd();
	if (ret < 0)
		return ret;
	resp.fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(resp.fd);
		return PTR_ERR(filp);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	/* Expose the fd only after the response copy has succeeded. */
	fd_install(resp.fd, filp);
	return in_len;
}

/*
 * CREATE_CQ command: create a completion queue, optionally bound to a
 * completion channel (cmd.comp_channel >= 0), and return its handle
 * and actual size.
 */
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq      cmd;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata                 udata;
	struct ib_ucq_object           *obj;
	struct ib_uverbs_event_file    *ev_file = NULL;
	struct ib_cq                   *cq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if (cmd.comp_vector >= file->device->num_comp_vectors)
		return -EINVAL;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_class);
	down_write(&obj->uobject.mutex);

	if (cmd.comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file	   = file;
	obj->comp_events_reported  = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
					     cmd.comp_vector,
					     file->ucontext, &udata);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device        = file->device->ib_dev;
	cq->uobject       = &obj->uobject;
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.cq_handle = obj->uobject.id;
	resp.cqe       = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);
	return ret;
}

/*
 * RESIZE_CQ command: resize a CQ and return the new actual size.  Note
 * that only resp.cqe is copied back to userspace, not the whole resp.
 */
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq      cmd;
	struct ib_uverbs_resize_cq_resp resp;
	struct ib_udata                 udata;
	struct ib_cq                   *cq;
	int                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}

/*
 * Copy one work completion into the userspace ABI layout at @dest.
 * Returns 0 or -EFAULT.
 */
static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id	   = wc->wr_id;
	tmp.status	   = wc->status;
	tmp.opcode	   = wc->opcode;
	tmp.vendor_err	   = wc->vendor_err;
	tmp.byte_len	   = wc->byte_len;
	tmp.ex.imm_data    = (__u32 __force) wc->ex.imm_data;
	tmp.qp_num	   = wc->qp->qp_num;
	tmp.src_qp	   = wc->src_qp;
	tmp.wc_flags	   = wc->wc_flags;
	tmp.pkey_index	   = wc->pkey_index;
	tmp.slid	   = wc->slid;
	tmp.sl		   = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num	   = wc->port_num;
	tmp.reserved	   = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

/*
 * POLL_CQ command: poll up to cmd.ne completions off the CQ, writing a
 * poll_cq_resp header followed by packed ib_uverbs_wc entries.
 * (Definition continues beyond this view.)
 */
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq       cmd;
	struct ib_uverbs_poll_cq_resp  resp;
	u8 __user                     *header_ptr;
	u8 __user                     *data_ptr;
	struct ib_cq                  *cq;
	struct ib_wc                   wc;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}
1417 1418 ret = in_len; 1419 1420 out_put: 1421 put_cq_read(cq); 1422 return ret; 1423 } 1424 1425 ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file, 1426 const char __user *buf, int in_len, 1427 int out_len) 1428 { 1429 struct ib_uverbs_req_notify_cq cmd; 1430 struct ib_cq *cq; 1431 1432 if (copy_from_user(&cmd, buf, sizeof cmd)) 1433 return -EFAULT; 1434 1435 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0); 1436 if (!cq) 1437 return -EINVAL; 1438 1439 ib_req_notify_cq(cq, cmd.solicited_only ? 1440 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP); 1441 1442 put_cq_read(cq); 1443 1444 return in_len; 1445 } 1446 1447 ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file, 1448 const char __user *buf, int in_len, 1449 int out_len) 1450 { 1451 struct ib_uverbs_destroy_cq cmd; 1452 struct ib_uverbs_destroy_cq_resp resp; 1453 struct ib_uobject *uobj; 1454 struct ib_cq *cq; 1455 struct ib_ucq_object *obj; 1456 struct ib_uverbs_event_file *ev_file; 1457 int ret = -EINVAL; 1458 1459 if (copy_from_user(&cmd, buf, sizeof cmd)) 1460 return -EFAULT; 1461 1462 uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext); 1463 if (!uobj) 1464 return -EINVAL; 1465 cq = uobj->object; 1466 ev_file = cq->cq_context; 1467 obj = container_of(cq->uobject, struct ib_ucq_object, uobject); 1468 1469 ret = ib_destroy_cq(cq); 1470 if (!ret) 1471 uobj->live = 0; 1472 1473 put_uobj_write(uobj); 1474 1475 if (ret) 1476 return ret; 1477 1478 idr_remove_uobj(&ib_uverbs_cq_idr, uobj); 1479 1480 mutex_lock(&file->mutex); 1481 list_del(&uobj->list); 1482 mutex_unlock(&file->mutex); 1483 1484 ib_uverbs_release_ucq(file, ev_file, obj); 1485 1486 memset(&resp, 0, sizeof resp); 1487 resp.comp_events_reported = obj->comp_events_reported; 1488 resp.async_events_reported = obj->async_events_reported; 1489 1490 put_uobj(uobj); 1491 1492 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1493 &resp, sizeof resp)) 1494 return -EFAULT; 1495 1496 return in_len; 1497 } 1498 1499 ssize_t 
ib_uverbs_create_qp(struct ib_uverbs_file *file, 1500 const char __user *buf, int in_len, 1501 int out_len) 1502 { 1503 struct ib_uverbs_create_qp cmd; 1504 struct ib_uverbs_create_qp_resp resp; 1505 struct ib_udata udata; 1506 struct ib_uqp_object *obj; 1507 struct ib_device *device; 1508 struct ib_pd *pd = NULL; 1509 struct ib_xrcd *xrcd = NULL; 1510 struct ib_uobject *uninitialized_var(xrcd_uobj); 1511 struct ib_cq *scq = NULL, *rcq = NULL; 1512 struct ib_srq *srq = NULL; 1513 struct ib_qp *qp; 1514 struct ib_qp_init_attr attr; 1515 int ret; 1516 1517 if (out_len < sizeof resp) 1518 return -ENOSPC; 1519 1520 if (copy_from_user(&cmd, buf, sizeof cmd)) 1521 return -EFAULT; 1522 1523 if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW)) 1524 return -EPERM; 1525 1526 INIT_UDATA(&udata, buf + sizeof cmd, 1527 (unsigned long) cmd.response + sizeof resp, 1528 in_len - sizeof cmd, out_len - sizeof resp); 1529 1530 obj = kmalloc(sizeof *obj, GFP_KERNEL); 1531 if (!obj) 1532 return -ENOMEM; 1533 1534 init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class); 1535 down_write(&obj->uevent.uobject.mutex); 1536 1537 if (cmd.qp_type == IB_QPT_XRC_TGT) { 1538 xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj); 1539 if (!xrcd) { 1540 ret = -EINVAL; 1541 goto err_put; 1542 } 1543 device = xrcd->device; 1544 } else { 1545 if (cmd.qp_type == IB_QPT_XRC_INI) { 1546 cmd.max_recv_wr = cmd.max_recv_sge = 0; 1547 } else { 1548 if (cmd.is_srq) { 1549 srq = idr_read_srq(cmd.srq_handle, file->ucontext); 1550 if (!srq || srq->srq_type != IB_SRQT_BASIC) { 1551 ret = -EINVAL; 1552 goto err_put; 1553 } 1554 } 1555 1556 if (cmd.recv_cq_handle != cmd.send_cq_handle) { 1557 rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0); 1558 if (!rcq) { 1559 ret = -EINVAL; 1560 goto err_put; 1561 } 1562 } 1563 } 1564 1565 scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq); 1566 rcq = rcq ?: scq; 1567 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 
1568 if (!pd || !scq) { 1569 ret = -EINVAL; 1570 goto err_put; 1571 } 1572 1573 device = pd->device; 1574 } 1575 1576 attr.event_handler = ib_uverbs_qp_event_handler; 1577 attr.qp_context = file; 1578 attr.send_cq = scq; 1579 attr.recv_cq = rcq; 1580 attr.srq = srq; 1581 attr.xrcd = xrcd; 1582 attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; 1583 attr.qp_type = cmd.qp_type; 1584 attr.create_flags = 0; 1585 1586 attr.cap.max_send_wr = cmd.max_send_wr; 1587 attr.cap.max_recv_wr = cmd.max_recv_wr; 1588 attr.cap.max_send_sge = cmd.max_send_sge; 1589 attr.cap.max_recv_sge = cmd.max_recv_sge; 1590 attr.cap.max_inline_data = cmd.max_inline_data; 1591 1592 obj->uevent.events_reported = 0; 1593 INIT_LIST_HEAD(&obj->uevent.event_list); 1594 INIT_LIST_HEAD(&obj->mcast_list); 1595 1596 if (cmd.qp_type == IB_QPT_XRC_TGT) 1597 qp = ib_create_qp(pd, &attr); 1598 else 1599 qp = device->create_qp(pd, &attr, &udata); 1600 1601 if (IS_ERR(qp)) { 1602 ret = PTR_ERR(qp); 1603 goto err_put; 1604 } 1605 1606 if (cmd.qp_type != IB_QPT_XRC_TGT) { 1607 qp->real_qp = qp; 1608 qp->device = device; 1609 qp->pd = pd; 1610 qp->send_cq = attr.send_cq; 1611 qp->recv_cq = attr.recv_cq; 1612 qp->srq = attr.srq; 1613 qp->event_handler = attr.event_handler; 1614 qp->qp_context = attr.qp_context; 1615 qp->qp_type = attr.qp_type; 1616 atomic_set(&qp->usecnt, 0); 1617 atomic_inc(&pd->usecnt); 1618 atomic_inc(&attr.send_cq->usecnt); 1619 if (attr.recv_cq) 1620 atomic_inc(&attr.recv_cq->usecnt); 1621 if (attr.srq) 1622 atomic_inc(&attr.srq->usecnt); 1623 } 1624 qp->uobject = &obj->uevent.uobject; 1625 1626 obj->uevent.uobject.object = qp; 1627 ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject); 1628 if (ret) 1629 goto err_destroy; 1630 1631 memset(&resp, 0, sizeof resp); 1632 resp.qpn = qp->qp_num; 1633 resp.qp_handle = obj->uevent.uobject.id; 1634 resp.max_recv_sge = attr.cap.max_recv_sge; 1635 resp.max_send_sge = attr.cap.max_send_sge; 1636 resp.max_recv_wr = 
attr.cap.max_recv_wr; 1637 resp.max_send_wr = attr.cap.max_send_wr; 1638 resp.max_inline_data = attr.cap.max_inline_data; 1639 1640 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1641 &resp, sizeof resp)) { 1642 ret = -EFAULT; 1643 goto err_copy; 1644 } 1645 1646 if (xrcd) 1647 put_xrcd_read(xrcd_uobj); 1648 if (pd) 1649 put_pd_read(pd); 1650 if (scq) 1651 put_cq_read(scq); 1652 if (rcq && rcq != scq) 1653 put_cq_read(rcq); 1654 if (srq) 1655 put_srq_read(srq); 1656 1657 mutex_lock(&file->mutex); 1658 list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list); 1659 mutex_unlock(&file->mutex); 1660 1661 obj->uevent.uobject.live = 1; 1662 1663 up_write(&obj->uevent.uobject.mutex); 1664 1665 return in_len; 1666 1667 err_copy: 1668 idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject); 1669 1670 err_destroy: 1671 ib_destroy_qp(qp); 1672 1673 err_put: 1674 if (xrcd) 1675 put_xrcd_read(xrcd_uobj); 1676 if (pd) 1677 put_pd_read(pd); 1678 if (scq) 1679 put_cq_read(scq); 1680 if (rcq && rcq != scq) 1681 put_cq_read(rcq); 1682 if (srq) 1683 put_srq_read(srq); 1684 1685 put_uobj_write(&obj->uevent.uobject); 1686 return ret; 1687 } 1688 1689 ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file, 1690 const char __user *buf, int in_len, int out_len) 1691 { 1692 struct ib_uverbs_open_qp cmd; 1693 struct ib_uverbs_create_qp_resp resp; 1694 struct ib_udata udata; 1695 struct ib_uqp_object *obj; 1696 struct ib_xrcd *xrcd; 1697 struct ib_uobject *uninitialized_var(xrcd_uobj); 1698 struct ib_qp *qp; 1699 struct ib_qp_open_attr attr; 1700 int ret; 1701 1702 if (out_len < sizeof resp) 1703 return -ENOSPC; 1704 1705 if (copy_from_user(&cmd, buf, sizeof cmd)) 1706 return -EFAULT; 1707 1708 INIT_UDATA(&udata, buf + sizeof cmd, 1709 (unsigned long) cmd.response + sizeof resp, 1710 in_len - sizeof cmd, out_len - sizeof resp); 1711 1712 obj = kmalloc(sizeof *obj, GFP_KERNEL); 1713 if (!obj) 1714 return -ENOMEM; 1715 1716 init_uobj(&obj->uevent.uobject, 
cmd.user_handle, file->ucontext, &qp_lock_class); 1717 down_write(&obj->uevent.uobject.mutex); 1718 1719 xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj); 1720 if (!xrcd) { 1721 ret = -EINVAL; 1722 goto err_put; 1723 } 1724 1725 attr.event_handler = ib_uverbs_qp_event_handler; 1726 attr.qp_context = file; 1727 attr.qp_num = cmd.qpn; 1728 attr.qp_type = cmd.qp_type; 1729 1730 obj->uevent.events_reported = 0; 1731 INIT_LIST_HEAD(&obj->uevent.event_list); 1732 INIT_LIST_HEAD(&obj->mcast_list); 1733 1734 qp = ib_open_qp(xrcd, &attr); 1735 if (IS_ERR(qp)) { 1736 ret = PTR_ERR(qp); 1737 goto err_put; 1738 } 1739 1740 qp->uobject = &obj->uevent.uobject; 1741 1742 obj->uevent.uobject.object = qp; 1743 ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject); 1744 if (ret) 1745 goto err_destroy; 1746 1747 memset(&resp, 0, sizeof resp); 1748 resp.qpn = qp->qp_num; 1749 resp.qp_handle = obj->uevent.uobject.id; 1750 1751 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1752 &resp, sizeof resp)) { 1753 ret = -EFAULT; 1754 goto err_remove; 1755 } 1756 1757 put_xrcd_read(xrcd_uobj); 1758 1759 mutex_lock(&file->mutex); 1760 list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list); 1761 mutex_unlock(&file->mutex); 1762 1763 obj->uevent.uobject.live = 1; 1764 1765 up_write(&obj->uevent.uobject.mutex); 1766 1767 return in_len; 1768 1769 err_remove: 1770 idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject); 1771 1772 err_destroy: 1773 ib_destroy_qp(qp); 1774 1775 err_put: 1776 put_xrcd_read(xrcd_uobj); 1777 put_uobj_write(&obj->uevent.uobject); 1778 return ret; 1779 } 1780 1781 ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file, 1782 const char __user *buf, int in_len, 1783 int out_len) 1784 { 1785 struct ib_uverbs_query_qp cmd; 1786 struct ib_uverbs_query_qp_resp resp; 1787 struct ib_qp *qp; 1788 struct ib_qp_attr *attr; 1789 struct ib_qp_init_attr *init_attr; 1790 int ret; 1791 1792 if (copy_from_user(&cmd, buf, sizeof cmd)) 1793 
return -EFAULT; 1794 1795 attr = kmalloc(sizeof *attr, GFP_KERNEL); 1796 init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL); 1797 if (!attr || !init_attr) { 1798 ret = -ENOMEM; 1799 goto out; 1800 } 1801 1802 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 1803 if (!qp) { 1804 ret = -EINVAL; 1805 goto out; 1806 } 1807 1808 ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr); 1809 1810 put_qp_read(qp); 1811 1812 if (ret) 1813 goto out; 1814 1815 memset(&resp, 0, sizeof resp); 1816 1817 resp.qp_state = attr->qp_state; 1818 resp.cur_qp_state = attr->cur_qp_state; 1819 resp.path_mtu = attr->path_mtu; 1820 resp.path_mig_state = attr->path_mig_state; 1821 resp.qkey = attr->qkey; 1822 resp.rq_psn = attr->rq_psn; 1823 resp.sq_psn = attr->sq_psn; 1824 resp.dest_qp_num = attr->dest_qp_num; 1825 resp.qp_access_flags = attr->qp_access_flags; 1826 resp.pkey_index = attr->pkey_index; 1827 resp.alt_pkey_index = attr->alt_pkey_index; 1828 resp.sq_draining = attr->sq_draining; 1829 resp.max_rd_atomic = attr->max_rd_atomic; 1830 resp.max_dest_rd_atomic = attr->max_dest_rd_atomic; 1831 resp.min_rnr_timer = attr->min_rnr_timer; 1832 resp.port_num = attr->port_num; 1833 resp.timeout = attr->timeout; 1834 resp.retry_cnt = attr->retry_cnt; 1835 resp.rnr_retry = attr->rnr_retry; 1836 resp.alt_port_num = attr->alt_port_num; 1837 resp.alt_timeout = attr->alt_timeout; 1838 1839 memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16); 1840 resp.dest.flow_label = attr->ah_attr.grh.flow_label; 1841 resp.dest.sgid_index = attr->ah_attr.grh.sgid_index; 1842 resp.dest.hop_limit = attr->ah_attr.grh.hop_limit; 1843 resp.dest.traffic_class = attr->ah_attr.grh.traffic_class; 1844 resp.dest.dlid = attr->ah_attr.dlid; 1845 resp.dest.sl = attr->ah_attr.sl; 1846 resp.dest.src_path_bits = attr->ah_attr.src_path_bits; 1847 resp.dest.static_rate = attr->ah_attr.static_rate; 1848 resp.dest.is_global = !!(attr->ah_attr.ah_flags & IB_AH_GRH); 1849 resp.dest.port_num = attr->ah_attr.port_num; 1850 1851 
memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16); 1852 resp.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label; 1853 resp.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index; 1854 resp.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit; 1855 resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class; 1856 resp.alt_dest.dlid = attr->alt_ah_attr.dlid; 1857 resp.alt_dest.sl = attr->alt_ah_attr.sl; 1858 resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits; 1859 resp.alt_dest.static_rate = attr->alt_ah_attr.static_rate; 1860 resp.alt_dest.is_global = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH); 1861 resp.alt_dest.port_num = attr->alt_ah_attr.port_num; 1862 1863 resp.max_send_wr = init_attr->cap.max_send_wr; 1864 resp.max_recv_wr = init_attr->cap.max_recv_wr; 1865 resp.max_send_sge = init_attr->cap.max_send_sge; 1866 resp.max_recv_sge = init_attr->cap.max_recv_sge; 1867 resp.max_inline_data = init_attr->cap.max_inline_data; 1868 resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR; 1869 1870 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1871 &resp, sizeof resp)) 1872 ret = -EFAULT; 1873 1874 out: 1875 kfree(attr); 1876 kfree(init_attr); 1877 1878 return ret ? 
ret : in_len; 1879 } 1880 1881 /* Remove ignored fields set in the attribute mask */ 1882 static int modify_qp_mask(enum ib_qp_type qp_type, int mask) 1883 { 1884 switch (qp_type) { 1885 case IB_QPT_XRC_INI: 1886 return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER); 1887 case IB_QPT_XRC_TGT: 1888 return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT | 1889 IB_QP_RNR_RETRY); 1890 default: 1891 return mask; 1892 } 1893 } 1894 1895 ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, 1896 const char __user *buf, int in_len, 1897 int out_len) 1898 { 1899 struct ib_uverbs_modify_qp cmd; 1900 struct ib_udata udata; 1901 struct ib_qp *qp; 1902 struct ib_qp_attr *attr; 1903 int ret; 1904 1905 if (copy_from_user(&cmd, buf, sizeof cmd)) 1906 return -EFAULT; 1907 1908 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, 1909 out_len); 1910 1911 attr = kmalloc(sizeof *attr, GFP_KERNEL); 1912 if (!attr) 1913 return -ENOMEM; 1914 1915 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 1916 if (!qp) { 1917 ret = -EINVAL; 1918 goto out; 1919 } 1920 1921 attr->qp_state = cmd.qp_state; 1922 attr->cur_qp_state = cmd.cur_qp_state; 1923 attr->path_mtu = cmd.path_mtu; 1924 attr->path_mig_state = cmd.path_mig_state; 1925 attr->qkey = cmd.qkey; 1926 attr->rq_psn = cmd.rq_psn; 1927 attr->sq_psn = cmd.sq_psn; 1928 attr->dest_qp_num = cmd.dest_qp_num; 1929 attr->qp_access_flags = cmd.qp_access_flags; 1930 attr->pkey_index = cmd.pkey_index; 1931 attr->alt_pkey_index = cmd.alt_pkey_index; 1932 attr->en_sqd_async_notify = cmd.en_sqd_async_notify; 1933 attr->max_rd_atomic = cmd.max_rd_atomic; 1934 attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic; 1935 attr->min_rnr_timer = cmd.min_rnr_timer; 1936 attr->port_num = cmd.port_num; 1937 attr->timeout = cmd.timeout; 1938 attr->retry_cnt = cmd.retry_cnt; 1939 attr->rnr_retry = cmd.rnr_retry; 1940 attr->alt_port_num = cmd.alt_port_num; 1941 attr->alt_timeout = cmd.alt_timeout; 1942 1943 memcpy(attr->ah_attr.grh.dgid.raw, 
cmd.dest.dgid, 16); 1944 attr->ah_attr.grh.flow_label = cmd.dest.flow_label; 1945 attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index; 1946 attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit; 1947 attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class; 1948 attr->ah_attr.dlid = cmd.dest.dlid; 1949 attr->ah_attr.sl = cmd.dest.sl; 1950 attr->ah_attr.src_path_bits = cmd.dest.src_path_bits; 1951 attr->ah_attr.static_rate = cmd.dest.static_rate; 1952 attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0; 1953 attr->ah_attr.port_num = cmd.dest.port_num; 1954 1955 memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16); 1956 attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label; 1957 attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index; 1958 attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit; 1959 attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class; 1960 attr->alt_ah_attr.dlid = cmd.alt_dest.dlid; 1961 attr->alt_ah_attr.sl = cmd.alt_dest.sl; 1962 attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits; 1963 attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate; 1964 attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? 
IB_AH_GRH : 0; 1965 attr->alt_ah_attr.port_num = cmd.alt_dest.port_num; 1966 1967 if (qp->real_qp == qp) { 1968 ret = qp->device->modify_qp(qp, attr, 1969 modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata); 1970 } else { 1971 ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask)); 1972 } 1973 1974 put_qp_read(qp); 1975 1976 if (ret) 1977 goto out; 1978 1979 ret = in_len; 1980 1981 out: 1982 kfree(attr); 1983 1984 return ret; 1985 } 1986 1987 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file, 1988 const char __user *buf, int in_len, 1989 int out_len) 1990 { 1991 struct ib_uverbs_destroy_qp cmd; 1992 struct ib_uverbs_destroy_qp_resp resp; 1993 struct ib_uobject *uobj; 1994 struct ib_qp *qp; 1995 struct ib_uqp_object *obj; 1996 int ret = -EINVAL; 1997 1998 if (copy_from_user(&cmd, buf, sizeof cmd)) 1999 return -EFAULT; 2000 2001 memset(&resp, 0, sizeof resp); 2002 2003 uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext); 2004 if (!uobj) 2005 return -EINVAL; 2006 qp = uobj->object; 2007 obj = container_of(uobj, struct ib_uqp_object, uevent.uobject); 2008 2009 if (!list_empty(&obj->mcast_list)) { 2010 put_uobj_write(uobj); 2011 return -EBUSY; 2012 } 2013 2014 ret = ib_destroy_qp(qp); 2015 if (!ret) 2016 uobj->live = 0; 2017 2018 put_uobj_write(uobj); 2019 2020 if (ret) 2021 return ret; 2022 2023 idr_remove_uobj(&ib_uverbs_qp_idr, uobj); 2024 2025 mutex_lock(&file->mutex); 2026 list_del(&uobj->list); 2027 mutex_unlock(&file->mutex); 2028 2029 ib_uverbs_release_uevent(file, &obj->uevent); 2030 2031 resp.events_reported = obj->uevent.events_reported; 2032 2033 put_uobj(uobj); 2034 2035 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2036 &resp, sizeof resp)) 2037 return -EFAULT; 2038 2039 return in_len; 2040 } 2041 2042 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, 2043 const char __user *buf, int in_len, 2044 int out_len) 2045 { 2046 struct ib_uverbs_post_send cmd; 2047 struct ib_uverbs_post_send_resp 
resp; 2048 struct ib_uverbs_send_wr *user_wr; 2049 struct ib_send_wr *wr = NULL, *last, *next, *bad_wr; 2050 struct ib_qp *qp; 2051 int i, sg_ind; 2052 int is_ud; 2053 ssize_t ret = -EINVAL; 2054 2055 if (copy_from_user(&cmd, buf, sizeof cmd)) 2056 return -EFAULT; 2057 2058 if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count + 2059 cmd.sge_count * sizeof (struct ib_uverbs_sge)) 2060 return -EINVAL; 2061 2062 if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr)) 2063 return -EINVAL; 2064 2065 user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL); 2066 if (!user_wr) 2067 return -ENOMEM; 2068 2069 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 2070 if (!qp) 2071 goto out; 2072 2073 is_ud = qp->qp_type == IB_QPT_UD; 2074 sg_ind = 0; 2075 last = NULL; 2076 for (i = 0; i < cmd.wr_count; ++i) { 2077 if (copy_from_user(user_wr, 2078 buf + sizeof cmd + i * cmd.wqe_size, 2079 cmd.wqe_size)) { 2080 ret = -EFAULT; 2081 goto out_put; 2082 } 2083 2084 if (user_wr->num_sge + sg_ind > cmd.sge_count) { 2085 ret = -EINVAL; 2086 goto out_put; 2087 } 2088 2089 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) + 2090 user_wr->num_sge * sizeof (struct ib_sge), 2091 GFP_KERNEL); 2092 if (!next) { 2093 ret = -ENOMEM; 2094 goto out_put; 2095 } 2096 2097 if (!last) 2098 wr = next; 2099 else 2100 last->next = next; 2101 last = next; 2102 2103 next->next = NULL; 2104 next->wr_id = user_wr->wr_id; 2105 next->num_sge = user_wr->num_sge; 2106 next->opcode = user_wr->opcode; 2107 next->send_flags = user_wr->send_flags; 2108 2109 if (is_ud) { 2110 next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah, 2111 file->ucontext); 2112 if (!next->wr.ud.ah) { 2113 ret = -EINVAL; 2114 goto out_put; 2115 } 2116 next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn; 2117 next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey; 2118 } else { 2119 switch (next->opcode) { 2120 case IB_WR_RDMA_WRITE_WITH_IMM: 2121 next->ex.imm_data = 2122 (__be32 __force) user_wr->ex.imm_data; 2123 case IB_WR_RDMA_WRITE: 2124 case 
IB_WR_RDMA_READ: 2125 next->wr.rdma.remote_addr = 2126 user_wr->wr.rdma.remote_addr; 2127 next->wr.rdma.rkey = 2128 user_wr->wr.rdma.rkey; 2129 break; 2130 case IB_WR_SEND_WITH_IMM: 2131 next->ex.imm_data = 2132 (__be32 __force) user_wr->ex.imm_data; 2133 break; 2134 case IB_WR_SEND_WITH_INV: 2135 next->ex.invalidate_rkey = 2136 user_wr->ex.invalidate_rkey; 2137 break; 2138 case IB_WR_ATOMIC_CMP_AND_SWP: 2139 case IB_WR_ATOMIC_FETCH_AND_ADD: 2140 next->wr.atomic.remote_addr = 2141 user_wr->wr.atomic.remote_addr; 2142 next->wr.atomic.compare_add = 2143 user_wr->wr.atomic.compare_add; 2144 next->wr.atomic.swap = user_wr->wr.atomic.swap; 2145 next->wr.atomic.rkey = user_wr->wr.atomic.rkey; 2146 break; 2147 default: 2148 break; 2149 } 2150 } 2151 2152 if (next->num_sge) { 2153 next->sg_list = (void *) next + 2154 ALIGN(sizeof *next, sizeof (struct ib_sge)); 2155 if (copy_from_user(next->sg_list, 2156 buf + sizeof cmd + 2157 cmd.wr_count * cmd.wqe_size + 2158 sg_ind * sizeof (struct ib_sge), 2159 next->num_sge * sizeof (struct ib_sge))) { 2160 ret = -EFAULT; 2161 goto out_put; 2162 } 2163 sg_ind += next->num_sge; 2164 } else 2165 next->sg_list = NULL; 2166 } 2167 2168 resp.bad_wr = 0; 2169 ret = qp->device->post_send(qp->real_qp, wr, &bad_wr); 2170 if (ret) 2171 for (next = wr; next; next = next->next) { 2172 ++resp.bad_wr; 2173 if (next == bad_wr) 2174 break; 2175 } 2176 2177 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2178 &resp, sizeof resp)) 2179 ret = -EFAULT; 2180 2181 out_put: 2182 put_qp_read(qp); 2183 2184 while (wr) { 2185 if (is_ud && wr->wr.ud.ah) 2186 put_ah_read(wr->wr.ud.ah); 2187 next = wr->next; 2188 kfree(wr); 2189 wr = next; 2190 } 2191 2192 out: 2193 kfree(user_wr); 2194 2195 return ret ? 
ret : in_len; 2196 } 2197 2198 static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf, 2199 int in_len, 2200 u32 wr_count, 2201 u32 sge_count, 2202 u32 wqe_size) 2203 { 2204 struct ib_uverbs_recv_wr *user_wr; 2205 struct ib_recv_wr *wr = NULL, *last, *next; 2206 int sg_ind; 2207 int i; 2208 int ret; 2209 2210 if (in_len < wqe_size * wr_count + 2211 sge_count * sizeof (struct ib_uverbs_sge)) 2212 return ERR_PTR(-EINVAL); 2213 2214 if (wqe_size < sizeof (struct ib_uverbs_recv_wr)) 2215 return ERR_PTR(-EINVAL); 2216 2217 user_wr = kmalloc(wqe_size, GFP_KERNEL); 2218 if (!user_wr) 2219 return ERR_PTR(-ENOMEM); 2220 2221 sg_ind = 0; 2222 last = NULL; 2223 for (i = 0; i < wr_count; ++i) { 2224 if (copy_from_user(user_wr, buf + i * wqe_size, 2225 wqe_size)) { 2226 ret = -EFAULT; 2227 goto err; 2228 } 2229 2230 if (user_wr->num_sge + sg_ind > sge_count) { 2231 ret = -EINVAL; 2232 goto err; 2233 } 2234 2235 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) + 2236 user_wr->num_sge * sizeof (struct ib_sge), 2237 GFP_KERNEL); 2238 if (!next) { 2239 ret = -ENOMEM; 2240 goto err; 2241 } 2242 2243 if (!last) 2244 wr = next; 2245 else 2246 last->next = next; 2247 last = next; 2248 2249 next->next = NULL; 2250 next->wr_id = user_wr->wr_id; 2251 next->num_sge = user_wr->num_sge; 2252 2253 if (next->num_sge) { 2254 next->sg_list = (void *) next + 2255 ALIGN(sizeof *next, sizeof (struct ib_sge)); 2256 if (copy_from_user(next->sg_list, 2257 buf + wr_count * wqe_size + 2258 sg_ind * sizeof (struct ib_sge), 2259 next->num_sge * sizeof (struct ib_sge))) { 2260 ret = -EFAULT; 2261 goto err; 2262 } 2263 sg_ind += next->num_sge; 2264 } else 2265 next->sg_list = NULL; 2266 } 2267 2268 kfree(user_wr); 2269 return wr; 2270 2271 err: 2272 kfree(user_wr); 2273 2274 while (wr) { 2275 next = wr->next; 2276 kfree(wr); 2277 wr = next; 2278 } 2279 2280 return ERR_PTR(ret); 2281 } 2282 2283 ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file, 2284 const char __user 
*buf, int in_len, 2285 int out_len) 2286 { 2287 struct ib_uverbs_post_recv cmd; 2288 struct ib_uverbs_post_recv_resp resp; 2289 struct ib_recv_wr *wr, *next, *bad_wr; 2290 struct ib_qp *qp; 2291 ssize_t ret = -EINVAL; 2292 2293 if (copy_from_user(&cmd, buf, sizeof cmd)) 2294 return -EFAULT; 2295 2296 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd, 2297 in_len - sizeof cmd, cmd.wr_count, 2298 cmd.sge_count, cmd.wqe_size); 2299 if (IS_ERR(wr)) 2300 return PTR_ERR(wr); 2301 2302 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 2303 if (!qp) 2304 goto out; 2305 2306 resp.bad_wr = 0; 2307 ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr); 2308 2309 put_qp_read(qp); 2310 2311 if (ret) 2312 for (next = wr; next; next = next->next) { 2313 ++resp.bad_wr; 2314 if (next == bad_wr) 2315 break; 2316 } 2317 2318 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2319 &resp, sizeof resp)) 2320 ret = -EFAULT; 2321 2322 out: 2323 while (wr) { 2324 next = wr->next; 2325 kfree(wr); 2326 wr = next; 2327 } 2328 2329 return ret ? 
ret : in_len; 2330 } 2331 2332 ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file, 2333 const char __user *buf, int in_len, 2334 int out_len) 2335 { 2336 struct ib_uverbs_post_srq_recv cmd; 2337 struct ib_uverbs_post_srq_recv_resp resp; 2338 struct ib_recv_wr *wr, *next, *bad_wr; 2339 struct ib_srq *srq; 2340 ssize_t ret = -EINVAL; 2341 2342 if (copy_from_user(&cmd, buf, sizeof cmd)) 2343 return -EFAULT; 2344 2345 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd, 2346 in_len - sizeof cmd, cmd.wr_count, 2347 cmd.sge_count, cmd.wqe_size); 2348 if (IS_ERR(wr)) 2349 return PTR_ERR(wr); 2350 2351 srq = idr_read_srq(cmd.srq_handle, file->ucontext); 2352 if (!srq) 2353 goto out; 2354 2355 resp.bad_wr = 0; 2356 ret = srq->device->post_srq_recv(srq, wr, &bad_wr); 2357 2358 put_srq_read(srq); 2359 2360 if (ret) 2361 for (next = wr; next; next = next->next) { 2362 ++resp.bad_wr; 2363 if (next == bad_wr) 2364 break; 2365 } 2366 2367 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2368 &resp, sizeof resp)) 2369 ret = -EFAULT; 2370 2371 out: 2372 while (wr) { 2373 next = wr->next; 2374 kfree(wr); 2375 wr = next; 2376 } 2377 2378 return ret ? 
		/* Tail of a handler whose start is above this chunk:
		 * completes the usual "return ret ? ret : in_len;" idiom. */
		ret : in_len;
}

/*
 * Create an address handle (AH) on the PD named in the command.
 *
 * The new uobject's rwsem is held for writing (down_write) and its
 * "live" flag is left clear until the handle has been copied back to
 * userspace, so concurrent lookups cannot observe a half-initialized
 * object -- see the locking notes at the top of this file.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah	 cmd;
	struct ib_uverbs_create_ah_resp	 resp;
	struct ib_uobject		*uobj;
	struct ib_pd			*pd;
	struct ib_ah			*ah;
	struct ib_ah_attr		 attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
	down_write(&uobj->mutex);

	/* Takes a read reference on the PD; released via put_pd_read(). */
	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	/* Translate the userspace command into kernel ib_ah_attr form. */
	attr.dlid	       = cmd.attr.dlid;
	attr.sl		       = cmd.attr.sl;
	attr.src_path_bits     = cmd.attr.src_path_bits;
	attr.static_rate       = cmd.attr.static_rate;
	attr.ah_flags	       = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num	       = cmd.attr.port_num;
	attr.grh.flow_label    = cmd.attr.grh.flow_label;
	attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject  = uobj;
	uobj->object = ah;

	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
	if (ret)
		goto err_destroy;

	/* NOTE(review): only ah_handle is written; resp is not zeroed
	 * first -- confirm the resp struct has no padding/other fields
	 * that could leak stack data to userspace. */
	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	/* Publish: lookups of this handle may now succeed. */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

	/* Error unwinding, in reverse order of acquisition. */
err_copy:
	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
	ib_destroy_ah(ah);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

/*
 * Destroy an AH by handle.  The uobject is write-locked for the
 * destroy; "live" is cleared only if the hardware destroy succeeded,
 * after which the handle is removed from the idr and the per-context
 * list, and the final reference is dropped.
 */
ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_ah		   *ah;
	struct ib_uobject	   *uobj;
	int			    ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	ah = uobj->object;

	ret = ib_destroy_ah(ah);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	/* Destroy failed: object stays live and registered. */
	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

/*
 * Attach a QP to a multicast group (mlid + gid).  A per-QP list of
 * attached groups is kept on the uobject so the attachment can be
 * found again at detach/cleanup time; attaching an already-listed
 * group is a successful no-op.
 */
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp		     *qp;
	struct ib_uqp_object	     *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int			      ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* Duplicate attach: already on the list, report success. */
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	/* Only record the entry if the hardware attach succeeded. */
	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}

/*
 * Detach a QP from a multicast group and drop the matching tracking
 * entry (if any) from the per-QP mcast list.  The hardware detach is
 * performed first; the list is only touched on success.
 */
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object	     *obj;
	struct ib_qp		     *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int			      ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
	if (ret)
		goto out_put;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* Remove the bookkeeping entry recorded at attach time. */
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			break;
		}

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}

/*
 * Common worker for SRQ creation, shared by ib_uverbs_create_srq()
 * (basic SRQs) and ib_uverbs_create_xsrq() (XRC SRQs).  For the XRC
 * type it additionally takes read references on the XRCD and CQ named
 * in the command; attr.ext.xrc and xrcd_uobj are only initialized and
 * used on that path.  Returns 0 on success, negative errno otherwise.
 */
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object		*obj;
	struct ib_pd			*pd;
	struct ib_srq			*srq;
	struct ib_uobject		*uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr		 attr;
	int ret;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd->srq_type == IB_SRQT_XRC) {
		attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err;
		}

		/* Pin the XRCD for the lifetime of this SRQ. */
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);

		attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
		if (!attr.ext.xrc.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = idr_read_pd(cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.srq_type	    = cmd->srq_type;
	attr.attr.max_wr    = cmd->max_wr;
	attr.attr.max_sge   = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	/* Driver may update attr.attr with the actual capabilities. */
	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device	   = pd->device;
	srq->pd		   = pd;
	srq->srq_type	   = cmd->srq_type;
	srq->uobject	   = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.cq	  = attr.ext.xrc.cq;
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.cq->usecnt);
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	/* Zeroed first, so no uninitialized bytes reach userspace. */
	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr	= attr.attr.max_wr;
	resp.max_sge	= attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	/* Drop the lookup references; the usecnt increments above keep
	 * the CQ/XRCD/PD alive while the SRQ exists. */
	if (cmd->srq_type == IB_SRQT_XRC) {
		put_uobj_read(xrcd_uobj);
		put_cq_read(attr.ext.xrc.cq);
	}
	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
	mutex_unlock(&file->mutex);

	/* Publish: lookups of this handle may now succeed. */
	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;

	/* Error unwinding, in reverse order of acquisition. */
err_copy:
	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_srq(srq);

err_put:
	put_pd_read(pd);

err_put_cq:
	if (cmd->srq_type == IB_SRQT_XRC)
		put_cq_read(attr.ext.xrc.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		put_uobj_read(xrcd_uobj);
	}

err:
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

/*
 * Create a basic (non-XRC) SRQ: translate the legacy create_srq
 * command into the extended create_xsrq form and share the common
 * worker.  Returns in_len on success.
 */
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq	 cmd;
	struct ib_uverbs_create_xsrq	 xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata			 udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* xcmd has no xrcd/cq handles for the basic type. */
	xcmd.response	 = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type	 = IB_SRQT_BASIC;
	xcmd.pd_handle	 = cmd.pd_handle;
	xcmd.max_wr	 = cmd.max_wr;
	xcmd.max_sge	 = cmd.max_sge;
	xcmd.srq_limit	 = cmd.srq_limit;

	/* Driver-private in/out data follows the fixed-size structs. */
	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, &xcmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

/*
 * Create an extended (possibly XRC) SRQ.  Thin wrapper that copies in
 * the command, sets up udata, and calls the common worker.
 */
ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_xsrq	 cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata			 udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

/*
 * Modify an SRQ's attributes (max_wr and/or srq_limit, selected by
 * cmd.attr_mask).  Calls the device driver directly so that any
 * trailing driver-private udata can be passed through.
 */
ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata		    udata;
	struct ib_srq		   *srq;
	struct ib_srq_attr	    attr;
	int			    ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Input-only udata: no response buffer for this command. */
	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr    = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	put_srq_read(srq);

	return ret ? ret : in_len;
}

/*
 * Query an SRQ's current attributes and copy them back to userspace.
 * The read reference is dropped before the copy_to_user; resp is
 * zeroed first.
 */
ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq	cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr		attr;
	struct ib_srq			*srq;
	int				ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	put_srq_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr    = attr.max_wr;
	resp.max_sge   = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

/*
 * Destroy an SRQ by handle.  Mirrors ib_uverbs_destroy_ah(), but also
 * flushes pending async events for the object (release_uevent) and
 * reports how many events were delivered back to userspace.  The
 * events_reported count is read into resp before the final put_uobj()
 * can free the object.
 */
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq	  cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_srq			 *srq;
	struct ib_uevent_object		 *obj;
	int				  ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);

	ret = ib_destroy_srq(srq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, obj);

	memset(&resp, 0, sizeof resp);
	resp.events_reported = obj->events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

	return ret ? ret : in_len;
}