1 /* 2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. 4 * Copyright (c) 2005 PathScale, Inc. All rights reserved. 5 * Copyright (c) 2006 Mellanox Technologies. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 
34 */ 35 36 #include <linux/file.h> 37 #include <linux/fs.h> 38 #include <linux/slab.h> 39 40 #include <asm/uaccess.h> 41 42 #include "uverbs.h" 43 44 struct uverbs_lock_class { 45 struct lock_class_key key; 46 char name[16]; 47 }; 48 49 static struct uverbs_lock_class pd_lock_class = { .name = "PD-uobj" }; 50 static struct uverbs_lock_class mr_lock_class = { .name = "MR-uobj" }; 51 static struct uverbs_lock_class cq_lock_class = { .name = "CQ-uobj" }; 52 static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" }; 53 static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" }; 54 static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" }; 55 static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" }; 56 57 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ 58 do { \ 59 (udata)->inbuf = (void __user *) (ibuf); \ 60 (udata)->outbuf = (void __user *) (obuf); \ 61 (udata)->inlen = (ilen); \ 62 (udata)->outlen = (olen); \ 63 } while (0) 64 65 /* 66 * The ib_uobject locking scheme is as follows: 67 * 68 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it 69 * needs to be held during all idr operations. When an object is 70 * looked up, a reference must be taken on the object's kref before 71 * dropping this lock. 72 * 73 * - Each object also has an rwsem. This rwsem must be held for 74 * reading while an operation that uses the object is performed. 75 * For example, while registering an MR, the associated PD's 76 * uobject.mutex must be held for reading. The rwsem must be held 77 * for writing while initializing or destroying an object. 78 * 79 * - In addition, each object has a "live" flag. If this flag is not 80 * set, then lookups of the object will fail even if it is found in 81 * the idr. This handles a reader that blocks and does not acquire 82 * the rwsem until after the object is destroyed. 
The destroy 83 * operation will set the live flag to 0 and then drop the rwsem; 84 * this will allow the reader to acquire the rwsem, see that the 85 * live flag is 0, and then drop the rwsem and its reference to 86 * object. The underlying storage will not be freed until the last 87 * reference to the object is dropped. 88 */ 89 90 static void init_uobj(struct ib_uobject *uobj, u64 user_handle, 91 struct ib_ucontext *context, struct uverbs_lock_class *c) 92 { 93 uobj->user_handle = user_handle; 94 uobj->context = context; 95 kref_init(&uobj->ref); 96 init_rwsem(&uobj->mutex); 97 lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name); 98 uobj->live = 0; 99 } 100 101 static void release_uobj(struct kref *kref) 102 { 103 kfree(container_of(kref, struct ib_uobject, ref)); 104 } 105 106 static void put_uobj(struct ib_uobject *uobj) 107 { 108 kref_put(&uobj->ref, release_uobj); 109 } 110 111 static void put_uobj_read(struct ib_uobject *uobj) 112 { 113 up_read(&uobj->mutex); 114 put_uobj(uobj); 115 } 116 117 static void put_uobj_write(struct ib_uobject *uobj) 118 { 119 up_write(&uobj->mutex); 120 put_uobj(uobj); 121 } 122 123 static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj) 124 { 125 int ret; 126 127 retry: 128 if (!idr_pre_get(idr, GFP_KERNEL)) 129 return -ENOMEM; 130 131 spin_lock(&ib_uverbs_idr_lock); 132 ret = idr_get_new(idr, uobj, &uobj->id); 133 spin_unlock(&ib_uverbs_idr_lock); 134 135 if (ret == -EAGAIN) 136 goto retry; 137 138 return ret; 139 } 140 141 void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj) 142 { 143 spin_lock(&ib_uverbs_idr_lock); 144 idr_remove(idr, uobj->id); 145 spin_unlock(&ib_uverbs_idr_lock); 146 } 147 148 static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id, 149 struct ib_ucontext *context) 150 { 151 struct ib_uobject *uobj; 152 153 spin_lock(&ib_uverbs_idr_lock); 154 uobj = idr_find(idr, id); 155 if (uobj) { 156 if (uobj->context == context) 157 kref_get(&uobj->ref); 158 else 159 uobj = NULL; 160 
} 161 spin_unlock(&ib_uverbs_idr_lock); 162 163 return uobj; 164 } 165 166 static struct ib_uobject *idr_read_uobj(struct idr *idr, int id, 167 struct ib_ucontext *context, int nested) 168 { 169 struct ib_uobject *uobj; 170 171 uobj = __idr_get_uobj(idr, id, context); 172 if (!uobj) 173 return NULL; 174 175 if (nested) 176 down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING); 177 else 178 down_read(&uobj->mutex); 179 if (!uobj->live) { 180 put_uobj_read(uobj); 181 return NULL; 182 } 183 184 return uobj; 185 } 186 187 static struct ib_uobject *idr_write_uobj(struct idr *idr, int id, 188 struct ib_ucontext *context) 189 { 190 struct ib_uobject *uobj; 191 192 uobj = __idr_get_uobj(idr, id, context); 193 if (!uobj) 194 return NULL; 195 196 down_write(&uobj->mutex); 197 if (!uobj->live) { 198 put_uobj_write(uobj); 199 return NULL; 200 } 201 202 return uobj; 203 } 204 205 static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context, 206 int nested) 207 { 208 struct ib_uobject *uobj; 209 210 uobj = idr_read_uobj(idr, id, context, nested); 211 return uobj ? 
uobj->object : NULL; 212 } 213 214 static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context) 215 { 216 return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0); 217 } 218 219 static void put_pd_read(struct ib_pd *pd) 220 { 221 put_uobj_read(pd->uobject); 222 } 223 224 static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested) 225 { 226 return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested); 227 } 228 229 static void put_cq_read(struct ib_cq *cq) 230 { 231 put_uobj_read(cq->uobject); 232 } 233 234 static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context) 235 { 236 return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0); 237 } 238 239 static void put_ah_read(struct ib_ah *ah) 240 { 241 put_uobj_read(ah->uobject); 242 } 243 244 static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context) 245 { 246 return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0); 247 } 248 249 static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context) 250 { 251 struct ib_uobject *uobj; 252 253 uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context); 254 return uobj ? uobj->object : NULL; 255 } 256 257 static void put_qp_read(struct ib_qp *qp) 258 { 259 put_uobj_read(qp->uobject); 260 } 261 262 static void put_qp_write(struct ib_qp *qp) 263 { 264 put_uobj_write(qp->uobject); 265 } 266 267 static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context) 268 { 269 return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0); 270 } 271 272 static void put_srq_read(struct ib_srq *srq) 273 { 274 put_uobj_read(srq->uobject); 275 } 276 277 static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context, 278 struct ib_uobject **uobj) 279 { 280 *uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0); 281 return *uobj ? 
(*uobj)->object : NULL; 282 } 283 284 static void put_xrcd_read(struct ib_uobject *uobj) 285 { 286 put_uobj_read(uobj); 287 } 288 289 ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, 290 const char __user *buf, 291 int in_len, int out_len) 292 { 293 struct ib_uverbs_get_context cmd; 294 struct ib_uverbs_get_context_resp resp; 295 struct ib_udata udata; 296 struct ib_device *ibdev = file->device->ib_dev; 297 struct ib_ucontext *ucontext; 298 struct file *filp; 299 int ret; 300 301 if (out_len < sizeof resp) 302 return -ENOSPC; 303 304 if (copy_from_user(&cmd, buf, sizeof cmd)) 305 return -EFAULT; 306 307 mutex_lock(&file->mutex); 308 309 if (file->ucontext) { 310 ret = -EINVAL; 311 goto err; 312 } 313 314 INIT_UDATA(&udata, buf + sizeof cmd, 315 (unsigned long) cmd.response + sizeof resp, 316 in_len - sizeof cmd, out_len - sizeof resp); 317 318 ucontext = ibdev->alloc_ucontext(ibdev, &udata); 319 if (IS_ERR(ucontext)) { 320 ret = PTR_ERR(ucontext); 321 goto err; 322 } 323 324 ucontext->device = ibdev; 325 INIT_LIST_HEAD(&ucontext->pd_list); 326 INIT_LIST_HEAD(&ucontext->mr_list); 327 INIT_LIST_HEAD(&ucontext->mw_list); 328 INIT_LIST_HEAD(&ucontext->cq_list); 329 INIT_LIST_HEAD(&ucontext->qp_list); 330 INIT_LIST_HEAD(&ucontext->srq_list); 331 INIT_LIST_HEAD(&ucontext->ah_list); 332 INIT_LIST_HEAD(&ucontext->xrcd_list); 333 ucontext->closing = 0; 334 335 resp.num_comp_vectors = file->device->num_comp_vectors; 336 337 ret = get_unused_fd(); 338 if (ret < 0) 339 goto err_free; 340 resp.async_fd = ret; 341 342 filp = ib_uverbs_alloc_event_file(file, 1); 343 if (IS_ERR(filp)) { 344 ret = PTR_ERR(filp); 345 goto err_fd; 346 } 347 348 if (copy_to_user((void __user *) (unsigned long) cmd.response, 349 &resp, sizeof resp)) { 350 ret = -EFAULT; 351 goto err_file; 352 } 353 354 file->async_file = filp->private_data; 355 356 INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev, 357 ib_uverbs_event_handler); 358 ret = 
ib_register_event_handler(&file->event_handler); 359 if (ret) 360 goto err_file; 361 362 kref_get(&file->async_file->ref); 363 kref_get(&file->ref); 364 file->ucontext = ucontext; 365 366 fd_install(resp.async_fd, filp); 367 368 mutex_unlock(&file->mutex); 369 370 return in_len; 371 372 err_file: 373 fput(filp); 374 375 err_fd: 376 put_unused_fd(resp.async_fd); 377 378 err_free: 379 ibdev->dealloc_ucontext(ucontext); 380 381 err: 382 mutex_unlock(&file->mutex); 383 return ret; 384 } 385 386 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, 387 const char __user *buf, 388 int in_len, int out_len) 389 { 390 struct ib_uverbs_query_device cmd; 391 struct ib_uverbs_query_device_resp resp; 392 struct ib_device_attr attr; 393 int ret; 394 395 if (out_len < sizeof resp) 396 return -ENOSPC; 397 398 if (copy_from_user(&cmd, buf, sizeof cmd)) 399 return -EFAULT; 400 401 ret = ib_query_device(file->device->ib_dev, &attr); 402 if (ret) 403 return ret; 404 405 memset(&resp, 0, sizeof resp); 406 407 resp.fw_ver = attr.fw_ver; 408 resp.node_guid = file->device->ib_dev->node_guid; 409 resp.sys_image_guid = attr.sys_image_guid; 410 resp.max_mr_size = attr.max_mr_size; 411 resp.page_size_cap = attr.page_size_cap; 412 resp.vendor_id = attr.vendor_id; 413 resp.vendor_part_id = attr.vendor_part_id; 414 resp.hw_ver = attr.hw_ver; 415 resp.max_qp = attr.max_qp; 416 resp.max_qp_wr = attr.max_qp_wr; 417 resp.device_cap_flags = attr.device_cap_flags; 418 resp.max_sge = attr.max_sge; 419 resp.max_sge_rd = attr.max_sge_rd; 420 resp.max_cq = attr.max_cq; 421 resp.max_cqe = attr.max_cqe; 422 resp.max_mr = attr.max_mr; 423 resp.max_pd = attr.max_pd; 424 resp.max_qp_rd_atom = attr.max_qp_rd_atom; 425 resp.max_ee_rd_atom = attr.max_ee_rd_atom; 426 resp.max_res_rd_atom = attr.max_res_rd_atom; 427 resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom; 428 resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom; 429 resp.atomic_cap = attr.atomic_cap; 430 resp.max_ee = attr.max_ee; 431 resp.max_rdd 
= attr.max_rdd; 432 resp.max_mw = attr.max_mw; 433 resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp; 434 resp.max_raw_ethy_qp = attr.max_raw_ethy_qp; 435 resp.max_mcast_grp = attr.max_mcast_grp; 436 resp.max_mcast_qp_attach = attr.max_mcast_qp_attach; 437 resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach; 438 resp.max_ah = attr.max_ah; 439 resp.max_fmr = attr.max_fmr; 440 resp.max_map_per_fmr = attr.max_map_per_fmr; 441 resp.max_srq = attr.max_srq; 442 resp.max_srq_wr = attr.max_srq_wr; 443 resp.max_srq_sge = attr.max_srq_sge; 444 resp.max_pkeys = attr.max_pkeys; 445 resp.local_ca_ack_delay = attr.local_ca_ack_delay; 446 resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt; 447 448 if (copy_to_user((void __user *) (unsigned long) cmd.response, 449 &resp, sizeof resp)) 450 return -EFAULT; 451 452 return in_len; 453 } 454 455 ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file, 456 const char __user *buf, 457 int in_len, int out_len) 458 { 459 struct ib_uverbs_query_port cmd; 460 struct ib_uverbs_query_port_resp resp; 461 struct ib_port_attr attr; 462 int ret; 463 464 if (out_len < sizeof resp) 465 return -ENOSPC; 466 467 if (copy_from_user(&cmd, buf, sizeof cmd)) 468 return -EFAULT; 469 470 ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr); 471 if (ret) 472 return ret; 473 474 memset(&resp, 0, sizeof resp); 475 476 resp.state = attr.state; 477 resp.max_mtu = attr.max_mtu; 478 resp.active_mtu = attr.active_mtu; 479 resp.gid_tbl_len = attr.gid_tbl_len; 480 resp.port_cap_flags = attr.port_cap_flags; 481 resp.max_msg_sz = attr.max_msg_sz; 482 resp.bad_pkey_cntr = attr.bad_pkey_cntr; 483 resp.qkey_viol_cntr = attr.qkey_viol_cntr; 484 resp.pkey_tbl_len = attr.pkey_tbl_len; 485 resp.lid = attr.lid; 486 resp.sm_lid = attr.sm_lid; 487 resp.lmc = attr.lmc; 488 resp.max_vl_num = attr.max_vl_num; 489 resp.sm_sl = attr.sm_sl; 490 resp.subnet_timeout = attr.subnet_timeout; 491 resp.init_type_reply = attr.init_type_reply; 492 resp.active_width = 
attr.active_width; 493 resp.active_speed = attr.active_speed; 494 resp.phys_state = attr.phys_state; 495 resp.link_layer = rdma_port_get_link_layer(file->device->ib_dev, 496 cmd.port_num); 497 498 if (copy_to_user((void __user *) (unsigned long) cmd.response, 499 &resp, sizeof resp)) 500 return -EFAULT; 501 502 return in_len; 503 } 504 505 ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file, 506 const char __user *buf, 507 int in_len, int out_len) 508 { 509 struct ib_uverbs_alloc_pd cmd; 510 struct ib_uverbs_alloc_pd_resp resp; 511 struct ib_udata udata; 512 struct ib_uobject *uobj; 513 struct ib_pd *pd; 514 int ret; 515 516 if (out_len < sizeof resp) 517 return -ENOSPC; 518 519 if (copy_from_user(&cmd, buf, sizeof cmd)) 520 return -EFAULT; 521 522 INIT_UDATA(&udata, buf + sizeof cmd, 523 (unsigned long) cmd.response + sizeof resp, 524 in_len - sizeof cmd, out_len - sizeof resp); 525 526 uobj = kmalloc(sizeof *uobj, GFP_KERNEL); 527 if (!uobj) 528 return -ENOMEM; 529 530 init_uobj(uobj, 0, file->ucontext, &pd_lock_class); 531 down_write(&uobj->mutex); 532 533 pd = file->device->ib_dev->alloc_pd(file->device->ib_dev, 534 file->ucontext, &udata); 535 if (IS_ERR(pd)) { 536 ret = PTR_ERR(pd); 537 goto err; 538 } 539 540 pd->device = file->device->ib_dev; 541 pd->uobject = uobj; 542 atomic_set(&pd->usecnt, 0); 543 544 uobj->object = pd; 545 ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj); 546 if (ret) 547 goto err_idr; 548 549 memset(&resp, 0, sizeof resp); 550 resp.pd_handle = uobj->id; 551 552 if (copy_to_user((void __user *) (unsigned long) cmd.response, 553 &resp, sizeof resp)) { 554 ret = -EFAULT; 555 goto err_copy; 556 } 557 558 mutex_lock(&file->mutex); 559 list_add_tail(&uobj->list, &file->ucontext->pd_list); 560 mutex_unlock(&file->mutex); 561 562 uobj->live = 1; 563 564 up_write(&uobj->mutex); 565 566 return in_len; 567 568 err_copy: 569 idr_remove_uobj(&ib_uverbs_pd_idr, uobj); 570 571 err_idr: 572 ib_dealloc_pd(pd); 573 574 err: 575 put_uobj_write(uobj); 576 
return ret; 577 } 578 579 ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file, 580 const char __user *buf, 581 int in_len, int out_len) 582 { 583 struct ib_uverbs_dealloc_pd cmd; 584 struct ib_uobject *uobj; 585 int ret; 586 587 if (copy_from_user(&cmd, buf, sizeof cmd)) 588 return -EFAULT; 589 590 uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext); 591 if (!uobj) 592 return -EINVAL; 593 594 ret = ib_dealloc_pd(uobj->object); 595 if (!ret) 596 uobj->live = 0; 597 598 put_uobj_write(uobj); 599 600 if (ret) 601 return ret; 602 603 idr_remove_uobj(&ib_uverbs_pd_idr, uobj); 604 605 mutex_lock(&file->mutex); 606 list_del(&uobj->list); 607 mutex_unlock(&file->mutex); 608 609 put_uobj(uobj); 610 611 return in_len; 612 } 613 614 struct xrcd_table_entry { 615 struct rb_node node; 616 struct ib_xrcd *xrcd; 617 struct inode *inode; 618 }; 619 620 static int xrcd_table_insert(struct ib_uverbs_device *dev, 621 struct inode *inode, 622 struct ib_xrcd *xrcd) 623 { 624 struct xrcd_table_entry *entry, *scan; 625 struct rb_node **p = &dev->xrcd_tree.rb_node; 626 struct rb_node *parent = NULL; 627 628 entry = kmalloc(sizeof *entry, GFP_KERNEL); 629 if (!entry) 630 return -ENOMEM; 631 632 entry->xrcd = xrcd; 633 entry->inode = inode; 634 635 while (*p) { 636 parent = *p; 637 scan = rb_entry(parent, struct xrcd_table_entry, node); 638 639 if (inode < scan->inode) { 640 p = &(*p)->rb_left; 641 } else if (inode > scan->inode) { 642 p = &(*p)->rb_right; 643 } else { 644 kfree(entry); 645 return -EEXIST; 646 } 647 } 648 649 rb_link_node(&entry->node, parent, p); 650 rb_insert_color(&entry->node, &dev->xrcd_tree); 651 igrab(inode); 652 return 0; 653 } 654 655 static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev, 656 struct inode *inode) 657 { 658 struct xrcd_table_entry *entry; 659 struct rb_node *p = dev->xrcd_tree.rb_node; 660 661 while (p) { 662 entry = rb_entry(p, struct xrcd_table_entry, node); 663 664 if (inode < entry->inode) 665 p 
= p->rb_left; 666 else if (inode > entry->inode) 667 p = p->rb_right; 668 else 669 return entry; 670 } 671 672 return NULL; 673 } 674 675 static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode) 676 { 677 struct xrcd_table_entry *entry; 678 679 entry = xrcd_table_search(dev, inode); 680 if (!entry) 681 return NULL; 682 683 return entry->xrcd; 684 } 685 686 static void xrcd_table_delete(struct ib_uverbs_device *dev, 687 struct inode *inode) 688 { 689 struct xrcd_table_entry *entry; 690 691 entry = xrcd_table_search(dev, inode); 692 if (entry) { 693 iput(inode); 694 rb_erase(&entry->node, &dev->xrcd_tree); 695 kfree(entry); 696 } 697 } 698 699 ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file, 700 const char __user *buf, int in_len, 701 int out_len) 702 { 703 struct ib_uverbs_open_xrcd cmd; 704 struct ib_uverbs_open_xrcd_resp resp; 705 struct ib_udata udata; 706 struct ib_uxrcd_object *obj; 707 struct ib_xrcd *xrcd = NULL; 708 struct file *f = NULL; 709 struct inode *inode = NULL; 710 int ret = 0; 711 int new_xrcd = 0; 712 713 if (out_len < sizeof resp) 714 return -ENOSPC; 715 716 if (copy_from_user(&cmd, buf, sizeof cmd)) 717 return -EFAULT; 718 719 INIT_UDATA(&udata, buf + sizeof cmd, 720 (unsigned long) cmd.response + sizeof resp, 721 in_len - sizeof cmd, out_len - sizeof resp); 722 723 mutex_lock(&file->device->xrcd_tree_mutex); 724 725 if (cmd.fd != -1) { 726 /* search for file descriptor */ 727 f = fget(cmd.fd); 728 if (!f) { 729 ret = -EBADF; 730 goto err_tree_mutex_unlock; 731 } 732 733 inode = f->f_dentry->d_inode; 734 if (!inode) { 735 ret = -EBADF; 736 goto err_tree_mutex_unlock; 737 } 738 739 xrcd = find_xrcd(file->device, inode); 740 if (!xrcd && !(cmd.oflags & O_CREAT)) { 741 /* no file descriptor. 
Need CREATE flag */ 742 ret = -EAGAIN; 743 goto err_tree_mutex_unlock; 744 } 745 746 if (xrcd && cmd.oflags & O_EXCL) { 747 ret = -EINVAL; 748 goto err_tree_mutex_unlock; 749 } 750 } 751 752 obj = kmalloc(sizeof *obj, GFP_KERNEL); 753 if (!obj) { 754 ret = -ENOMEM; 755 goto err_tree_mutex_unlock; 756 } 757 758 init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class); 759 760 down_write(&obj->uobject.mutex); 761 762 if (!xrcd) { 763 xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev, 764 file->ucontext, &udata); 765 if (IS_ERR(xrcd)) { 766 ret = PTR_ERR(xrcd); 767 goto err; 768 } 769 770 xrcd->inode = inode; 771 xrcd->device = file->device->ib_dev; 772 atomic_set(&xrcd->usecnt, 0); 773 mutex_init(&xrcd->tgt_qp_mutex); 774 INIT_LIST_HEAD(&xrcd->tgt_qp_list); 775 new_xrcd = 1; 776 } 777 778 atomic_set(&obj->refcnt, 0); 779 obj->uobject.object = xrcd; 780 ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject); 781 if (ret) 782 goto err_idr; 783 784 memset(&resp, 0, sizeof resp); 785 resp.xrcd_handle = obj->uobject.id; 786 787 if (inode) { 788 if (new_xrcd) { 789 /* create new inode/xrcd table entry */ 790 ret = xrcd_table_insert(file->device, inode, xrcd); 791 if (ret) 792 goto err_insert_xrcd; 793 } 794 atomic_inc(&xrcd->usecnt); 795 } 796 797 if (copy_to_user((void __user *) (unsigned long) cmd.response, 798 &resp, sizeof resp)) { 799 ret = -EFAULT; 800 goto err_copy; 801 } 802 803 if (f) 804 fput(f); 805 806 mutex_lock(&file->mutex); 807 list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list); 808 mutex_unlock(&file->mutex); 809 810 obj->uobject.live = 1; 811 up_write(&obj->uobject.mutex); 812 813 mutex_unlock(&file->device->xrcd_tree_mutex); 814 return in_len; 815 816 err_copy: 817 if (inode) { 818 if (new_xrcd) 819 xrcd_table_delete(file->device, inode); 820 atomic_dec(&xrcd->usecnt); 821 } 822 823 err_insert_xrcd: 824 idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject); 825 826 err_idr: 827 ib_dealloc_xrcd(xrcd); 828 829 err: 830 
put_uobj_write(&obj->uobject); 831 832 err_tree_mutex_unlock: 833 if (f) 834 fput(f); 835 836 mutex_unlock(&file->device->xrcd_tree_mutex); 837 838 return ret; 839 } 840 841 ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file, 842 const char __user *buf, int in_len, 843 int out_len) 844 { 845 struct ib_uverbs_close_xrcd cmd; 846 struct ib_uobject *uobj; 847 struct ib_xrcd *xrcd = NULL; 848 struct inode *inode = NULL; 849 struct ib_uxrcd_object *obj; 850 int live; 851 int ret = 0; 852 853 if (copy_from_user(&cmd, buf, sizeof cmd)) 854 return -EFAULT; 855 856 mutex_lock(&file->device->xrcd_tree_mutex); 857 uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext); 858 if (!uobj) { 859 ret = -EINVAL; 860 goto out; 861 } 862 863 xrcd = uobj->object; 864 inode = xrcd->inode; 865 obj = container_of(uobj, struct ib_uxrcd_object, uobject); 866 if (atomic_read(&obj->refcnt)) { 867 put_uobj_write(uobj); 868 ret = -EBUSY; 869 goto out; 870 } 871 872 if (!inode || atomic_dec_and_test(&xrcd->usecnt)) { 873 ret = ib_dealloc_xrcd(uobj->object); 874 if (!ret) 875 uobj->live = 0; 876 } 877 878 live = uobj->live; 879 if (inode && ret) 880 atomic_inc(&xrcd->usecnt); 881 882 put_uobj_write(uobj); 883 884 if (ret) 885 goto out; 886 887 if (inode && !live) 888 xrcd_table_delete(file->device, inode); 889 890 idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj); 891 mutex_lock(&file->mutex); 892 list_del(&uobj->list); 893 mutex_unlock(&file->mutex); 894 895 put_uobj(uobj); 896 ret = in_len; 897 898 out: 899 mutex_unlock(&file->device->xrcd_tree_mutex); 900 return ret; 901 } 902 903 void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev, 904 struct ib_xrcd *xrcd) 905 { 906 struct inode *inode; 907 908 inode = xrcd->inode; 909 if (inode && !atomic_dec_and_test(&xrcd->usecnt)) 910 return; 911 912 ib_dealloc_xrcd(xrcd); 913 914 if (inode) 915 xrcd_table_delete(dev, inode); 916 } 917 918 ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file, 919 const char __user *buf, int 
in_len, 920 int out_len) 921 { 922 struct ib_uverbs_reg_mr cmd; 923 struct ib_uverbs_reg_mr_resp resp; 924 struct ib_udata udata; 925 struct ib_uobject *uobj; 926 struct ib_pd *pd; 927 struct ib_mr *mr; 928 int ret; 929 930 if (out_len < sizeof resp) 931 return -ENOSPC; 932 933 if (copy_from_user(&cmd, buf, sizeof cmd)) 934 return -EFAULT; 935 936 INIT_UDATA(&udata, buf + sizeof cmd, 937 (unsigned long) cmd.response + sizeof resp, 938 in_len - sizeof cmd, out_len - sizeof resp); 939 940 if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)) 941 return -EINVAL; 942 943 /* 944 * Local write permission is required if remote write or 945 * remote atomic permission is also requested. 946 */ 947 if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) && 948 !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE)) 949 return -EINVAL; 950 951 uobj = kmalloc(sizeof *uobj, GFP_KERNEL); 952 if (!uobj) 953 return -ENOMEM; 954 955 init_uobj(uobj, 0, file->ucontext, &mr_lock_class); 956 down_write(&uobj->mutex); 957 958 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 959 if (!pd) { 960 ret = -EINVAL; 961 goto err_free; 962 } 963 964 mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va, 965 cmd.access_flags, &udata); 966 if (IS_ERR(mr)) { 967 ret = PTR_ERR(mr); 968 goto err_put; 969 } 970 971 mr->device = pd->device; 972 mr->pd = pd; 973 mr->uobject = uobj; 974 atomic_inc(&pd->usecnt); 975 atomic_set(&mr->usecnt, 0); 976 977 uobj->object = mr; 978 ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj); 979 if (ret) 980 goto err_unreg; 981 982 memset(&resp, 0, sizeof resp); 983 resp.lkey = mr->lkey; 984 resp.rkey = mr->rkey; 985 resp.mr_handle = uobj->id; 986 987 if (copy_to_user((void __user *) (unsigned long) cmd.response, 988 &resp, sizeof resp)) { 989 ret = -EFAULT; 990 goto err_copy; 991 } 992 993 put_pd_read(pd); 994 995 mutex_lock(&file->mutex); 996 list_add_tail(&uobj->list, &file->ucontext->mr_list); 997 mutex_unlock(&file->mutex); 998 999 uobj->live = 1; 
1000 1001 up_write(&uobj->mutex); 1002 1003 return in_len; 1004 1005 err_copy: 1006 idr_remove_uobj(&ib_uverbs_mr_idr, uobj); 1007 1008 err_unreg: 1009 ib_dereg_mr(mr); 1010 1011 err_put: 1012 put_pd_read(pd); 1013 1014 err_free: 1015 put_uobj_write(uobj); 1016 return ret; 1017 } 1018 1019 ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file, 1020 const char __user *buf, int in_len, 1021 int out_len) 1022 { 1023 struct ib_uverbs_dereg_mr cmd; 1024 struct ib_mr *mr; 1025 struct ib_uobject *uobj; 1026 int ret = -EINVAL; 1027 1028 if (copy_from_user(&cmd, buf, sizeof cmd)) 1029 return -EFAULT; 1030 1031 uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext); 1032 if (!uobj) 1033 return -EINVAL; 1034 1035 mr = uobj->object; 1036 1037 ret = ib_dereg_mr(mr); 1038 if (!ret) 1039 uobj->live = 0; 1040 1041 put_uobj_write(uobj); 1042 1043 if (ret) 1044 return ret; 1045 1046 idr_remove_uobj(&ib_uverbs_mr_idr, uobj); 1047 1048 mutex_lock(&file->mutex); 1049 list_del(&uobj->list); 1050 mutex_unlock(&file->mutex); 1051 1052 put_uobj(uobj); 1053 1054 return in_len; 1055 } 1056 1057 ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file, 1058 const char __user *buf, int in_len, 1059 int out_len) 1060 { 1061 struct ib_uverbs_create_comp_channel cmd; 1062 struct ib_uverbs_create_comp_channel_resp resp; 1063 struct file *filp; 1064 int ret; 1065 1066 if (out_len < sizeof resp) 1067 return -ENOSPC; 1068 1069 if (copy_from_user(&cmd, buf, sizeof cmd)) 1070 return -EFAULT; 1071 1072 ret = get_unused_fd(); 1073 if (ret < 0) 1074 return ret; 1075 resp.fd = ret; 1076 1077 filp = ib_uverbs_alloc_event_file(file, 0); 1078 if (IS_ERR(filp)) { 1079 put_unused_fd(resp.fd); 1080 return PTR_ERR(filp); 1081 } 1082 1083 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1084 &resp, sizeof resp)) { 1085 put_unused_fd(resp.fd); 1086 fput(filp); 1087 return -EFAULT; 1088 } 1089 1090 fd_install(resp.fd, filp); 1091 return in_len; 1092 } 1093 1094 ssize_t 
ib_uverbs_create_cq(struct ib_uverbs_file *file, 1095 const char __user *buf, int in_len, 1096 int out_len) 1097 { 1098 struct ib_uverbs_create_cq cmd; 1099 struct ib_uverbs_create_cq_resp resp; 1100 struct ib_udata udata; 1101 struct ib_ucq_object *obj; 1102 struct ib_uverbs_event_file *ev_file = NULL; 1103 struct ib_cq *cq; 1104 int ret; 1105 1106 if (out_len < sizeof resp) 1107 return -ENOSPC; 1108 1109 if (copy_from_user(&cmd, buf, sizeof cmd)) 1110 return -EFAULT; 1111 1112 INIT_UDATA(&udata, buf + sizeof cmd, 1113 (unsigned long) cmd.response + sizeof resp, 1114 in_len - sizeof cmd, out_len - sizeof resp); 1115 1116 if (cmd.comp_vector >= file->device->num_comp_vectors) 1117 return -EINVAL; 1118 1119 obj = kmalloc(sizeof *obj, GFP_KERNEL); 1120 if (!obj) 1121 return -ENOMEM; 1122 1123 init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_class); 1124 down_write(&obj->uobject.mutex); 1125 1126 if (cmd.comp_channel >= 0) { 1127 ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel); 1128 if (!ev_file) { 1129 ret = -EINVAL; 1130 goto err; 1131 } 1132 } 1133 1134 obj->uverbs_file = file; 1135 obj->comp_events_reported = 0; 1136 obj->async_events_reported = 0; 1137 INIT_LIST_HEAD(&obj->comp_list); 1138 INIT_LIST_HEAD(&obj->async_list); 1139 1140 cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe, 1141 cmd.comp_vector, 1142 file->ucontext, &udata); 1143 if (IS_ERR(cq)) { 1144 ret = PTR_ERR(cq); 1145 goto err_file; 1146 } 1147 1148 cq->device = file->device->ib_dev; 1149 cq->uobject = &obj->uobject; 1150 cq->comp_handler = ib_uverbs_comp_handler; 1151 cq->event_handler = ib_uverbs_cq_event_handler; 1152 cq->cq_context = ev_file; 1153 atomic_set(&cq->usecnt, 0); 1154 1155 obj->uobject.object = cq; 1156 ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject); 1157 if (ret) 1158 goto err_free; 1159 1160 memset(&resp, 0, sizeof resp); 1161 resp.cq_handle = obj->uobject.id; 1162 resp.cqe = cq->cqe; 1163 1164 if (copy_to_user((void __user *) 
(unsigned long) cmd.response, 1165 &resp, sizeof resp)) { 1166 ret = -EFAULT; 1167 goto err_copy; 1168 } 1169 1170 mutex_lock(&file->mutex); 1171 list_add_tail(&obj->uobject.list, &file->ucontext->cq_list); 1172 mutex_unlock(&file->mutex); 1173 1174 obj->uobject.live = 1; 1175 1176 up_write(&obj->uobject.mutex); 1177 1178 return in_len; 1179 1180 err_copy: 1181 idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject); 1182 1183 err_free: 1184 ib_destroy_cq(cq); 1185 1186 err_file: 1187 if (ev_file) 1188 ib_uverbs_release_ucq(file, ev_file, obj); 1189 1190 err: 1191 put_uobj_write(&obj->uobject); 1192 return ret; 1193 } 1194 1195 ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file, 1196 const char __user *buf, int in_len, 1197 int out_len) 1198 { 1199 struct ib_uverbs_resize_cq cmd; 1200 struct ib_uverbs_resize_cq_resp resp; 1201 struct ib_udata udata; 1202 struct ib_cq *cq; 1203 int ret = -EINVAL; 1204 1205 if (copy_from_user(&cmd, buf, sizeof cmd)) 1206 return -EFAULT; 1207 1208 INIT_UDATA(&udata, buf + sizeof cmd, 1209 (unsigned long) cmd.response + sizeof resp, 1210 in_len - sizeof cmd, out_len - sizeof resp); 1211 1212 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0); 1213 if (!cq) 1214 return -EINVAL; 1215 1216 ret = cq->device->resize_cq(cq, cmd.cqe, &udata); 1217 if (ret) 1218 goto out; 1219 1220 resp.cqe = cq->cqe; 1221 1222 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1223 &resp, sizeof resp.cqe)) 1224 ret = -EFAULT; 1225 1226 out: 1227 put_cq_read(cq); 1228 1229 return ret ? 
ret : in_len; 1230 } 1231 1232 static int copy_wc_to_user(void __user *dest, struct ib_wc *wc) 1233 { 1234 struct ib_uverbs_wc tmp; 1235 1236 tmp.wr_id = wc->wr_id; 1237 tmp.status = wc->status; 1238 tmp.opcode = wc->opcode; 1239 tmp.vendor_err = wc->vendor_err; 1240 tmp.byte_len = wc->byte_len; 1241 tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data; 1242 tmp.qp_num = wc->qp->qp_num; 1243 tmp.src_qp = wc->src_qp; 1244 tmp.wc_flags = wc->wc_flags; 1245 tmp.pkey_index = wc->pkey_index; 1246 tmp.slid = wc->slid; 1247 tmp.sl = wc->sl; 1248 tmp.dlid_path_bits = wc->dlid_path_bits; 1249 tmp.port_num = wc->port_num; 1250 tmp.reserved = 0; 1251 1252 if (copy_to_user(dest, &tmp, sizeof tmp)) 1253 return -EFAULT; 1254 1255 return 0; 1256 } 1257 1258 ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file, 1259 const char __user *buf, int in_len, 1260 int out_len) 1261 { 1262 struct ib_uverbs_poll_cq cmd; 1263 struct ib_uverbs_poll_cq_resp resp; 1264 u8 __user *header_ptr; 1265 u8 __user *data_ptr; 1266 struct ib_cq *cq; 1267 struct ib_wc wc; 1268 int ret; 1269 1270 if (copy_from_user(&cmd, buf, sizeof cmd)) 1271 return -EFAULT; 1272 1273 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0); 1274 if (!cq) 1275 return -EINVAL; 1276 1277 /* we copy a struct ib_uverbs_poll_cq_resp to user space */ 1278 header_ptr = (void __user *)(unsigned long) cmd.response; 1279 data_ptr = header_ptr + sizeof resp; 1280 1281 memset(&resp, 0, sizeof resp); 1282 while (resp.count < cmd.ne) { 1283 ret = ib_poll_cq(cq, 1, &wc); 1284 if (ret < 0) 1285 goto out_put; 1286 if (!ret) 1287 break; 1288 1289 ret = copy_wc_to_user(data_ptr, &wc); 1290 if (ret) 1291 goto out_put; 1292 1293 data_ptr += sizeof(struct ib_uverbs_wc); 1294 ++resp.count; 1295 } 1296 1297 if (copy_to_user(header_ptr, &resp, sizeof resp)) { 1298 ret = -EFAULT; 1299 goto out_put; 1300 } 1301 1302 ret = in_len; 1303 1304 out_put: 1305 put_cq_read(cq); 1306 return ret; 1307 } 1308 1309 ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file 
				*file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq                  *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/*
	 * Re-arm the CQ: request a completion event either for solicited
	 * completions only, or for the next completion of any kind.
	 */
	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	put_cq_read(cq);

	return in_len;
}

/*
 * Destroy a CQ: take the uobject write-locked, destroy the underlying
 * verbs CQ, then unlink the uobject from the idr and the per-file list,
 * and report how many completion/async events had been delivered.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq      cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject		*uobj;
	struct ib_cq			*cq;
	struct ib_ucq_object		*obj;
	struct ib_uverbs_event_file	*ev_file;
	int				 ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* returns the uobject with its rwsem held for writing */
	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq      = uobj->object;
	ev_file = cq->cq_context;
	obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		/* mark dead so concurrent idr lookups fail from here on */
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, obj);

	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported  = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

/*
 * Create a QP.  The handle namespace is overloaded: for XRC target QPs
 * cmd.pd_handle actually names an XRCD; otherwise it names a PD, and
 * send/recv CQs (and optionally an SRQ) are looked up as well.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp      cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_device	       *device;
	struct ib_pd                   *pd = NULL;
	struct ib_xrcd		       *xrcd = NULL;
	struct ib_uobject	       *uninitialized_var(xrcd_uobj);
	struct ib_cq                   *scq = NULL, *rcq = NULL;
	struct ib_srq                  *srq = NULL;
	struct ib_qp                   *qp;
	struct ib_qp_init_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* raw packet QPs can sniff traffic; restrict like raw sockets */
	if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	/* hold the uobject write-locked until it is fully initialized */
	down_write(&obj->uevent.uobject.mutex);

	if (cmd.qp_type == IB_QPT_XRC_TGT) {
		/* for XRC_TGT, pd_handle carries the XRCD handle instead */
		xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd.qp_type == IB_QPT_XRC_INI) {
			/* XRC initiator QPs have no receive queue */
			cmd.max_recv_wr = cmd.max_recv_sge = 0;
		} else {
			if (cmd.is_srq) {
				srq = idr_read_srq(cmd.srq_handle, file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (cmd.recv_cq_handle != cmd.send_cq_handle) {
				rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
				if (!rcq) {
					ret = -EINVAL;
					goto err_put;
				}
			}
		}

		/* nested lookup: pass !!rcq as the lockdep nesting level */
		scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
		rcq = rcq ?: scq;	/* same handle: share the send CQ */
		pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd || !scq) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.xrcd	   = xrcd;
	attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd.qp_type;
	attr.create_flags  = 0;

	attr.cap.max_send_wr     = cmd.max_send_wr;
	attr.cap.max_recv_wr     = cmd.max_recv_wr;
	attr.cap.max_send_sge    = cmd.max_send_sge;
	attr.cap.max_recv_sge    = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	obj->uevent.events_reported     = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd.qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		/* direct driver call: the core wrapper is bypassed here,
		 * so the ib_qp bookkeeping below is done by hand */
		qp = device->create_qp(pd, &attr, &udata);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd.qp_type != IB_QPT_XRC_TGT) {
		qp->real_qp	  = qp;
		qp->device	  = device;
		qp->pd		  = pd;
		qp->send_cq	  = attr.send_cq;
		qp->recv_cq	  = attr.recv_cq;
		qp->srq		  = attr.srq;
		qp->event_handler = attr.event_handler;
		qp->qp_context	  = attr.qp_context;
		qp->qp_type	  = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		/* pin every object the QP now references */
		atomic_inc(&pd->usecnt);
		atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn             = qp->qp_num;
	resp.qp_handle       = obj->uevent.uobject.id;
	resp.max_recv_sge    = attr.cap.max_recv_sge;
	resp.max_send_sge    = attr.cap.max_send_sge;
	resp.max_recv_wr     = attr.cap.max_recv_wr;
	resp.max_send_wr     = attr.cap.max_send_wr;
	resp.max_inline_data = attr.cap.max_inline_data;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	/* drop the lookup references taken above */
	if (xrcd)
		put_xrcd_read(xrcd_uobj);
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	/* publish: lookups may now find this QP */
	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	if (xrcd)
		put_xrcd_read(xrcd_uobj);
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

/*
 * Open (attach to) an existing shareable QP living in an XRC domain;
 * cmd.pd_handle carries the XRCD handle.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp        cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_xrcd		       *xrcd;
	struct ib_uobject	       *uninitialized_var(xrcd_uobj);
	struct ib_qp                   *qp;
	struct ib_qp_open_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	xrcd = idr_read_xrcd(cmd.pd_handle,
			     file->ucontext, &xrcd_uobj);
	if (!xrcd) {
		ret = -EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.qp_num        = cmd.qpn;
	attr.qp_type       = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn       = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_remove;
	}

	put_xrcd_read(xrcd_uobj);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	/* publish the new handle to concurrent lookups */
	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_remove:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	put_xrcd_read(xrcd_uobj);
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

/*
 * Query QP attributes (state, path, capabilities) and marshal them into
 * the flat ib_uverbs_query_qp_resp layout for userspace.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp      cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp                   *qp;
	struct ib_qp_attr              *attr;
	struct ib_qp_init_attr         *init_attr;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* both structs are large; heap-allocate to keep the stack small */
	attr      = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	put_qp_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state		    = attr->qp_state;
	resp.cur_qp_state	    = attr->cur_qp_state;
	resp.path_mtu		    = attr->path_mtu;
	resp.path_mig_state	    = attr->path_mig_state;
	resp.qkey		    = attr->qkey;
	resp.rq_psn		    = attr->rq_psn;
	resp.sq_psn		    = attr->sq_psn;
	resp.dest_qp_num	    = attr->dest_qp_num;
	resp.qp_access_flags	    = attr->qp_access_flags;
	resp.pkey_index		    = attr->pkey_index;
	resp.alt_pkey_index	    = attr->alt_pkey_index;
	resp.sq_draining	    = attr->sq_draining;
	resp.max_rd_atomic	    = attr->max_rd_atomic;
	resp.max_dest_rd_atomic	    = attr->max_dest_rd_atomic;
	resp.min_rnr_timer	    = attr->min_rnr_timer;
	resp.port_num		    = attr->port_num;
	resp.timeout		    = attr->timeout;
	resp.retry_cnt		    = attr->retry_cnt;
	resp.rnr_retry		    = attr->rnr_retry;
	resp.alt_port_num	    = attr->alt_port_num;
	resp.alt_timeout	    = attr->alt_timeout;

	/* primary path address vector */
	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
	resp.dest.flow_label	    = attr->ah_attr.grh.flow_label;
	resp.dest.sgid_index	    = attr->ah_attr.grh.sgid_index;
	resp.dest.hop_limit	    = attr->ah_attr.grh.hop_limit;
	resp.dest.traffic_class	    = attr->ah_attr.grh.traffic_class;
	resp.dest.dlid		    = attr->ah_attr.dlid;
	resp.dest.sl		    = attr->ah_attr.sl;
	resp.dest.src_path_bits	    = attr->ah_attr.src_path_bits;
	resp.dest.static_rate	    = attr->ah_attr.static_rate;
	resp.dest.is_global	    = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
	resp.dest.port_num	    = attr->ah_attr.port_num;

	/* alternate path address vector */
	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
	resp.alt_dest.flow_label    = attr->alt_ah_attr.grh.flow_label;
	resp.alt_dest.sgid_index    = attr->alt_ah_attr.grh.sgid_index;
	resp.alt_dest.hop_limit     = attr->alt_ah_attr.grh.hop_limit;
	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
	resp.alt_dest.dlid	    = attr->alt_ah_attr.dlid;
	resp.alt_dest.sl	    = attr->alt_ah_attr.sl;
	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
	resp.alt_dest.static_rate   = attr->alt_ah_attr.static_rate;
	resp.alt_dest.is_global	    = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
	resp.alt_dest.port_num	    = attr->alt_ah_attr.port_num;

	resp.max_send_wr	    = init_attr->cap.max_send_wr;
	resp.max_recv_wr	    = init_attr->cap.max_recv_wr;
	resp.max_send_sge	    = init_attr->cap.max_send_sge;
	resp.max_recv_sge	    = init_attr->cap.max_recv_sge;
	resp.max_inline_data	    = init_attr->cap.max_inline_data;
	resp.sq_sig_all		    = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	/* kfree(NULL) is a no-op, so this is safe on the ENOMEM path */
	kfree(attr);
	kfree(init_attr);

	return ret ?
ret : in_len;
}

/*
 * Remove ignored fields set in the attribute mask.  XRC initiator QPs
 * have no receive side, XRC target QPs no send side, so the attributes
 * meaningless for each type are masked off before reaching the driver.
 */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}

/*
 * Modify QP attributes.  The userspace command is unpacked into a
 * kernel ib_qp_attr and handed to the driver (directly for a real QP,
 * via ib_modify_qp() for a QP opened through an XRC domain).
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_udata            udata;
	struct ib_qp              *qp;
	struct ib_qp_attr         *attr;
	int                        ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state 		  = cmd.qp_state;
	attr->cur_qp_state 	  = cmd.cur_qp_state;
	attr->path_mtu 		  = cmd.path_mtu;
	attr->path_mig_state 	  = cmd.path_mig_state;
	attr->qkey 		  = cmd.qkey;
	attr->rq_psn 		  = cmd.rq_psn;
	attr->sq_psn 		  = cmd.sq_psn;
	attr->dest_qp_num 	  = cmd.dest_qp_num;
	attr->qp_access_flags 	  = cmd.qp_access_flags;
	attr->pkey_index 	  = cmd.pkey_index;
	attr->alt_pkey_index 	  = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic 	  = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer 	  = cmd.min_rnr_timer;
	attr->port_num 		  = cmd.port_num;
	attr->timeout 		  = cmd.timeout;
	attr->retry_cnt 	  = cmd.retry_cnt;
	attr->rnr_retry 	  = cmd.rnr_retry;
	attr->alt_port_num 	  = cmd.alt_port_num;
	attr->alt_timeout 	  = cmd.alt_timeout;

	/* primary path address vector */
	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label        = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index        = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit         = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class     = cmd.dest.traffic_class;
	attr->ah_attr.dlid 	    	    = cmd.dest.dlid;
	attr->ah_attr.sl   	  	    = cmd.dest.sl;
	attr->ah_attr.src_path_bits 	    = cmd.dest.src_path_bits;
	attr->ah_attr.static_rate   	    = cmd.dest.static_rate;
	attr->ah_attr.ah_flags 	    	    = cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num 	    	    = cmd.dest.port_num;

	/* alternate path address vector */
	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid 	    	    = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl   	  	    = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags 	    = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num 	    = cmd.alt_dest.port_num;

	if (qp->real_qp == qp) {
		/* QP created here: call the driver directly with udata */
		ret = qp->device->modify_qp(qp, attr,
			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
	} else {
		/* shared (opened) QP: go through the core wrapper */
		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
	}

	put_qp_read(qp);

	if (ret)
		goto out;

	ret = in_len;

out:
	kfree(attr);

	return ret;
}

/*
 * Destroy a QP.  Refuses (-EBUSY) while multicast groups are still
 * attached; otherwise destroys the QP, unlinks the uobject and reports
 * how many async events had been delivered for it.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp      cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject		*uobj;
	struct ib_qp               	*qp;
	struct ib_uqp_object        	*obj;
	int                        	 ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	qp  = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

	if (!list_empty(&obj->mcast_list)) {
		put_uobj_write(uobj);
		return -EBUSY;
	}

	ret = ib_destroy_qp(qp);
	if (!ret)
		/* mark dead so concurrent idr lookups fail from here on */
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, &obj->uevent);

	resp.events_reported = obj->uevent.events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

/*
 * Post a list of send work requests to a QP.  The userspace WR list is
 * unmarshalled into a kernel ib_send_wr chain (SG lists appended in the
 * same allocation), handed to the driver, and resp.bad_wr reports the
 * 1-based index of the first failed WR on error.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send      cmd;
	struct ib_uverbs_post_send_resp
					resp;
	struct ib_uverbs_send_wr       *user_wr;
	struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp                   *qp;
	int                             i, sg_ind;
	int				is_ud;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* validate the claimed sizes against what was actually written */
	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	/* scratch buffer reused for each user WR copied in */
	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		/* running SGE total must stay within what userspace sent */
		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		/* one allocation: the WR followed by its aligned SG list */
		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto out_put;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;
		next->opcode     = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (is_ud) {
			/* UD sends carry a per-WR address handle */
			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
						     file->ucontext);
			if (!next->wr.ud.ah) {
				ret = -EINVAL;
				goto out_put;
			}
			next->wr.ud.remote_qpn  = user_wr->wr.ud.remote_qpn;
			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
		} else {
			switch (next->opcode) {
			case IB_WR_RDMA_WRITE_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				/* fall through: also needs the rdma fields */
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
				next->wr.rdma.remote_addr =
					user_wr->wr.rdma.remote_addr;
				next->wr.rdma.rkey        =
					user_wr->wr.rdma.rkey;
				break;
			case IB_WR_SEND_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				next->ex.invalidate_rkey =
					user_wr->ex.invalidate_rkey;
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				next->wr.atomic.remote_addr =
					user_wr->wr.atomic.remote_addr;
				next->wr.atomic.compare_add =
					user_wr->wr.atomic.compare_add;
				next->wr.atomic.swap = user_wr->wr.atomic.swap;
				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
				break;
			default:
				break;
			}
		}

		if (next->num_sge) {
			/* SG list lives in the tail of the WR allocation */
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		/* count how many WRs were consumed before the bad one */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	put_qp_read(qp);

	/* free the WR chain, dropping per-WR AH references for UD */
	while (wr) {
		if (is_ud && wr->wr.ud.ah)
			put_ah_read(wr->wr.ud.ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ?
ret : in_len;
}

/*
 * Unmarshal a userspace receive-WR list into a kernel ib_recv_wr chain.
 * Each WR is allocated together with its aligned SG list; the whole
 * chain is freed by the caller (or here on error).
 *
 * Returns the chain head, or an ERR_PTR on failure.
 */
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr        *wr = NULL, *last, *next;
	int                       sg_ind;
	int                       i;
	int                       ret;

	/* validate the claimed sizes against what was actually written */
	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	/* scratch buffer reused for each user WR copied in */
	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		/* one allocation: the WR followed by its aligned SG list */
		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	/* free whatever portion of the chain was already built */
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

/*
 * Post a list of receive work requests to a QP.  resp.bad_wr reports
 * the 1-based index of the first failed WR on error.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv      cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr              *wr, *next, *bad_wr;
	struct ib_qp                   *qp;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	put_qp_read(qp);

	if (ret)
		/* count how many WRs were consumed before the bad one */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ?
ret : in_len;
}

/*
 * Post a list of receive work requests to an SRQ.  Same WR
 * unmarshalling and bad_wr reporting as ib_uverbs_post_recv(), but the
 * target is a shared receive queue.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv      cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr                  *wr, *next, *bad_wr;
	struct ib_srq                      *srq;
	ssize_t                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	put_srq_read(srq);

	if (ret)
		/* count how many WRs were consumed before the bad one */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ?
ret : in_len;
}

/*
 * Create an address handle on a PD from the userspace-supplied address
 * vector and return its new idr handle.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah	 cmd;
	struct ib_uverbs_create_ah_resp	 resp;
	struct ib_uobject		*uobj;
	struct ib_pd			*pd;
	struct ib_ah			*ah;
	struct ib_ah_attr		attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
	/* hold the uobject write-locked until it is fully initialized */
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.dlid 	       = cmd.attr.dlid;
	attr.sl 	       = cmd.attr.sl;
	attr.src_path_bits     = cmd.attr.src_path_bits;
	attr.static_rate       = cmd.attr.static_rate;
	attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num 	       = cmd.attr.port_num;
	attr.grh.flow_label    = cmd.attr.grh.flow_label;
	attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject  = uobj;
	uobj->object = ah;

	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	/* publish the new handle to concurrent lookups */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
	ib_destroy_ah(ah);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

/*
 * Destroy an address handle and unlink its uobject.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_ah		   *ah;
	struct ib_uobject	   *uobj;
	int			    ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	ah = uobj->object;

	ret = ib_destroy_ah(ah);
	if (!ret)
		/* mark dead so concurrent idr lookups fail from here on */
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

/*
 * Attach a QP to a multicast group.  Attaching the same (gid, mlid)
 * twice is a silent no-op; otherwise the group is recorded on the QP's
 * mcast_list so destroy_qp can refuse while attachments remain.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp                 *qp;
	struct ib_uqp_object         *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* write lookup: mcast_list is modified under the uobject lock */
	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* already attached to this group: succeed without re-attaching */
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret =
	      ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}

/*
 * Detach a QP from a multicast group and drop the matching entry from
 * the QP's mcast_list.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object         *obj;
	struct ib_qp                 *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* write lookup: mcast_list is modified under the uobject lock */
	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
	if (ret)
		goto out_put;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* remove our bookkeeping entry for the detached group */
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			break;
		}

out_put:
	put_qp_write(qp);

	return ret ?
ret : in_len;
}

/*
 * Common SRQ creation for both the basic and the extended (XRC)
 * command paths.  For XRC SRQs an XRCD and a CQ are looked up and
 * pinned as well.
 *
 * Returns 0 on success, or a negative errno.
 */
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object           *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	struct ib_uobject               *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr          attr;
	int ret;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
	/* hold the uobject write-locked until it is fully initialized */
	down_write(&obj->uevent.uobject.mutex);

	if (cmd->srq_type == IB_SRQT_XRC) {
		attr.ext.xrc.xrcd  = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err;
		}

		/* keep the XRCD alive for the lifetime of this SRQ */
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);

		attr.ext.xrc.cq  = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
		if (!attr.ext.xrc.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd  = idr_read_pd(cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.srq_type       = cmd->srq_type;
	attr.attr.max_wr    = cmd->max_wr;
	attr.attr.max_sge   = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	/* direct driver call above, so fill in the core fields by hand */
	srq->device        = pd->device;
	srq->pd            = pd;
	srq->srq_type	   = cmd->srq_type;
	srq->uobject       = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.cq   =
-ENOSPC; 2620 2621 if (copy_from_user(&cmd, buf, sizeof cmd)) 2622 return -EFAULT; 2623 2624 xcmd.response = cmd.response; 2625 xcmd.user_handle = cmd.user_handle; 2626 xcmd.srq_type = IB_SRQT_BASIC; 2627 xcmd.pd_handle = cmd.pd_handle; 2628 xcmd.max_wr = cmd.max_wr; 2629 xcmd.max_sge = cmd.max_sge; 2630 xcmd.srq_limit = cmd.srq_limit; 2631 2632 INIT_UDATA(&udata, buf + sizeof cmd, 2633 (unsigned long) cmd.response + sizeof resp, 2634 in_len - sizeof cmd, out_len - sizeof resp); 2635 2636 ret = __uverbs_create_xsrq(file, &xcmd, &udata); 2637 if (ret) 2638 return ret; 2639 2640 return in_len; 2641 } 2642 2643 ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file, 2644 const char __user *buf, int in_len, int out_len) 2645 { 2646 struct ib_uverbs_create_xsrq cmd; 2647 struct ib_uverbs_create_srq_resp resp; 2648 struct ib_udata udata; 2649 int ret; 2650 2651 if (out_len < sizeof resp) 2652 return -ENOSPC; 2653 2654 if (copy_from_user(&cmd, buf, sizeof cmd)) 2655 return -EFAULT; 2656 2657 INIT_UDATA(&udata, buf + sizeof cmd, 2658 (unsigned long) cmd.response + sizeof resp, 2659 in_len - sizeof cmd, out_len - sizeof resp); 2660 2661 ret = __uverbs_create_xsrq(file, &cmd, &udata); 2662 if (ret) 2663 return ret; 2664 2665 return in_len; 2666 } 2667 2668 ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, 2669 const char __user *buf, int in_len, 2670 int out_len) 2671 { 2672 struct ib_uverbs_modify_srq cmd; 2673 struct ib_udata udata; 2674 struct ib_srq *srq; 2675 struct ib_srq_attr attr; 2676 int ret; 2677 2678 if (copy_from_user(&cmd, buf, sizeof cmd)) 2679 return -EFAULT; 2680 2681 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, 2682 out_len); 2683 2684 srq = idr_read_srq(cmd.srq_handle, file->ucontext); 2685 if (!srq) 2686 return -EINVAL; 2687 2688 attr.max_wr = cmd.max_wr; 2689 attr.srq_limit = cmd.srq_limit; 2690 2691 ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata); 2692 2693 put_srq_read(srq); 2694 2695 return ret ? 
ret : in_len; 2696 } 2697 2698 ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file, 2699 const char __user *buf, 2700 int in_len, int out_len) 2701 { 2702 struct ib_uverbs_query_srq cmd; 2703 struct ib_uverbs_query_srq_resp resp; 2704 struct ib_srq_attr attr; 2705 struct ib_srq *srq; 2706 int ret; 2707 2708 if (out_len < sizeof resp) 2709 return -ENOSPC; 2710 2711 if (copy_from_user(&cmd, buf, sizeof cmd)) 2712 return -EFAULT; 2713 2714 srq = idr_read_srq(cmd.srq_handle, file->ucontext); 2715 if (!srq) 2716 return -EINVAL; 2717 2718 ret = ib_query_srq(srq, &attr); 2719 2720 put_srq_read(srq); 2721 2722 if (ret) 2723 return ret; 2724 2725 memset(&resp, 0, sizeof resp); 2726 2727 resp.max_wr = attr.max_wr; 2728 resp.max_sge = attr.max_sge; 2729 resp.srq_limit = attr.srq_limit; 2730 2731 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2732 &resp, sizeof resp)) 2733 return -EFAULT; 2734 2735 return in_len; 2736 } 2737 2738 ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file, 2739 const char __user *buf, int in_len, 2740 int out_len) 2741 { 2742 struct ib_uverbs_destroy_srq cmd; 2743 struct ib_uverbs_destroy_srq_resp resp; 2744 struct ib_uobject *uobj; 2745 struct ib_srq *srq; 2746 struct ib_uevent_object *obj; 2747 int ret = -EINVAL; 2748 2749 if (copy_from_user(&cmd, buf, sizeof cmd)) 2750 return -EFAULT; 2751 2752 uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext); 2753 if (!uobj) 2754 return -EINVAL; 2755 srq = uobj->object; 2756 obj = container_of(uobj, struct ib_uevent_object, uobject); 2757 2758 ret = ib_destroy_srq(srq); 2759 if (!ret) 2760 uobj->live = 0; 2761 2762 put_uobj_write(uobj); 2763 2764 if (ret) 2765 return ret; 2766 2767 idr_remove_uobj(&ib_uverbs_srq_idr, uobj); 2768 2769 mutex_lock(&file->mutex); 2770 list_del(&uobj->list); 2771 mutex_unlock(&file->mutex); 2772 2773 ib_uverbs_release_uevent(file, obj); 2774 2775 memset(&resp, 0, sizeof resp); 2776 resp.events_reported = obj->events_reported; 2777 2778 
put_uobj(uobj); 2779 2780 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2781 &resp, sizeof resp)) 2782 ret = -EFAULT; 2783 2784 return ret ? ret : in_len; 2785 } 2786