/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#include "uverbs.h"

static struct lock_class_key pd_lock_key;
static struct lock_class_key mr_lock_key;
static struct lock_class_key cq_lock_key;
static struct lock_class_key qp_lock_key;
static struct lock_class_key ah_lock_key;
static struct lock_class_key srq_lock_key;

#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)		\
	do {							\
		(udata)->inbuf  = (void __user *) (ibuf);	\
		(udata)->outbuf = (void __user *) (obuf);	\
		(udata)->inlen  = (ilen);			\
		(udata)->outlen = (olen);			\
	} while (0)

/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr operations.  When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
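 *
 * As a sketch of the read-side pattern used throughout this file
 * (using the helpers defined below), a command handler typically
 * does:
 *
 *	struct ib_pd *pd = idr_read_pd(handle, file->ucontext);
 *	if (!pd)
 *		return -EINVAL;
 *	... use pd while holding its uobject rwsem for reading ...
 *	put_pd_read(pd);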
 */

static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct lock_class_key *key)
{
	uobj->user_handle = user_handle;
	uobj->context = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	lockdep_set_class(&uobj->mutex, key);
	uobj->live = 0;
}

static void release_uobj(struct kref *kref)
{
	kfree(container_of(kref, struct ib_uobject, ref));
}

static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}

static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}

static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}

static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

retry:
	if (!idr_pre_get(idr, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&ib_uverbs_idr_lock);
	ret = idr_get_new(idr, uobj, &uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);

	if (ret == -EAGAIN)
		goto retry;

	return ret;
}

void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}

static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	spin_lock(&ib_uverbs_idr_lock);
	uobj = idr_find(idr, id);
	if (uobj) {
		if (uobj->context == context)
			kref_get(&uobj->ref);
		else
			uobj = NULL;
	}
	spin_unlock(&ib_uverbs_idr_lock);

	return uobj;
}

static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}

static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}

static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
			  int nested)
{
	struct ib_uobject *uobj;

	uobj = idr_read_uobj(idr, id, context, nested);
	return uobj ?
		uobj->object : NULL;
}

static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}

static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}

ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
	struct ib_device *ibdev = file->device->ib_dev;
	struct ib_ucontext *ucontext;
	struct file *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ibdev->alloc_ucontext(ibdev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ibdev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);
	ucontext->closing = 0;

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd();
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 1);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->async_file = filp->private_data;

	INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
			      ib_uverbs_event_handler);
	ret = ib_register_event_handler(&file->event_handler);
	if (ret)
		goto err_file;

	kref_get(&file->async_file->ref);
	kref_get(&file->ref);
	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	ibdev->dealloc_ucontext(ucontext);
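
	/*
	 * The error labels unwind in reverse order of setup: err_file
	 * drops the event file, err_fd releases the reserved async fd,
	 * err_free frees the device ucontext, and the common err path
	 * only drops file->mutex.
	 */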

err:
	mutex_unlock(&file->mutex);
	return ret;
}

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_device_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_device(file->device->ib_dev, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.fw_ver = attr.fw_ver;
	resp.node_guid = file->device->ib_dev->node_guid;
	resp.sys_image_guid = attr.sys_image_guid;
	resp.max_mr_size = attr.max_mr_size;
	resp.page_size_cap = attr.page_size_cap;
	resp.vendor_id = attr.vendor_id;
	resp.vendor_part_id = attr.vendor_part_id;
	resp.hw_ver = attr.hw_ver;
	resp.max_qp = attr.max_qp;
	resp.max_qp_wr = attr.max_qp_wr;
	resp.device_cap_flags = attr.device_cap_flags;
	resp.max_sge = attr.max_sge;
	resp.max_sge_rd = attr.max_sge_rd;
	resp.max_cq = attr.max_cq;
	resp.max_cqe = attr.max_cqe;
	resp.max_mr = attr.max_mr;
	resp.max_pd = attr.max_pd;
	resp.max_qp_rd_atom = attr.max_qp_rd_atom;
	resp.max_ee_rd_atom = attr.max_ee_rd_atom;
	resp.max_res_rd_atom = attr.max_res_rd_atom;
	resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom;
	resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom;
	resp.atomic_cap = attr.atomic_cap;
	resp.max_ee = attr.max_ee;
	resp.max_rdd = attr.max_rdd;
	resp.max_mw = attr.max_mw;
	resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp;
	resp.max_raw_ethy_qp = attr.max_raw_ethy_qp;
	resp.max_mcast_grp = attr.max_mcast_grp;
	resp.max_mcast_qp_attach = attr.max_mcast_qp_attach;
	resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
	resp.max_ah = attr.max_ah;
	resp.max_fmr = attr.max_fmr;
	resp.max_map_per_fmr = attr.max_map_per_fmr;
	resp.max_srq = attr.max_srq;
	resp.max_srq_wr = attr.max_srq_wr;
	resp.max_srq_sge = attr.max_srq_sge;
	resp.max_pkeys = attr.max_pkeys;
	resp.local_ca_ack_delay = attr.local_ca_ack_delay;
	resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;
	resp.lid = attr.lid;
	resp.sm_lid = attr.sm_lid;
	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
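
	/*
	 * The remaining fields describe the current link: width, speed
	 * and physical state, plus which link layer (InfiniBand or
	 * Ethernet, for RoCE-capable devices) the port runs on.
	 */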
resp.active_width = attr.active_width; 461 resp.active_speed = attr.active_speed; 462 resp.phys_state = attr.phys_state; 463 resp.link_layer = rdma_port_get_link_layer(file->device->ib_dev, 464 cmd.port_num); 465 466 if (copy_to_user((void __user *) (unsigned long) cmd.response, 467 &resp, sizeof resp)) 468 return -EFAULT; 469 470 return in_len; 471 } 472 473 ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file, 474 const char __user *buf, 475 int in_len, int out_len) 476 { 477 struct ib_uverbs_alloc_pd cmd; 478 struct ib_uverbs_alloc_pd_resp resp; 479 struct ib_udata udata; 480 struct ib_uobject *uobj; 481 struct ib_pd *pd; 482 int ret; 483 484 if (out_len < sizeof resp) 485 return -ENOSPC; 486 487 if (copy_from_user(&cmd, buf, sizeof cmd)) 488 return -EFAULT; 489 490 INIT_UDATA(&udata, buf + sizeof cmd, 491 (unsigned long) cmd.response + sizeof resp, 492 in_len - sizeof cmd, out_len - sizeof resp); 493 494 uobj = kmalloc(sizeof *uobj, GFP_KERNEL); 495 if (!uobj) 496 return -ENOMEM; 497 498 init_uobj(uobj, 0, file->ucontext, &pd_lock_key); 499 down_write(&uobj->mutex); 500 501 pd = file->device->ib_dev->alloc_pd(file->device->ib_dev, 502 file->ucontext, &udata); 503 if (IS_ERR(pd)) { 504 ret = PTR_ERR(pd); 505 goto err; 506 } 507 508 pd->device = file->device->ib_dev; 509 pd->uobject = uobj; 510 atomic_set(&pd->usecnt, 0); 511 512 uobj->object = pd; 513 ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj); 514 if (ret) 515 goto err_idr; 516 517 memset(&resp, 0, sizeof resp); 518 resp.pd_handle = uobj->id; 519 520 if (copy_to_user((void __user *) (unsigned long) cmd.response, 521 &resp, sizeof resp)) { 522 ret = -EFAULT; 523 goto err_copy; 524 } 525 526 mutex_lock(&file->mutex); 527 list_add_tail(&uobj->list, &file->ucontext->pd_list); 528 mutex_unlock(&file->mutex); 529 530 uobj->live = 1; 531 532 up_write(&uobj->mutex); 533 534 return in_len; 535 536 err_copy: 537 idr_remove_uobj(&ib_uverbs_pd_idr, uobj); 538 539 err_idr: 540 ib_dealloc_pd(pd); 541 542 err: 543 put_uobj_write(uobj); 544 return ret; 545 } 546 547 ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file, 548 const char __user *buf, 549 int in_len, int out_len) 550 { 551 struct ib_uverbs_dealloc_pd cmd; 552 struct ib_uobject *uobj; 553 int ret; 554 555 if (copy_from_user(&cmd, buf, sizeof cmd)) 556 return -EFAULT; 557 558 uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext); 559 if (!uobj) 560 return -EINVAL; 561 562 ret = ib_dealloc_pd(uobj->object); 563 if (!ret) 564 uobj->live = 0; 565 566 put_uobj_write(uobj); 567 568 if (ret) 569 return ret; 570 571 idr_remove_uobj(&ib_uverbs_pd_idr, uobj); 572 573 mutex_lock(&file->mutex); 574 list_del(&uobj->list); 575 mutex_unlock(&file->mutex); 576 577 put_uobj(uobj); 578 579 return in_len; 580 } 581 582 ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file, 583 const char __user *buf, int in_len, 584 int out_len) 585 { 586 struct ib_uverbs_reg_mr cmd; 587 struct ib_uverbs_reg_mr_resp resp; 588 struct ib_udata udata; 589 struct ib_uobject *uobj; 590 struct ib_pd *pd; 591 struct ib_mr *mr; 592 int ret; 593 594 if (out_len < sizeof resp) 595 return -ENOSPC; 596 597 if (copy_from_user(&cmd, buf, sizeof cmd)) 598 return -EFAULT; 599 600 INIT_UDATA(&udata, buf + sizeof cmd, 601 (unsigned long) cmd.response + sizeof resp, 602 in_len - sizeof cmd, out_len - sizeof resp); 603 604 if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)) 605 return -EINVAL; 606 607 /* 608 * Local write permission is required if remote write or 609 * remote atomic permission is also requested. 
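 * A remote write or atomic performed by the peer ends up writing
 * into this locally registered memory, so the local write permission
 * must accompany those flags; such requests are rejected below.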
610 */ 611 if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) && 612 !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE)) 613 return -EINVAL; 614 615 uobj = kmalloc(sizeof *uobj, GFP_KERNEL); 616 if (!uobj) 617 return -ENOMEM; 618 619 init_uobj(uobj, 0, file->ucontext, &mr_lock_key); 620 down_write(&uobj->mutex); 621 622 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 623 if (!pd) { 624 ret = -EINVAL; 625 goto err_free; 626 } 627 628 mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va, 629 cmd.access_flags, &udata); 630 if (IS_ERR(mr)) { 631 ret = PTR_ERR(mr); 632 goto err_put; 633 } 634 635 mr->device = pd->device; 636 mr->pd = pd; 637 mr->uobject = uobj; 638 atomic_inc(&pd->usecnt); 639 atomic_set(&mr->usecnt, 0); 640 641 uobj->object = mr; 642 ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj); 643 if (ret) 644 goto err_unreg; 645 646 memset(&resp, 0, sizeof resp); 647 resp.lkey = mr->lkey; 648 resp.rkey = mr->rkey; 649 resp.mr_handle = uobj->id; 650 651 if (copy_to_user((void __user *) (unsigned long) cmd.response, 652 &resp, sizeof resp)) { 653 ret = -EFAULT; 654 goto err_copy; 655 } 656 657 put_pd_read(pd); 658 659 mutex_lock(&file->mutex); 660 list_add_tail(&uobj->list, &file->ucontext->mr_list); 661 mutex_unlock(&file->mutex); 662 663 uobj->live = 1; 664 665 up_write(&uobj->mutex); 666 667 return in_len; 668 669 err_copy: 670 idr_remove_uobj(&ib_uverbs_mr_idr, uobj); 671 672 err_unreg: 673 ib_dereg_mr(mr); 674 675 err_put: 676 put_pd_read(pd); 677 678 err_free: 679 put_uobj_write(uobj); 680 return ret; 681 } 682 683 ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file, 684 const char __user *buf, int in_len, 685 int out_len) 686 { 687 struct ib_uverbs_dereg_mr cmd; 688 struct ib_mr *mr; 689 struct ib_uobject *uobj; 690 int ret = -EINVAL; 691 692 if (copy_from_user(&cmd, buf, sizeof cmd)) 693 return -EFAULT; 694 695 uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext); 696 if (!uobj) 697 return -EINVAL; 698 699 mr = uobj->object; 700 701 ret = ib_dereg_mr(mr); 702 if (!ret) 703 uobj->live = 0; 704 705 put_uobj_write(uobj); 706 707 if (ret) 708 return ret; 709 710 idr_remove_uobj(&ib_uverbs_mr_idr, uobj); 711 712 mutex_lock(&file->mutex); 713 list_del(&uobj->list); 714 mutex_unlock(&file->mutex); 715 716 put_uobj(uobj); 717 718 return in_len; 719 } 720 721 ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file, 722 const char __user *buf, int in_len, 723 int out_len) 724 { 725 struct ib_uverbs_create_comp_channel cmd; 726 struct ib_uverbs_create_comp_channel_resp resp; 727 struct file *filp; 728 int ret; 729 730 if (out_len < sizeof resp) 731 return -ENOSPC; 732 733 if (copy_from_user(&cmd, buf, sizeof cmd)) 734 return -EFAULT; 735 736 ret = get_unused_fd(); 737 if (ret < 0) 738 return ret; 739 resp.fd = ret; 740 741 filp = ib_uverbs_alloc_event_file(file, 0); 742 if (IS_ERR(filp)) { 743 put_unused_fd(resp.fd); 744 return PTR_ERR(filp); 745 } 746 747 if (copy_to_user((void __user *) (unsigned long) cmd.response, 748 &resp, sizeof resp)) { 749 put_unused_fd(resp.fd); 750 fput(filp); 751 return -EFAULT; 752 } 753 754 fd_install(resp.fd, filp); 755 return in_len; 756 } 757 758 ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file, 759 const char __user *buf, int in_len, 760 int out_len) 761 { 762 struct ib_uverbs_create_cq cmd; 763 struct ib_uverbs_create_cq_resp resp; 764 struct ib_udata udata; 765 struct ib_ucq_object *obj; 766 struct ib_uverbs_event_file *ev_file = NULL; 767 struct ib_cq *cq; 768 int ret; 769 770 if (out_len < 
sizeof resp) 771 return -ENOSPC; 772 773 if (copy_from_user(&cmd, buf, sizeof cmd)) 774 return -EFAULT; 775 776 INIT_UDATA(&udata, buf + sizeof cmd, 777 (unsigned long) cmd.response + sizeof resp, 778 in_len - sizeof cmd, out_len - sizeof resp); 779 780 if (cmd.comp_vector >= file->device->num_comp_vectors) 781 return -EINVAL; 782 783 obj = kmalloc(sizeof *obj, GFP_KERNEL); 784 if (!obj) 785 return -ENOMEM; 786 787 init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_key); 788 down_write(&obj->uobject.mutex); 789 790 if (cmd.comp_channel >= 0) { 791 ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel); 792 if (!ev_file) { 793 ret = -EINVAL; 794 goto err; 795 } 796 } 797 798 obj->uverbs_file = file; 799 obj->comp_events_reported = 0; 800 obj->async_events_reported = 0; 801 INIT_LIST_HEAD(&obj->comp_list); 802 INIT_LIST_HEAD(&obj->async_list); 803 804 cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe, 805 cmd.comp_vector, 806 file->ucontext, &udata); 807 if (IS_ERR(cq)) { 808 ret = PTR_ERR(cq); 809 goto err_file; 810 } 811 812 cq->device = file->device->ib_dev; 813 cq->uobject = &obj->uobject; 814 cq->comp_handler = ib_uverbs_comp_handler; 815 cq->event_handler = ib_uverbs_cq_event_handler; 816 cq->cq_context = ev_file; 817 atomic_set(&cq->usecnt, 0); 818 819 obj->uobject.object = cq; 820 ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject); 821 if (ret) 822 goto err_free; 823 824 memset(&resp, 0, sizeof resp); 825 resp.cq_handle = obj->uobject.id; 826 resp.cqe = cq->cqe; 827 828 if (copy_to_user((void __user *) (unsigned long) cmd.response, 829 &resp, sizeof resp)) { 830 ret = -EFAULT; 831 goto err_copy; 832 } 833 834 mutex_lock(&file->mutex); 835 list_add_tail(&obj->uobject.list, &file->ucontext->cq_list); 836 mutex_unlock(&file->mutex); 837 838 obj->uobject.live = 1; 839 840 up_write(&obj->uobject.mutex); 841 842 return in_len; 843 844 err_copy: 845 idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject); 846 847 err_free: 848 ib_destroy_cq(cq); 849 850 err_file: 851 if (ev_file) 852 ib_uverbs_release_ucq(file, ev_file, obj); 853 854 err: 855 put_uobj_write(&obj->uobject); 856 return ret; 857 } 858 859 ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file, 860 const char __user *buf, int in_len, 861 int out_len) 862 { 863 struct ib_uverbs_resize_cq cmd; 864 struct ib_uverbs_resize_cq_resp resp; 865 struct ib_udata udata; 866 struct ib_cq *cq; 867 int ret = -EINVAL; 868 869 if (copy_from_user(&cmd, buf, sizeof cmd)) 870 return -EFAULT; 871 872 INIT_UDATA(&udata, buf + sizeof cmd, 873 (unsigned long) cmd.response + sizeof resp, 874 in_len - sizeof cmd, out_len - sizeof resp); 875 876 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0); 877 if (!cq) 878 return -EINVAL; 879 880 ret = cq->device->resize_cq(cq, cmd.cqe, &udata); 881 if (ret) 882 goto out; 883 884 resp.cqe = cq->cqe; 885 886 if (copy_to_user((void __user *) (unsigned long) cmd.response, 887 &resp, sizeof resp.cqe)) 888 ret = -EFAULT; 889 890 out: 891 put_cq_read(cq); 892 893 return ret ? 
ret : in_len; 894 } 895 896 ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file, 897 const char __user *buf, int in_len, 898 int out_len) 899 { 900 struct ib_uverbs_poll_cq cmd; 901 struct ib_uverbs_poll_cq_resp *resp; 902 struct ib_cq *cq; 903 struct ib_wc *wc; 904 int ret = 0; 905 int i; 906 int rsize; 907 908 if (copy_from_user(&cmd, buf, sizeof cmd)) 909 return -EFAULT; 910 911 wc = kmalloc(cmd.ne * sizeof *wc, GFP_KERNEL); 912 if (!wc) 913 return -ENOMEM; 914 915 rsize = sizeof *resp + cmd.ne * sizeof(struct ib_uverbs_wc); 916 resp = kmalloc(rsize, GFP_KERNEL); 917 if (!resp) { 918 ret = -ENOMEM; 919 goto out_wc; 920 } 921 922 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0); 923 if (!cq) { 924 ret = -EINVAL; 925 goto out; 926 } 927 928 resp->count = ib_poll_cq(cq, cmd.ne, wc); 929 930 put_cq_read(cq); 931 932 for (i = 0; i < resp->count; i++) { 933 resp->wc[i].wr_id = wc[i].wr_id; 934 resp->wc[i].status = wc[i].status; 935 resp->wc[i].opcode = wc[i].opcode; 936 resp->wc[i].vendor_err = wc[i].vendor_err; 937 resp->wc[i].byte_len = wc[i].byte_len; 938 resp->wc[i].ex.imm_data = (__u32 __force) wc[i].ex.imm_data; 939 resp->wc[i].qp_num = wc[i].qp->qp_num; 940 resp->wc[i].src_qp = wc[i].src_qp; 941 resp->wc[i].wc_flags = wc[i].wc_flags; 942 resp->wc[i].pkey_index = wc[i].pkey_index; 943 resp->wc[i].slid = wc[i].slid; 944 resp->wc[i].sl = wc[i].sl; 945 resp->wc[i].dlid_path_bits = wc[i].dlid_path_bits; 946 resp->wc[i].port_num = wc[i].port_num; 947 } 948 949 if (copy_to_user((void __user *) (unsigned long) cmd.response, resp, rsize)) 950 ret = -EFAULT; 951 952 out: 953 kfree(resp); 954 955 out_wc: 956 kfree(wc); 957 return ret ? ret : in_len; 958 } 959 960 ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file, 961 const char __user *buf, int in_len, 962 int out_len) 963 { 964 struct ib_uverbs_req_notify_cq cmd; 965 struct ib_cq *cq; 966 967 if (copy_from_user(&cmd, buf, sizeof cmd)) 968 return -EFAULT; 969 970 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0); 971 if (!cq) 972 return -EINVAL; 973 974 ib_req_notify_cq(cq, cmd.solicited_only ? 
975 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP); 976 977 put_cq_read(cq); 978 979 return in_len; 980 } 981 982 ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file, 983 const char __user *buf, int in_len, 984 int out_len) 985 { 986 struct ib_uverbs_destroy_cq cmd; 987 struct ib_uverbs_destroy_cq_resp resp; 988 struct ib_uobject *uobj; 989 struct ib_cq *cq; 990 struct ib_ucq_object *obj; 991 struct ib_uverbs_event_file *ev_file; 992 int ret = -EINVAL; 993 994 if (copy_from_user(&cmd, buf, sizeof cmd)) 995 return -EFAULT; 996 997 uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext); 998 if (!uobj) 999 return -EINVAL; 1000 cq = uobj->object; 1001 ev_file = cq->cq_context; 1002 obj = container_of(cq->uobject, struct ib_ucq_object, uobject); 1003 1004 ret = ib_destroy_cq(cq); 1005 if (!ret) 1006 uobj->live = 0; 1007 1008 put_uobj_write(uobj); 1009 1010 if (ret) 1011 return ret; 1012 1013 idr_remove_uobj(&ib_uverbs_cq_idr, uobj); 1014 1015 mutex_lock(&file->mutex); 1016 list_del(&uobj->list); 1017 mutex_unlock(&file->mutex); 1018 1019 ib_uverbs_release_ucq(file, ev_file, obj); 1020 1021 memset(&resp, 0, sizeof resp); 1022 resp.comp_events_reported = obj->comp_events_reported; 1023 resp.async_events_reported = obj->async_events_reported; 1024 1025 put_uobj(uobj); 1026 1027 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1028 &resp, sizeof resp)) 1029 return -EFAULT; 1030 1031 return in_len; 1032 } 1033 1034 ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, 1035 const char __user *buf, int in_len, 1036 int out_len) 1037 { 1038 struct ib_uverbs_create_qp cmd; 1039 struct ib_uverbs_create_qp_resp resp; 1040 struct ib_udata udata; 1041 struct ib_uqp_object *obj; 1042 struct ib_pd *pd; 1043 struct ib_cq *scq, *rcq; 1044 struct ib_srq *srq; 1045 struct ib_qp *qp; 1046 struct ib_qp_init_attr attr; 1047 int ret; 1048 1049 if (out_len < sizeof resp) 1050 return -ENOSPC; 1051 1052 if (copy_from_user(&cmd, buf, sizeof cmd)) 1053 return -EFAULT; 1054 1055 INIT_UDATA(&udata, buf + sizeof cmd, 1056 (unsigned long) cmd.response + sizeof resp, 1057 in_len - sizeof cmd, out_len - sizeof resp); 1058 1059 obj = kmalloc(sizeof *obj, GFP_KERNEL); 1060 if (!obj) 1061 return -ENOMEM; 1062 1063 init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key); 1064 down_write(&obj->uevent.uobject.mutex); 1065 1066 srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL; 1067 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 1068 scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0); 1069 rcq = cmd.recv_cq_handle == cmd.send_cq_handle ? 1070 scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1); 1071 1072 if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) { 1073 ret = -EINVAL; 1074 goto err_put; 1075 } 1076 1077 attr.event_handler = ib_uverbs_qp_event_handler; 1078 attr.qp_context = file; 1079 attr.send_cq = scq; 1080 attr.recv_cq = rcq; 1081 attr.srq = srq; 1082 attr.sq_sig_type = cmd.sq_sig_all ? 
IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; 1083 attr.qp_type = cmd.qp_type; 1084 attr.create_flags = 0; 1085 1086 attr.cap.max_send_wr = cmd.max_send_wr; 1087 attr.cap.max_recv_wr = cmd.max_recv_wr; 1088 attr.cap.max_send_sge = cmd.max_send_sge; 1089 attr.cap.max_recv_sge = cmd.max_recv_sge; 1090 attr.cap.max_inline_data = cmd.max_inline_data; 1091 1092 obj->uevent.events_reported = 0; 1093 INIT_LIST_HEAD(&obj->uevent.event_list); 1094 INIT_LIST_HEAD(&obj->mcast_list); 1095 1096 qp = pd->device->create_qp(pd, &attr, &udata); 1097 if (IS_ERR(qp)) { 1098 ret = PTR_ERR(qp); 1099 goto err_put; 1100 } 1101 1102 qp->device = pd->device; 1103 qp->pd = pd; 1104 qp->send_cq = attr.send_cq; 1105 qp->recv_cq = attr.recv_cq; 1106 qp->srq = attr.srq; 1107 qp->uobject = &obj->uevent.uobject; 1108 qp->event_handler = attr.event_handler; 1109 qp->qp_context = attr.qp_context; 1110 qp->qp_type = attr.qp_type; 1111 atomic_inc(&pd->usecnt); 1112 atomic_inc(&attr.send_cq->usecnt); 1113 atomic_inc(&attr.recv_cq->usecnt); 1114 if (attr.srq) 1115 atomic_inc(&attr.srq->usecnt); 1116 1117 obj->uevent.uobject.object = qp; 1118 ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject); 1119 if (ret) 1120 goto err_destroy; 1121 1122 memset(&resp, 0, sizeof resp); 1123 resp.qpn = qp->qp_num; 1124 resp.qp_handle = obj->uevent.uobject.id; 1125 resp.max_recv_sge = attr.cap.max_recv_sge; 1126 resp.max_send_sge = attr.cap.max_send_sge; 1127 resp.max_recv_wr = attr.cap.max_recv_wr; 1128 resp.max_send_wr = attr.cap.max_send_wr; 1129 resp.max_inline_data = attr.cap.max_inline_data; 1130 1131 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1132 &resp, sizeof resp)) { 1133 ret = -EFAULT; 1134 goto err_copy; 1135 } 1136 1137 put_pd_read(pd); 1138 put_cq_read(scq); 1139 if (rcq != scq) 1140 put_cq_read(rcq); 1141 if (srq) 1142 put_srq_read(srq); 1143 1144 mutex_lock(&file->mutex); 1145 list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list); 1146 mutex_unlock(&file->mutex); 1147 1148 obj->uevent.uobject.live = 1; 1149 1150 up_write(&obj->uevent.uobject.mutex); 1151 1152 return in_len; 1153 1154 err_copy: 1155 idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject); 1156 1157 err_destroy: 1158 ib_destroy_qp(qp); 1159 1160 err_put: 1161 if (pd) 1162 put_pd_read(pd); 1163 if (scq) 1164 put_cq_read(scq); 1165 if (rcq && rcq != scq) 1166 put_cq_read(rcq); 1167 if (srq) 1168 put_srq_read(srq); 1169 1170 put_uobj_write(&obj->uevent.uobject); 1171 return ret; 1172 } 1173 1174 ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file, 1175 const char __user *buf, int in_len, 1176 int out_len) 1177 { 1178 struct ib_uverbs_query_qp cmd; 1179 struct ib_uverbs_query_qp_resp resp; 1180 struct ib_qp *qp; 1181 struct ib_qp_attr *attr; 1182 struct ib_qp_init_attr *init_attr; 1183 int ret; 1184 1185 if (copy_from_user(&cmd, buf, sizeof cmd)) 1186 return -EFAULT; 1187 1188 attr = kmalloc(sizeof *attr, GFP_KERNEL); 1189 init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL); 1190 if (!attr || !init_attr) { 1191 ret = -ENOMEM; 1192 goto out; 1193 } 1194 1195 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 1196 if (!qp) { 1197 ret = -EINVAL; 1198 goto out; 1199 } 1200 1201 ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr); 1202 1203 put_qp_read(qp); 1204 1205 if (ret) 1206 goto out; 1207 1208 memset(&resp, 0, sizeof resp); 1209 1210 resp.qp_state = attr->qp_state; 1211 resp.cur_qp_state = attr->cur_qp_state; 1212 resp.path_mtu = attr->path_mtu; 1213 resp.path_mig_state = attr->path_mig_state; 1214 resp.qkey = attr->qkey; 1215 
resp.rq_psn = attr->rq_psn; 1216 resp.sq_psn = attr->sq_psn; 1217 resp.dest_qp_num = attr->dest_qp_num; 1218 resp.qp_access_flags = attr->qp_access_flags; 1219 resp.pkey_index = attr->pkey_index; 1220 resp.alt_pkey_index = attr->alt_pkey_index; 1221 resp.sq_draining = attr->sq_draining; 1222 resp.max_rd_atomic = attr->max_rd_atomic; 1223 resp.max_dest_rd_atomic = attr->max_dest_rd_atomic; 1224 resp.min_rnr_timer = attr->min_rnr_timer; 1225 resp.port_num = attr->port_num; 1226 resp.timeout = attr->timeout; 1227 resp.retry_cnt = attr->retry_cnt; 1228 resp.rnr_retry = attr->rnr_retry; 1229 resp.alt_port_num = attr->alt_port_num; 1230 resp.alt_timeout = attr->alt_timeout; 1231 1232 memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16); 1233 resp.dest.flow_label = attr->ah_attr.grh.flow_label; 1234 resp.dest.sgid_index = attr->ah_attr.grh.sgid_index; 1235 resp.dest.hop_limit = attr->ah_attr.grh.hop_limit; 1236 resp.dest.traffic_class = attr->ah_attr.grh.traffic_class; 1237 resp.dest.dlid = attr->ah_attr.dlid; 1238 resp.dest.sl = attr->ah_attr.sl; 1239 resp.dest.src_path_bits = attr->ah_attr.src_path_bits; 1240 resp.dest.static_rate = attr->ah_attr.static_rate; 1241 resp.dest.is_global = !!(attr->ah_attr.ah_flags & IB_AH_GRH); 1242 resp.dest.port_num = attr->ah_attr.port_num; 1243 1244 memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16); 1245 resp.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label; 1246 resp.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index; 1247 resp.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit; 1248 resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class; 1249 resp.alt_dest.dlid = attr->alt_ah_attr.dlid; 1250 resp.alt_dest.sl = attr->alt_ah_attr.sl; 1251 resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits; 1252 resp.alt_dest.static_rate = attr->alt_ah_attr.static_rate; 1253 resp.alt_dest.is_global = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH); 1254 resp.alt_dest.port_num = attr->alt_ah_attr.port_num; 1255 1256 resp.max_send_wr = init_attr->cap.max_send_wr; 1257 resp.max_recv_wr = init_attr->cap.max_recv_wr; 1258 resp.max_send_sge = init_attr->cap.max_send_sge; 1259 resp.max_recv_sge = init_attr->cap.max_recv_sge; 1260 resp.max_inline_data = init_attr->cap.max_inline_data; 1261 resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR; 1262 1263 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1264 &resp, sizeof resp)) 1265 ret = -EFAULT; 1266 1267 out: 1268 kfree(attr); 1269 kfree(init_attr); 1270 1271 return ret ? 
ret : in_len; 1272 } 1273 1274 ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, 1275 const char __user *buf, int in_len, 1276 int out_len) 1277 { 1278 struct ib_uverbs_modify_qp cmd; 1279 struct ib_udata udata; 1280 struct ib_qp *qp; 1281 struct ib_qp_attr *attr; 1282 int ret; 1283 1284 if (copy_from_user(&cmd, buf, sizeof cmd)) 1285 return -EFAULT; 1286 1287 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, 1288 out_len); 1289 1290 attr = kmalloc(sizeof *attr, GFP_KERNEL); 1291 if (!attr) 1292 return -ENOMEM; 1293 1294 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 1295 if (!qp) { 1296 ret = -EINVAL; 1297 goto out; 1298 } 1299 1300 attr->qp_state = cmd.qp_state; 1301 attr->cur_qp_state = cmd.cur_qp_state; 1302 attr->path_mtu = cmd.path_mtu; 1303 attr->path_mig_state = cmd.path_mig_state; 1304 attr->qkey = cmd.qkey; 1305 attr->rq_psn = cmd.rq_psn; 1306 attr->sq_psn = cmd.sq_psn; 1307 attr->dest_qp_num = cmd.dest_qp_num; 1308 attr->qp_access_flags = cmd.qp_access_flags; 1309 attr->pkey_index = cmd.pkey_index; 1310 attr->alt_pkey_index = cmd.alt_pkey_index; 1311 attr->en_sqd_async_notify = cmd.en_sqd_async_notify; 1312 attr->max_rd_atomic = cmd.max_rd_atomic; 1313 attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic; 1314 attr->min_rnr_timer = cmd.min_rnr_timer; 1315 attr->port_num = cmd.port_num; 1316 attr->timeout = cmd.timeout; 1317 attr->retry_cnt = cmd.retry_cnt; 1318 attr->rnr_retry = cmd.rnr_retry; 1319 attr->alt_port_num = cmd.alt_port_num; 1320 attr->alt_timeout = cmd.alt_timeout; 1321 1322 memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16); 1323 attr->ah_attr.grh.flow_label = cmd.dest.flow_label; 1324 attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index; 1325 attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit; 1326 attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class; 1327 attr->ah_attr.dlid = cmd.dest.dlid; 1328 attr->ah_attr.sl = cmd.dest.sl; 1329 attr->ah_attr.src_path_bits = cmd.dest.src_path_bits; 1330 attr->ah_attr.static_rate = cmd.dest.static_rate; 1331 attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0; 1332 attr->ah_attr.port_num = cmd.dest.port_num; 1333 1334 memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16); 1335 attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label; 1336 attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index; 1337 attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit; 1338 attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class; 1339 attr->alt_ah_attr.dlid = cmd.alt_dest.dlid; 1340 attr->alt_ah_attr.sl = cmd.alt_dest.sl; 1341 attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits; 1342 attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate; 1343 attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? 
IB_AH_GRH : 0; 1344 attr->alt_ah_attr.port_num = cmd.alt_dest.port_num; 1345 1346 ret = qp->device->modify_qp(qp, attr, cmd.attr_mask, &udata); 1347 1348 put_qp_read(qp); 1349 1350 if (ret) 1351 goto out; 1352 1353 ret = in_len; 1354 1355 out: 1356 kfree(attr); 1357 1358 return ret; 1359 } 1360 1361 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file, 1362 const char __user *buf, int in_len, 1363 int out_len) 1364 { 1365 struct ib_uverbs_destroy_qp cmd; 1366 struct ib_uverbs_destroy_qp_resp resp; 1367 struct ib_uobject *uobj; 1368 struct ib_qp *qp; 1369 struct ib_uqp_object *obj; 1370 int ret = -EINVAL; 1371 1372 if (copy_from_user(&cmd, buf, sizeof cmd)) 1373 return -EFAULT; 1374 1375 memset(&resp, 0, sizeof resp); 1376 1377 uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext); 1378 if (!uobj) 1379 return -EINVAL; 1380 qp = uobj->object; 1381 obj = container_of(uobj, struct ib_uqp_object, uevent.uobject); 1382 1383 if (!list_empty(&obj->mcast_list)) { 1384 put_uobj_write(uobj); 1385 return -EBUSY; 1386 } 1387 1388 ret = ib_destroy_qp(qp); 1389 if (!ret) 1390 uobj->live = 0; 1391 1392 put_uobj_write(uobj); 1393 1394 if (ret) 1395 return ret; 1396 1397 idr_remove_uobj(&ib_uverbs_qp_idr, uobj); 1398 1399 mutex_lock(&file->mutex); 1400 list_del(&uobj->list); 1401 mutex_unlock(&file->mutex); 1402 1403 ib_uverbs_release_uevent(file, &obj->uevent); 1404 1405 resp.events_reported = obj->uevent.events_reported; 1406 1407 put_uobj(uobj); 1408 1409 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1410 &resp, sizeof resp)) 1411 return -EFAULT; 1412 1413 return in_len; 1414 } 1415 1416 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, 1417 const char __user *buf, int in_len, 1418 int out_len) 1419 { 1420 struct ib_uverbs_post_send cmd; 1421 struct ib_uverbs_post_send_resp resp; 1422 struct ib_uverbs_send_wr *user_wr; 1423 struct ib_send_wr *wr = NULL, *last, *next, *bad_wr; 1424 struct ib_qp *qp; 1425 int i, sg_ind; 1426 int is_ud; 1427 ssize_t ret = -EINVAL; 1428 1429 if (copy_from_user(&cmd, buf, sizeof cmd)) 1430 return -EFAULT; 1431 1432 if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count + 1433 cmd.sge_count * sizeof (struct ib_uverbs_sge)) 1434 return -EINVAL; 1435 1436 if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr)) 1437 return -EINVAL; 1438 1439 user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL); 1440 if (!user_wr) 1441 return -ENOMEM; 1442 1443 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 1444 if (!qp) 1445 goto out; 1446 1447 is_ud = qp->qp_type == IB_QPT_UD; 1448 sg_ind = 0; 1449 last = NULL; 1450 for (i = 0; i < cmd.wr_count; ++i) { 1451 if (copy_from_user(user_wr, 1452 buf + sizeof cmd + i * cmd.wqe_size, 1453 cmd.wqe_size)) { 1454 ret = -EFAULT; 1455 goto out_put; 1456 } 1457 1458 if (user_wr->num_sge + sg_ind > cmd.sge_count) { 1459 ret = -EINVAL; 1460 goto out_put; 1461 } 1462 1463 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) + 1464 user_wr->num_sge * sizeof (struct ib_sge), 1465 GFP_KERNEL); 1466 if (!next) { 1467 ret = -ENOMEM; 1468 goto out_put; 1469 } 1470 1471 if (!last) 1472 wr = next; 1473 else 1474 last->next = next; 1475 last = next; 1476 1477 next->next = NULL; 1478 next->wr_id = user_wr->wr_id; 1479 next->num_sge = user_wr->num_sge; 1480 next->opcode = user_wr->opcode; 1481 next->send_flags = user_wr->send_flags; 1482 1483 if (is_ud) { 1484 next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah, 1485 file->ucontext); 1486 if (!next->wr.ud.ah) { 1487 ret = -EINVAL; 1488 goto out_put; 1489 } 1490 next->wr.ud.remote_qpn = 
user_wr->wr.ud.remote_qpn; 1491 next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey; 1492 } else { 1493 switch (next->opcode) { 1494 case IB_WR_RDMA_WRITE_WITH_IMM: 1495 next->ex.imm_data = 1496 (__be32 __force) user_wr->ex.imm_data; 1497 case IB_WR_RDMA_WRITE: 1498 case IB_WR_RDMA_READ: 1499 next->wr.rdma.remote_addr = 1500 user_wr->wr.rdma.remote_addr; 1501 next->wr.rdma.rkey = 1502 user_wr->wr.rdma.rkey; 1503 break; 1504 case IB_WR_SEND_WITH_IMM: 1505 next->ex.imm_data = 1506 (__be32 __force) user_wr->ex.imm_data; 1507 break; 1508 case IB_WR_SEND_WITH_INV: 1509 next->ex.invalidate_rkey = 1510 user_wr->ex.invalidate_rkey; 1511 break; 1512 case IB_WR_ATOMIC_CMP_AND_SWP: 1513 case IB_WR_ATOMIC_FETCH_AND_ADD: 1514 next->wr.atomic.remote_addr = 1515 user_wr->wr.atomic.remote_addr; 1516 next->wr.atomic.compare_add = 1517 user_wr->wr.atomic.compare_add; 1518 next->wr.atomic.swap = user_wr->wr.atomic.swap; 1519 next->wr.atomic.rkey = user_wr->wr.atomic.rkey; 1520 break; 1521 default: 1522 break; 1523 } 1524 } 1525 1526 if (next->num_sge) { 1527 next->sg_list = (void *) next + 1528 ALIGN(sizeof *next, sizeof (struct ib_sge)); 1529 if (copy_from_user(next->sg_list, 1530 buf + sizeof cmd + 1531 cmd.wr_count * cmd.wqe_size + 1532 sg_ind * sizeof (struct ib_sge), 1533 next->num_sge * sizeof (struct ib_sge))) { 1534 ret = -EFAULT; 1535 goto out_put; 1536 } 1537 sg_ind += next->num_sge; 1538 } else 1539 next->sg_list = NULL; 1540 } 1541 1542 resp.bad_wr = 0; 1543 ret = qp->device->post_send(qp, wr, &bad_wr); 1544 if (ret) 1545 for (next = wr; next; next = next->next) { 1546 ++resp.bad_wr; 1547 if (next == bad_wr) 1548 break; 1549 } 1550 1551 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1552 &resp, sizeof resp)) 1553 ret = -EFAULT; 1554 1555 out_put: 1556 put_qp_read(qp); 1557 1558 while (wr) { 1559 if (is_ud && wr->wr.ud.ah) 1560 put_ah_read(wr->wr.ud.ah); 1561 next = wr->next; 1562 kfree(wr); 1563 wr = next; 1564 } 1565 1566 out: 1567 kfree(user_wr); 1568 1569 return ret ? 
ret : in_len; 1570 } 1571 1572 static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf, 1573 int in_len, 1574 u32 wr_count, 1575 u32 sge_count, 1576 u32 wqe_size) 1577 { 1578 struct ib_uverbs_recv_wr *user_wr; 1579 struct ib_recv_wr *wr = NULL, *last, *next; 1580 int sg_ind; 1581 int i; 1582 int ret; 1583 1584 if (in_len < wqe_size * wr_count + 1585 sge_count * sizeof (struct ib_uverbs_sge)) 1586 return ERR_PTR(-EINVAL); 1587 1588 if (wqe_size < sizeof (struct ib_uverbs_recv_wr)) 1589 return ERR_PTR(-EINVAL); 1590 1591 user_wr = kmalloc(wqe_size, GFP_KERNEL); 1592 if (!user_wr) 1593 return ERR_PTR(-ENOMEM); 1594 1595 sg_ind = 0; 1596 last = NULL; 1597 for (i = 0; i < wr_count; ++i) { 1598 if (copy_from_user(user_wr, buf + i * wqe_size, 1599 wqe_size)) { 1600 ret = -EFAULT; 1601 goto err; 1602 } 1603 1604 if (user_wr->num_sge + sg_ind > sge_count) { 1605 ret = -EINVAL; 1606 goto err; 1607 } 1608 1609 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) + 1610 user_wr->num_sge * sizeof (struct ib_sge), 1611 GFP_KERNEL); 1612 if (!next) { 1613 ret = -ENOMEM; 1614 goto err; 1615 } 1616 1617 if (!last) 1618 wr = next; 1619 else 1620 last->next = next; 1621 last = next; 1622 1623 next->next = NULL; 1624 next->wr_id = user_wr->wr_id; 1625 next->num_sge = user_wr->num_sge; 1626 1627 if (next->num_sge) { 1628 next->sg_list = (void *) next + 1629 ALIGN(sizeof *next, sizeof (struct ib_sge)); 1630 if (copy_from_user(next->sg_list, 1631 buf + wr_count * wqe_size + 1632 sg_ind * sizeof (struct ib_sge), 1633 next->num_sge * sizeof (struct ib_sge))) { 1634 ret = -EFAULT; 1635 goto err; 1636 } 1637 sg_ind += next->num_sge; 1638 } else 1639 next->sg_list = NULL; 1640 } 1641 1642 kfree(user_wr); 1643 return wr; 1644 1645 err: 1646 kfree(user_wr); 1647 1648 while (wr) { 1649 next = wr->next; 1650 kfree(wr); 1651 wr = next; 1652 } 1653 1654 return ERR_PTR(ret); 1655 } 1656 1657 ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file, 1658 const char __user *buf, int in_len, 1659 int out_len) 1660 { 1661 struct ib_uverbs_post_recv cmd; 1662 struct ib_uverbs_post_recv_resp resp; 1663 struct ib_recv_wr *wr, *next, *bad_wr; 1664 struct ib_qp *qp; 1665 ssize_t ret = -EINVAL; 1666 1667 if (copy_from_user(&cmd, buf, sizeof cmd)) 1668 return -EFAULT; 1669 1670 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd, 1671 in_len - sizeof cmd, cmd.wr_count, 1672 cmd.sge_count, cmd.wqe_size); 1673 if (IS_ERR(wr)) 1674 return PTR_ERR(wr); 1675 1676 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 1677 if (!qp) 1678 goto out; 1679 1680 resp.bad_wr = 0; 1681 ret = qp->device->post_recv(qp, wr, &bad_wr); 1682 1683 put_qp_read(qp); 1684 1685 if (ret) 1686 for (next = wr; next; next = next->next) { 1687 ++resp.bad_wr; 1688 if (next == bad_wr) 1689 break; 1690 } 1691 1692 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1693 &resp, sizeof resp)) 1694 ret = -EFAULT; 1695 1696 out: 1697 while (wr) { 1698 next = wr->next; 1699 kfree(wr); 1700 wr = next; 1701 } 1702 1703 return ret ? 
ret : in_len; 1704 } 1705 1706 ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file, 1707 const char __user *buf, int in_len, 1708 int out_len) 1709 { 1710 struct ib_uverbs_post_srq_recv cmd; 1711 struct ib_uverbs_post_srq_recv_resp resp; 1712 struct ib_recv_wr *wr, *next, *bad_wr; 1713 struct ib_srq *srq; 1714 ssize_t ret = -EINVAL; 1715 1716 if (copy_from_user(&cmd, buf, sizeof cmd)) 1717 return -EFAULT; 1718 1719 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd, 1720 in_len - sizeof cmd, cmd.wr_count, 1721 cmd.sge_count, cmd.wqe_size); 1722 if (IS_ERR(wr)) 1723 return PTR_ERR(wr); 1724 1725 srq = idr_read_srq(cmd.srq_handle, file->ucontext); 1726 if (!srq) 1727 goto out; 1728 1729 resp.bad_wr = 0; 1730 ret = srq->device->post_srq_recv(srq, wr, &bad_wr); 1731 1732 put_srq_read(srq); 1733 1734 if (ret) 1735 for (next = wr; next; next = next->next) { 1736 ++resp.bad_wr; 1737 if (next == bad_wr) 1738 break; 1739 } 1740 1741 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1742 &resp, sizeof resp)) 1743 ret = -EFAULT; 1744 1745 out: 1746 while (wr) { 1747 next = wr->next; 1748 kfree(wr); 1749 wr = next; 1750 } 1751 1752 return ret ? ret : in_len; 1753 } 1754 1755 ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, 1756 const char __user *buf, int in_len, 1757 int out_len) 1758 { 1759 struct ib_uverbs_create_ah cmd; 1760 struct ib_uverbs_create_ah_resp resp; 1761 struct ib_uobject *uobj; 1762 struct ib_pd *pd; 1763 struct ib_ah *ah; 1764 struct ib_ah_attr attr; 1765 int ret; 1766 1767 if (out_len < sizeof resp) 1768 return -ENOSPC; 1769 1770 if (copy_from_user(&cmd, buf, sizeof cmd)) 1771 return -EFAULT; 1772 1773 uobj = kmalloc(sizeof *uobj, GFP_KERNEL); 1774 if (!uobj) 1775 return -ENOMEM; 1776 1777 init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_key); 1778 down_write(&uobj->mutex); 1779 1780 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 1781 if (!pd) { 1782 ret = -EINVAL; 1783 goto err; 1784 } 1785 1786 attr.dlid = cmd.attr.dlid; 1787 attr.sl = cmd.attr.sl; 1788 attr.src_path_bits = cmd.attr.src_path_bits; 1789 attr.static_rate = cmd.attr.static_rate; 1790 attr.ah_flags = cmd.attr.is_global ? 
IB_AH_GRH : 0; 1791 attr.port_num = cmd.attr.port_num; 1792 attr.grh.flow_label = cmd.attr.grh.flow_label; 1793 attr.grh.sgid_index = cmd.attr.grh.sgid_index; 1794 attr.grh.hop_limit = cmd.attr.grh.hop_limit; 1795 attr.grh.traffic_class = cmd.attr.grh.traffic_class; 1796 memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16); 1797 1798 ah = ib_create_ah(pd, &attr); 1799 if (IS_ERR(ah)) { 1800 ret = PTR_ERR(ah); 1801 goto err_put; 1802 } 1803 1804 ah->uobject = uobj; 1805 uobj->object = ah; 1806 1807 ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj); 1808 if (ret) 1809 goto err_destroy; 1810 1811 resp.ah_handle = uobj->id; 1812 1813 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1814 &resp, sizeof resp)) { 1815 ret = -EFAULT; 1816 goto err_copy; 1817 } 1818 1819 put_pd_read(pd); 1820 1821 mutex_lock(&file->mutex); 1822 list_add_tail(&uobj->list, &file->ucontext->ah_list); 1823 mutex_unlock(&file->mutex); 1824 1825 uobj->live = 1; 1826 1827 up_write(&uobj->mutex); 1828 1829 return in_len; 1830 1831 err_copy: 1832 idr_remove_uobj(&ib_uverbs_ah_idr, uobj); 1833 1834 err_destroy: 1835 ib_destroy_ah(ah); 1836 1837 err_put: 1838 put_pd_read(pd); 1839 1840 err: 1841 put_uobj_write(uobj); 1842 return ret; 1843 } 1844 1845 ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file, 1846 const char __user *buf, int in_len, int out_len) 1847 { 1848 struct ib_uverbs_destroy_ah cmd; 1849 struct ib_ah *ah; 1850 struct ib_uobject *uobj; 1851 int ret; 1852 1853 if (copy_from_user(&cmd, buf, sizeof cmd)) 1854 return -EFAULT; 1855 1856 uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext); 1857 if (!uobj) 1858 return -EINVAL; 1859 ah = uobj->object; 1860 1861 ret = ib_destroy_ah(ah); 1862 if (!ret) 1863 uobj->live = 0; 1864 1865 put_uobj_write(uobj); 1866 1867 if (ret) 1868 return ret; 1869 1870 idr_remove_uobj(&ib_uverbs_ah_idr, uobj); 1871 1872 mutex_lock(&file->mutex); 1873 list_del(&uobj->list); 1874 mutex_unlock(&file->mutex); 1875 1876 put_uobj(uobj); 1877 1878 return in_len; 1879 } 1880 1881 ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file, 1882 const char __user *buf, int in_len, 1883 int out_len) 1884 { 1885 struct ib_uverbs_attach_mcast cmd; 1886 struct ib_qp *qp; 1887 struct ib_uqp_object *obj; 1888 struct ib_uverbs_mcast_entry *mcast; 1889 int ret; 1890 1891 if (copy_from_user(&cmd, buf, sizeof cmd)) 1892 return -EFAULT; 1893 1894 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 1895 if (!qp) 1896 return -EINVAL; 1897 1898 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject); 1899 1900 list_for_each_entry(mcast, &obj->mcast_list, list) 1901 if (cmd.mlid == mcast->lid && 1902 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) { 1903 ret = 0; 1904 goto out_put; 1905 } 1906 1907 mcast = kmalloc(sizeof *mcast, GFP_KERNEL); 1908 if (!mcast) { 1909 ret = -ENOMEM; 1910 goto out_put; 1911 } 1912 1913 mcast->lid = cmd.mlid; 1914 memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw); 1915 1916 ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid); 1917 if (!ret) 1918 list_add_tail(&mcast->list, &obj->mcast_list); 1919 else 1920 kfree(mcast); 1921 1922 out_put: 1923 put_qp_read(qp); 1924 1925 return ret ? 
ret : in_len; 1926 } 1927 1928 ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file, 1929 const char __user *buf, int in_len, 1930 int out_len) 1931 { 1932 struct ib_uverbs_detach_mcast cmd; 1933 struct ib_uqp_object *obj; 1934 struct ib_qp *qp; 1935 struct ib_uverbs_mcast_entry *mcast; 1936 int ret = -EINVAL; 1937 1938 if (copy_from_user(&cmd, buf, sizeof cmd)) 1939 return -EFAULT; 1940 1941 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 1942 if (!qp) 1943 return -EINVAL; 1944 1945 ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid); 1946 if (ret) 1947 goto out_put; 1948 1949 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject); 1950 1951 list_for_each_entry(mcast, &obj->mcast_list, list) 1952 if (cmd.mlid == mcast->lid && 1953 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) { 1954 list_del(&mcast->list); 1955 kfree(mcast); 1956 break; 1957 } 1958 1959 out_put: 1960 put_qp_read(qp); 1961 1962 return ret ? ret : in_len; 1963 } 1964 1965 ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file, 1966 const char __user *buf, int in_len, 1967 int out_len) 1968 { 1969 struct ib_uverbs_create_srq cmd; 1970 struct ib_uverbs_create_srq_resp resp; 1971 struct ib_udata udata; 1972 struct ib_uevent_object *obj; 1973 struct ib_pd *pd; 1974 struct ib_srq *srq; 1975 struct ib_srq_init_attr attr; 1976 int ret; 1977 1978 if (out_len < sizeof resp) 1979 return -ENOSPC; 1980 1981 if (copy_from_user(&cmd, buf, sizeof cmd)) 1982 return -EFAULT; 1983 1984 INIT_UDATA(&udata, buf + sizeof cmd, 1985 (unsigned long) cmd.response + sizeof resp, 1986 in_len - sizeof cmd, out_len - sizeof resp); 1987 1988 obj = kmalloc(sizeof *obj, GFP_KERNEL); 1989 if (!obj) 1990 return -ENOMEM; 1991 1992 init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &srq_lock_key); 1993 down_write(&obj->uobject.mutex); 1994 1995 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 1996 if (!pd) { 1997 ret = -EINVAL; 1998 goto err; 1999 } 2000 2001 attr.event_handler = ib_uverbs_srq_event_handler; 2002 attr.srq_context = file; 2003 attr.attr.max_wr = cmd.max_wr; 2004 attr.attr.max_sge = cmd.max_sge; 2005 attr.attr.srq_limit = cmd.srq_limit; 2006 2007 obj->events_reported = 0; 2008 INIT_LIST_HEAD(&obj->event_list); 2009 2010 srq = pd->device->create_srq(pd, &attr, &udata); 2011 if (IS_ERR(srq)) { 2012 ret = PTR_ERR(srq); 2013 goto err_put; 2014 } 2015 2016 srq->device = pd->device; 2017 srq->pd = pd; 2018 srq->uobject = &obj->uobject; 2019 srq->event_handler = attr.event_handler; 2020 srq->srq_context = attr.srq_context; 2021 atomic_inc(&pd->usecnt); 2022 atomic_set(&srq->usecnt, 0); 2023 2024 obj->uobject.object = srq; 2025 ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uobject); 2026 if (ret) 2027 goto err_destroy; 2028 2029 memset(&resp, 0, sizeof resp); 2030 resp.srq_handle = obj->uobject.id; 2031 resp.max_wr = attr.attr.max_wr; 2032 resp.max_sge = attr.attr.max_sge; 2033 2034 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2035 &resp, sizeof resp)) { 2036 ret = -EFAULT; 2037 goto err_copy; 2038 } 2039 2040 put_pd_read(pd); 2041 2042 mutex_lock(&file->mutex); 2043 list_add_tail(&obj->uobject.list, &file->ucontext->srq_list); 2044 mutex_unlock(&file->mutex); 2045 2046 obj->uobject.live = 1; 2047 2048 up_write(&obj->uobject.mutex); 2049 2050 return in_len; 2051 2052 err_copy: 2053 idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uobject); 2054 2055 err_destroy: 2056 ib_destroy_srq(srq); 2057 2058 err_put: 2059 put_pd_read(pd); 2060 2061 err: 2062 put_uobj_write(&obj->uobject); 2063 return 
ret; 2064 } 2065 2066 ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, 2067 const char __user *buf, int in_len, 2068 int out_len) 2069 { 2070 struct ib_uverbs_modify_srq cmd; 2071 struct ib_udata udata; 2072 struct ib_srq *srq; 2073 struct ib_srq_attr attr; 2074 int ret; 2075 2076 if (copy_from_user(&cmd, buf, sizeof cmd)) 2077 return -EFAULT; 2078 2079 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, 2080 out_len); 2081 2082 srq = idr_read_srq(cmd.srq_handle, file->ucontext); 2083 if (!srq) 2084 return -EINVAL; 2085 2086 attr.max_wr = cmd.max_wr; 2087 attr.srq_limit = cmd.srq_limit; 2088 2089 ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata); 2090 2091 put_srq_read(srq); 2092 2093 return ret ? ret : in_len; 2094 } 2095 2096 ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file, 2097 const char __user *buf, 2098 int in_len, int out_len) 2099 { 2100 struct ib_uverbs_query_srq cmd; 2101 struct ib_uverbs_query_srq_resp resp; 2102 struct ib_srq_attr attr; 2103 struct ib_srq *srq; 2104 int ret; 2105 2106 if (out_len < sizeof resp) 2107 return -ENOSPC; 2108 2109 if (copy_from_user(&cmd, buf, sizeof cmd)) 2110 return -EFAULT; 2111 2112 srq = idr_read_srq(cmd.srq_handle, file->ucontext); 2113 if (!srq) 2114 return -EINVAL; 2115 2116 ret = ib_query_srq(srq, &attr); 2117 2118 put_srq_read(srq); 2119 2120 if (ret) 2121 return ret; 2122 2123 memset(&resp, 0, sizeof resp); 2124 2125 resp.max_wr = attr.max_wr; 2126 resp.max_sge = attr.max_sge; 2127 resp.srq_limit = attr.srq_limit; 2128 2129 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2130 &resp, sizeof resp)) 2131 return -EFAULT; 2132 2133 return in_len; 2134 } 2135 2136 ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file, 2137 const char __user *buf, int in_len, 2138 int out_len) 2139 { 2140 struct ib_uverbs_destroy_srq cmd; 2141 struct ib_uverbs_destroy_srq_resp resp; 2142 struct ib_uobject *uobj; 2143 struct ib_srq *srq; 2144 struct ib_uevent_object *obj; 2145 int ret = -EINVAL; 2146 2147 if (copy_from_user(&cmd, buf, sizeof cmd)) 2148 return -EFAULT; 2149 2150 uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext); 2151 if (!uobj) 2152 return -EINVAL; 2153 srq = uobj->object; 2154 obj = container_of(uobj, struct ib_uevent_object, uobject); 2155 2156 ret = ib_destroy_srq(srq); 2157 if (!ret) 2158 uobj->live = 0; 2159 2160 put_uobj_write(uobj); 2161 2162 if (ret) 2163 return ret; 2164 2165 idr_remove_uobj(&ib_uverbs_srq_idr, uobj); 2166 2167 mutex_lock(&file->mutex); 2168 list_del(&uobj->list); 2169 mutex_unlock(&file->mutex); 2170 2171 ib_uverbs_release_uevent(file, obj); 2172 2173 memset(&resp, 0, sizeof resp); 2174 resp.events_reported = obj->events_reported; 2175 2176 put_uobj(uobj); 2177 2178 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2179 &resp, sizeof resp)) 2180 ret = -EFAULT; 2181 2182 return ret ? ret : in_len; 2183 } 2184
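
/*
 * Note on how these handlers are reached (summary only; see
 * uverbs_main.c for the authoritative dispatch code): userspace,
 * normally via libibverbs, write()s a struct ib_uverbs_cmd_hdr
 * followed by the command structure to /dev/infiniband/uverbsN.
 * ib_uverbs_write() looks up the handler by hdr.command and passes it
 * the buffer and the in/out lengths, which is why every handler here
 * begins by copying its command struct from "buf" and returns in_len
 * on success.
 */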