1 /* 2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. 4 * Copyright (c) 2005 PathScale, Inc. All rights reserved. 5 * Copyright (c) 2006 Mellanox Technologies. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 
34 * 35 * $Id: uverbs_cmd.c 2708 2005-06-24 17:27:21Z roland $ 36 */ 37 38 #include <linux/file.h> 39 #include <linux/fs.h> 40 41 #include <asm/uaccess.h> 42 43 #include "uverbs.h" 44 45 static struct lock_class_key pd_lock_key; 46 static struct lock_class_key mr_lock_key; 47 static struct lock_class_key cq_lock_key; 48 static struct lock_class_key qp_lock_key; 49 static struct lock_class_key ah_lock_key; 50 static struct lock_class_key srq_lock_key; 51 52 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ 53 do { \ 54 (udata)->inbuf = (void __user *) (ibuf); \ 55 (udata)->outbuf = (void __user *) (obuf); \ 56 (udata)->inlen = (ilen); \ 57 (udata)->outlen = (olen); \ 58 } while (0) 59 60 /* 61 * The ib_uobject locking scheme is as follows: 62 * 63 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it 64 * needs to be held during all idr operations. When an object is 65 * looked up, a reference must be taken on the object's kref before 66 * dropping this lock. 67 * 68 * - Each object also has an rwsem. This rwsem must be held for 69 * reading while an operation that uses the object is performed. 70 * For example, while registering an MR, the associated PD's 71 * uobject.mutex must be held for reading. The rwsem must be held 72 * for writing while initializing or destroying an object. 73 * 74 * - In addition, each object has a "live" flag. If this flag is not 75 * set, then lookups of the object will fail even if it is found in 76 * the idr. This handles a reader that blocks and does not acquire 77 * the rwsem until after the object is destroyed. The destroy 78 * operation will set the live flag to 0 and then drop the rwsem; 79 * this will allow the reader to acquire the rwsem, see that the 80 * live flag is 0, and then drop the rwsem and its reference to 81 * object. The underlying storage will not be freed until the last 82 * reference to the object is dropped. 
83 */ 84 85 static void init_uobj(struct ib_uobject *uobj, u64 user_handle, 86 struct ib_ucontext *context, struct lock_class_key *key) 87 { 88 uobj->user_handle = user_handle; 89 uobj->context = context; 90 kref_init(&uobj->ref); 91 init_rwsem(&uobj->mutex); 92 lockdep_set_class(&uobj->mutex, key); 93 uobj->live = 0; 94 } 95 96 static void release_uobj(struct kref *kref) 97 { 98 kfree(container_of(kref, struct ib_uobject, ref)); 99 } 100 101 static void put_uobj(struct ib_uobject *uobj) 102 { 103 kref_put(&uobj->ref, release_uobj); 104 } 105 106 static void put_uobj_read(struct ib_uobject *uobj) 107 { 108 up_read(&uobj->mutex); 109 put_uobj(uobj); 110 } 111 112 static void put_uobj_write(struct ib_uobject *uobj) 113 { 114 up_write(&uobj->mutex); 115 put_uobj(uobj); 116 } 117 118 static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj) 119 { 120 int ret; 121 122 retry: 123 if (!idr_pre_get(idr, GFP_KERNEL)) 124 return -ENOMEM; 125 126 spin_lock(&ib_uverbs_idr_lock); 127 ret = idr_get_new(idr, uobj, &uobj->id); 128 spin_unlock(&ib_uverbs_idr_lock); 129 130 if (ret == -EAGAIN) 131 goto retry; 132 133 return ret; 134 } 135 136 void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj) 137 { 138 spin_lock(&ib_uverbs_idr_lock); 139 idr_remove(idr, uobj->id); 140 spin_unlock(&ib_uverbs_idr_lock); 141 } 142 143 static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id, 144 struct ib_ucontext *context) 145 { 146 struct ib_uobject *uobj; 147 148 spin_lock(&ib_uverbs_idr_lock); 149 uobj = idr_find(idr, id); 150 if (uobj) 151 kref_get(&uobj->ref); 152 spin_unlock(&ib_uverbs_idr_lock); 153 154 return uobj; 155 } 156 157 static struct ib_uobject *idr_read_uobj(struct idr *idr, int id, 158 struct ib_ucontext *context, int nested) 159 { 160 struct ib_uobject *uobj; 161 162 uobj = __idr_get_uobj(idr, id, context); 163 if (!uobj) 164 return NULL; 165 166 if (nested) 167 down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING); 168 else 169 down_read(&uobj->mutex); 
170 if (!uobj->live) { 171 put_uobj_read(uobj); 172 return NULL; 173 } 174 175 return uobj; 176 } 177 178 static struct ib_uobject *idr_write_uobj(struct idr *idr, int id, 179 struct ib_ucontext *context) 180 { 181 struct ib_uobject *uobj; 182 183 uobj = __idr_get_uobj(idr, id, context); 184 if (!uobj) 185 return NULL; 186 187 down_write(&uobj->mutex); 188 if (!uobj->live) { 189 put_uobj_write(uobj); 190 return NULL; 191 } 192 193 return uobj; 194 } 195 196 static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context, 197 int nested) 198 { 199 struct ib_uobject *uobj; 200 201 uobj = idr_read_uobj(idr, id, context, nested); 202 return uobj ? uobj->object : NULL; 203 } 204 205 static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context) 206 { 207 return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0); 208 } 209 210 static void put_pd_read(struct ib_pd *pd) 211 { 212 put_uobj_read(pd->uobject); 213 } 214 215 static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested) 216 { 217 return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested); 218 } 219 220 static void put_cq_read(struct ib_cq *cq) 221 { 222 put_uobj_read(cq->uobject); 223 } 224 225 static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context) 226 { 227 return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0); 228 } 229 230 static void put_ah_read(struct ib_ah *ah) 231 { 232 put_uobj_read(ah->uobject); 233 } 234 235 static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context) 236 { 237 return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0); 238 } 239 240 static void put_qp_read(struct ib_qp *qp) 241 { 242 put_uobj_read(qp->uobject); 243 } 244 245 static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context) 246 { 247 return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0); 248 } 249 250 static void put_srq_read(struct ib_srq *srq) 251 { 252 
put_uobj_read(srq->uobject); 253 } 254 255 ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, 256 const char __user *buf, 257 int in_len, int out_len) 258 { 259 struct ib_uverbs_get_context cmd; 260 struct ib_uverbs_get_context_resp resp; 261 struct ib_udata udata; 262 struct ib_device *ibdev = file->device->ib_dev; 263 struct ib_ucontext *ucontext; 264 struct file *filp; 265 int ret; 266 267 if (out_len < sizeof resp) 268 return -ENOSPC; 269 270 if (copy_from_user(&cmd, buf, sizeof cmd)) 271 return -EFAULT; 272 273 mutex_lock(&file->mutex); 274 275 if (file->ucontext) { 276 ret = -EINVAL; 277 goto err; 278 } 279 280 INIT_UDATA(&udata, buf + sizeof cmd, 281 (unsigned long) cmd.response + sizeof resp, 282 in_len - sizeof cmd, out_len - sizeof resp); 283 284 ucontext = ibdev->alloc_ucontext(ibdev, &udata); 285 if (IS_ERR(ucontext)) { 286 ret = PTR_ERR(file->ucontext); 287 goto err; 288 } 289 290 ucontext->device = ibdev; 291 INIT_LIST_HEAD(&ucontext->pd_list); 292 INIT_LIST_HEAD(&ucontext->mr_list); 293 INIT_LIST_HEAD(&ucontext->mw_list); 294 INIT_LIST_HEAD(&ucontext->cq_list); 295 INIT_LIST_HEAD(&ucontext->qp_list); 296 INIT_LIST_HEAD(&ucontext->srq_list); 297 INIT_LIST_HEAD(&ucontext->ah_list); 298 299 resp.num_comp_vectors = file->device->num_comp_vectors; 300 301 filp = ib_uverbs_alloc_event_file(file, 1, &resp.async_fd); 302 if (IS_ERR(filp)) { 303 ret = PTR_ERR(filp); 304 goto err_free; 305 } 306 307 if (copy_to_user((void __user *) (unsigned long) cmd.response, 308 &resp, sizeof resp)) { 309 ret = -EFAULT; 310 goto err_file; 311 } 312 313 file->async_file = filp->private_data; 314 315 INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev, 316 ib_uverbs_event_handler); 317 ret = ib_register_event_handler(&file->event_handler); 318 if (ret) 319 goto err_file; 320 321 kref_get(&file->async_file->ref); 322 kref_get(&file->ref); 323 file->ucontext = ucontext; 324 325 fd_install(resp.async_fd, filp); 326 327 mutex_unlock(&file->mutex); 328 329 
return in_len; 330 331 err_file: 332 put_unused_fd(resp.async_fd); 333 fput(filp); 334 335 err_free: 336 ibdev->dealloc_ucontext(ucontext); 337 338 err: 339 mutex_unlock(&file->mutex); 340 return ret; 341 } 342 343 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, 344 const char __user *buf, 345 int in_len, int out_len) 346 { 347 struct ib_uverbs_query_device cmd; 348 struct ib_uverbs_query_device_resp resp; 349 struct ib_device_attr attr; 350 int ret; 351 352 if (out_len < sizeof resp) 353 return -ENOSPC; 354 355 if (copy_from_user(&cmd, buf, sizeof cmd)) 356 return -EFAULT; 357 358 ret = ib_query_device(file->device->ib_dev, &attr); 359 if (ret) 360 return ret; 361 362 memset(&resp, 0, sizeof resp); 363 364 resp.fw_ver = attr.fw_ver; 365 resp.node_guid = file->device->ib_dev->node_guid; 366 resp.sys_image_guid = attr.sys_image_guid; 367 resp.max_mr_size = attr.max_mr_size; 368 resp.page_size_cap = attr.page_size_cap; 369 resp.vendor_id = attr.vendor_id; 370 resp.vendor_part_id = attr.vendor_part_id; 371 resp.hw_ver = attr.hw_ver; 372 resp.max_qp = attr.max_qp; 373 resp.max_qp_wr = attr.max_qp_wr; 374 resp.device_cap_flags = attr.device_cap_flags; 375 resp.max_sge = attr.max_sge; 376 resp.max_sge_rd = attr.max_sge_rd; 377 resp.max_cq = attr.max_cq; 378 resp.max_cqe = attr.max_cqe; 379 resp.max_mr = attr.max_mr; 380 resp.max_pd = attr.max_pd; 381 resp.max_qp_rd_atom = attr.max_qp_rd_atom; 382 resp.max_ee_rd_atom = attr.max_ee_rd_atom; 383 resp.max_res_rd_atom = attr.max_res_rd_atom; 384 resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom; 385 resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom; 386 resp.atomic_cap = attr.atomic_cap; 387 resp.max_ee = attr.max_ee; 388 resp.max_rdd = attr.max_rdd; 389 resp.max_mw = attr.max_mw; 390 resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp; 391 resp.max_raw_ethy_qp = attr.max_raw_ethy_qp; 392 resp.max_mcast_grp = attr.max_mcast_grp; 393 resp.max_mcast_qp_attach = attr.max_mcast_qp_attach; 394 
resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach; 395 resp.max_ah = attr.max_ah; 396 resp.max_fmr = attr.max_fmr; 397 resp.max_map_per_fmr = attr.max_map_per_fmr; 398 resp.max_srq = attr.max_srq; 399 resp.max_srq_wr = attr.max_srq_wr; 400 resp.max_srq_sge = attr.max_srq_sge; 401 resp.max_pkeys = attr.max_pkeys; 402 resp.local_ca_ack_delay = attr.local_ca_ack_delay; 403 resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt; 404 405 if (copy_to_user((void __user *) (unsigned long) cmd.response, 406 &resp, sizeof resp)) 407 return -EFAULT; 408 409 return in_len; 410 } 411 412 ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file, 413 const char __user *buf, 414 int in_len, int out_len) 415 { 416 struct ib_uverbs_query_port cmd; 417 struct ib_uverbs_query_port_resp resp; 418 struct ib_port_attr attr; 419 int ret; 420 421 if (out_len < sizeof resp) 422 return -ENOSPC; 423 424 if (copy_from_user(&cmd, buf, sizeof cmd)) 425 return -EFAULT; 426 427 ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr); 428 if (ret) 429 return ret; 430 431 memset(&resp, 0, sizeof resp); 432 433 resp.state = attr.state; 434 resp.max_mtu = attr.max_mtu; 435 resp.active_mtu = attr.active_mtu; 436 resp.gid_tbl_len = attr.gid_tbl_len; 437 resp.port_cap_flags = attr.port_cap_flags; 438 resp.max_msg_sz = attr.max_msg_sz; 439 resp.bad_pkey_cntr = attr.bad_pkey_cntr; 440 resp.qkey_viol_cntr = attr.qkey_viol_cntr; 441 resp.pkey_tbl_len = attr.pkey_tbl_len; 442 resp.lid = attr.lid; 443 resp.sm_lid = attr.sm_lid; 444 resp.lmc = attr.lmc; 445 resp.max_vl_num = attr.max_vl_num; 446 resp.sm_sl = attr.sm_sl; 447 resp.subnet_timeout = attr.subnet_timeout; 448 resp.init_type_reply = attr.init_type_reply; 449 resp.active_width = attr.active_width; 450 resp.active_speed = attr.active_speed; 451 resp.phys_state = attr.phys_state; 452 453 if (copy_to_user((void __user *) (unsigned long) cmd.response, 454 &resp, sizeof resp)) 455 return -EFAULT; 456 457 return in_len; 458 } 459 460 
/*
 * ALLOC_PD command: allocate a protection domain, wire up its uobject,
 * publish it in the PD idr, and return the handle to userspace.
 *
 * Creation pattern used throughout this file: the uobject is created
 * with live = 0 and held write-locked until everything (verb call, idr
 * insert, copy_to_user, list insert) has succeeded; only then is live
 * set and the write lock dropped, making the object visible to lookups.
 */
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd      cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata                udata;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	int                            ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &pd_lock_key);
	down_write(&uobj->mutex);

	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
					    file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device  = file->device->ib_dev;
	pd->uobject = uobj;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	/* All done -- make the object visible to idr lookups. */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	/* Never went live, so this drops the only reference and frees it. */
	put_uobj_write(uobj);
	return ret;
}

/*
 * DEALLOC_PD command: destroy pattern used throughout this file -- take
 * the uobject write-locked, call the destroy verb, and only clear the
 * live flag if destruction succeeded, then drop the lock before
 * removing the idr entry and list linkage.
 */
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject          *uobj;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	ret = ib_dealloc_pd(uobj->object);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

/*
 * REG_MR command: pin the user memory region, register it with the
 * device under the given PD, and return lkey/rkey/handle.
 */
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr      cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata              udata;
	struct ib_umem_object       *obj;
	struct ib_pd                *pd;
	struct ib_mr                *mr;
	int                          ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	/* The user VA and the HCA VA must share the same page offset. */
	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uobject, 0, file->ucontext, &mr_lock_key);
	down_write(&obj->uobject.mutex);

	/*
	 * We ask for writable memory if any access flags other than
	 * "remote read" are set.  "Local write" and "remote write"
	 * obviously require write access.  "Remote atomic" can do
	 * things like fetch and add, which will modify memory, and
	 * "MW bind" can change permissions by binding a window.
	 */
	ret = ib_umem_get(file->device->ib_dev, &obj->umem,
			  (void *) (unsigned long) cmd.start, cmd.length,
			  !!(cmd.access_flags & ~IB_ACCESS_REMOTE_READ));
	if (ret)
		goto err_free;

	obj->umem.virt_base = cmd.hca_va;

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_release;
	}

	mr = pd->device->reg_user_mr(pd, &obj->umem, cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->uobject = &obj->uobject;
	atomic_inc(&pd->usecnt);
	atomic_set(&mr->usecnt, 0);

	obj->uobject.object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, &obj->uobject);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;
	resp.mr_handle = obj->uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, &obj->uobject);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_release:
	ib_umem_release(file->device->ib_dev, &obj->umem);

err_free:
	put_uobj_write(&obj->uobject);
	return ret;
}

/*
 * DEREG_MR command: unregister the MR and release the pinned umem.
 * The umem is released only after ib_dereg_mr() succeeds and the
 * object is unpublished.
 */
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr             *mr;
	struct ib_uobject        *uobj;
	struct ib_umem_object    *memobj;
	int                       ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	memobj = container_of(uobj, struct ib_umem_object, uobject);
	mr     = uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_umem_release(file->device->ib_dev, &memobj->umem);

	put_uobj(uobj);

	return in_len;
}

/*
 * CREATE_COMP_CHANNEL command: allocate an event file + fd for
 * completion notifications.  The fd is only installed after the
 * response reaches userspace, so a failed copy can still back out
 * with put_unused_fd().
 */
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel      cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct file                              *filp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	filp = ib_uverbs_alloc_event_file(file, 0, &resp.fd);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	fd_install(resp.fd, filp);
	return in_len;
}

/*
 * CREATE_CQ command: create a completion queue, optionally bound to a
 * completion channel (cmd.comp_channel < 0 means "none").
 */
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq      cmd;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata                 udata;
	struct ib_ucq_object           *obj;
	struct ib_uverbs_event_file    *ev_file = NULL;
	struct ib_cq                   *cq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if (cmd.comp_vector >= file->device->num_comp_vectors)
		return -EINVAL;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_key);
	down_write(&obj->uobject.mutex);

	if (cmd.comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file	   = file;
	obj->comp_events_reported  = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
					     file->ucontext, &udata);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device        = file->device->ib_dev;
	cq->uobject       = &obj->uobject;
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.cq_handle = obj->uobject.id;
	resp.cqe       = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);
	return ret;
}

/*
 * RESIZE_CQ command.  Note that on success only resp.cqe is copied back
 * (sizeof resp.cqe, not sizeof resp) -- resp.cqe is the leading field
 * of the response struct, so the remaining bytes are left untouched in
 * userspace.
 */
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq      cmd;
	struct ib_uverbs_resize_cq_resp resp;
	struct ib_udata                 udata;
	struct ib_cq                   *cq;
	int                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}

/*
 * POLL_CQ command: poll up to cmd.ne completions and copy them out in
 * the 32/64-bit-clean ib_uverbs_wc format.
 *
 * NOTE(review): cmd.ne comes from userspace and the multiplications
 * cmd.ne * sizeof *wc and the rsize computation are unchecked -- a huge
 * ne can overflow the size calculation before kmalloc sees it; verify
 * that cmd.ne is bounded (or add an explicit overflow check).
 */
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq       cmd;
	struct ib_uverbs_poll_cq_resp *resp;
	struct ib_cq                  *cq;
	struct ib_wc                  *wc;
	int                            ret = 0;
	int                            i;
	int                            rsize;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wc = kmalloc(cmd.ne * sizeof *wc, GFP_KERNEL);
	if (!wc)
		return -ENOMEM;

	rsize = sizeof *resp + cmd.ne * sizeof(struct ib_uverbs_wc);
	resp = kmalloc(rsize, GFP_KERNEL);
	if (!resp) {
		ret = -ENOMEM;
		goto out_wc;
	}

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq) {
		ret = -EINVAL;
		goto out;
	}

	resp->count = ib_poll_cq(cq, cmd.ne, wc);

	put_cq_read(cq);

	/* Translate each kernel ib_wc into the userspace wire format. */
	for (i = 0; i < resp->count; i++) {
		resp->wc[i].wr_id 	   = wc[i].wr_id;
		resp->wc[i].status 	   = wc[i].status;
		resp->wc[i].opcode 	   = wc[i].opcode;
		resp->wc[i].vendor_err 	   = wc[i].vendor_err;
		resp->wc[i].byte_len 	   = wc[i].byte_len;
		resp->wc[i].imm_data 	   = (__u32 __force) wc[i].imm_data;
		resp->wc[i].qp_num 	   = wc[i].qp->qp_num;
		resp->wc[i].src_qp 	   = wc[i].src_qp;
		resp->wc[i].wc_flags 	   = wc[i].wc_flags;
		resp->wc[i].pkey_index 	   = wc[i].pkey_index;
		resp->wc[i].slid 	   = wc[i].slid;
		resp->wc[i].sl 		   = wc[i].sl;
		resp->wc[i].dlid_path_bits = wc[i].dlid_path_bits;
		resp->wc[i].port_num 	   = wc[i].port_num;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response, resp, rsize))
		ret = -EFAULT;

out:
	kfree(resp);

out_wc:
	kfree(wc);
	return ret ? ret : in_len;
}

/*
 * REQ_NOTIFY_CQ command: re-arm the CQ for the next (optionally
 * solicited-only) completion event.
 */
ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq                  *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	put_cq_read(cq);

	return in_len;
}

/*
 * DESTROY_CQ command: destroy the CQ and report how many comp/async
 * events had been delivered for it (so userspace can reap the rest
 * from its event queues).  The event counts are read after
 * ib_uverbs_release_ucq() but before the final put_uobj(), while our
 * kref still keeps the storage alive.
 */
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq      cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject	        *uobj;
	struct ib_cq               	*cq;
	struct ib_ucq_object        	*obj;
	struct ib_uverbs_event_file	*ev_file;
	int                        	 ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq      = uobj->object;
	ev_file = cq->cq_context;
	obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, obj);

	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported  = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

/*
 * CREATE_QP command: look up the PD, send/recv CQs and optional SRQ
 * (all read-locked), create the QP, and publish it.  When send and
 * recv CQ are the same handle the lock is taken once; otherwise the
 * recv CQ is locked with the "nested" annotation to keep lockdep happy
 * about taking two cq_lock_key locks.
 */
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp      cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_pd                   *pd;
	struct ib_cq                   *scq, *rcq;
	struct ib_srq                  *srq;
	struct ib_qp                   *qp;
	struct ib_qp_init_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
	down_write(&obj->uevent.uobject.mutex);

	srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
	pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
	scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0);
	rcq = cmd.recv_cq_handle == cmd.send_cq_handle ?
		scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1);

	if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd.qp_type;

	attr.cap.max_send_wr     = cmd.max_send_wr;
	attr.cap.max_recv_wr     = cmd.max_recv_wr;
	attr.cap.max_send_sge    = cmd.max_send_sge;
	attr.cap.max_recv_sge    = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	obj->uevent.events_reported     = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = pd->device->create_qp(pd, &attr, &udata);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	qp->device     	  = pd->device;
	qp->pd         	  = pd;
	qp->send_cq    	  = attr.send_cq;
	qp->recv_cq    	  = attr.recv_cq;
	qp->srq	       	  = attr.srq;
	qp->uobject       = &obj->uevent.uobject;
	qp->event_handler = attr.event_handler;
	qp->qp_context    = attr.qp_context;
	qp->qp_type       = attr.qp_type;
	atomic_inc(&pd->usecnt);
	atomic_inc(&attr.send_cq->usecnt);
	atomic_inc(&attr.recv_cq->usecnt);
	if (attr.srq)
		atomic_inc(&attr.srq->usecnt);

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn             = qp->qp_num;
	resp.qp_handle       = obj->uevent.uobject.id;
	resp.max_recv_sge    = attr.cap.max_recv_sge;
	resp.max_send_sge    = attr.cap.max_send_sge;
	resp.max_recv_wr     = attr.cap.max_recv_wr;
	resp.max_send_wr     = attr.cap.max_send_wr;
	resp.max_inline_data = attr.cap.max_inline_data;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);
	put_cq_read(scq);
	if (rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	/* Only drop the lookups that actually succeeded. */
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

/*
 * QUERY_QP command: query the QP's current attributes and init
 * attributes and translate them into the userspace response layout.
 * attr/init_attr are heap-allocated to keep this deep kernel stack
 * small.
 */
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp      cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp                  *qp;
	struct ib_qp_attr             *attr;
	struct ib_qp_init_attr        *init_attr;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr      = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		/* kfree(NULL) at "out" handles the partial-alloc case. */
		ret = -ENOMEM;
		goto out;
	}

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	put_qp_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state           = attr->qp_state;
	resp.cur_qp_state       = attr->cur_qp_state;
	resp.path_mtu           = attr->path_mtu;
	resp.path_mig_state     = attr->path_mig_state;
	resp.qkey               = attr->qkey;
	resp.rq_psn             = attr->rq_psn;
	resp.sq_psn             = attr->sq_psn;
	resp.dest_qp_num        = attr->dest_qp_num;
	resp.qp_access_flags    = attr->qp_access_flags;
	resp.pkey_index         = attr->pkey_index;
	resp.alt_pkey_index     = attr->alt_pkey_index;
	resp.sq_draining        = attr->sq_draining;
	resp.max_rd_atomic      = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer      = attr->min_rnr_timer;
	resp.port_num           = attr->port_num;
	resp.timeout            = attr->timeout;
	resp.retry_cnt          = attr->retry_cnt;
	resp.rnr_retry          = attr->rnr_retry;
	resp.alt_port_num       = attr->alt_port_num;
	resp.alt_timeout        = attr->alt_timeout;

	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
	resp.dest.flow_label        = attr->ah_attr.grh.flow_label;
	resp.dest.sgid_index        = attr->ah_attr.grh.sgid_index;
	resp.dest.hop_limit         = attr->ah_attr.grh.hop_limit;
	resp.dest.traffic_class     = attr->ah_attr.grh.traffic_class;
	resp.dest.dlid              = attr->ah_attr.dlid;
	resp.dest.sl                = attr->ah_attr.sl;
	resp.dest.src_path_bits     = attr->ah_attr.src_path_bits;
	resp.dest.static_rate       = attr->ah_attr.static_rate;
	resp.dest.is_global         = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
	resp.dest.port_num          = attr->ah_attr.port_num;

	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
	resp.alt_dest.flow_label    = attr->alt_ah_attr.grh.flow_label;
	resp.alt_dest.sgid_index    = attr->alt_ah_attr.grh.sgid_index;
	resp.alt_dest.hop_limit     = attr->alt_ah_attr.grh.hop_limit;
	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
	resp.alt_dest.dlid          = attr->alt_ah_attr.dlid;
	resp.alt_dest.sl            = attr->alt_ah_attr.sl;
	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
	resp.alt_dest.static_rate   = attr->alt_ah_attr.static_rate;
	resp.alt_dest.is_global     = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
	resp.alt_dest.port_num      = attr->alt_ah_attr.port_num;

	resp.max_send_wr            = init_attr->cap.max_send_wr;
	resp.max_recv_wr            = init_attr->cap.max_recv_wr;
	resp.max_send_sge           = init_attr->cap.max_send_sge;
	resp.max_recv_sge           = init_attr->cap.max_recv_sge;
	resp.max_inline_data        = init_attr->cap.max_inline_data;
	resp.sq_sig_all             = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}

/*
 * MODIFY_QP command: translate the userspace attribute struct into an
 * ib_qp_attr and apply it.
 * (Function continues beyond this source chunk.)
 */
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_udata            udata;
	struct ib_qp              *qp;
	struct ib_qp_attr         *attr;
	int                        ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state 		  = cmd.qp_state;
	attr->cur_qp_state 	  = cmd.cur_qp_state;
	attr->path_mtu 		  = cmd.path_mtu;
	attr->path_mig_state 	  = cmd.path_mig_state;
	attr->qkey 		  = cmd.qkey;
	attr->rq_psn 		  = cmd.rq_psn;
	attr->sq_psn 		  = cmd.sq_psn;
	attr->dest_qp_num 	  = cmd.dest_qp_num;
	attr->qp_access_flags 	  = cmd.qp_access_flags;
	attr->pkey_index 	  = cmd.pkey_index;
	attr->alt_pkey_index 	  = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic 	  = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer 	  = cmd.min_rnr_timer;
	attr->port_num 		  = cmd.port_num;
	attr->timeout 		  = cmd.timeout;
	attr->retry_cnt 	  = cmd.retry_cnt;
	attr->rnr_retry 	  = cmd.rnr_retry;
	attr->alt_port_num 	  = cmd.alt_port_num;
	attr->alt_timeout 	  = cmd.alt_timeout;

	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label        = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index        = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit         = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class     = cmd.dest.traffic_class;
	attr->ah_attr.dlid 	    	    = cmd.dest.dlid;
	attr->ah_attr.sl   	    	    = cmd.dest.sl;
attr->ah_attr.src_path_bits = cmd.dest.src_path_bits; 1328 attr->ah_attr.static_rate = cmd.dest.static_rate; 1329 attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0; 1330 attr->ah_attr.port_num = cmd.dest.port_num; 1331 1332 memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16); 1333 attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label; 1334 attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index; 1335 attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit; 1336 attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class; 1337 attr->alt_ah_attr.dlid = cmd.alt_dest.dlid; 1338 attr->alt_ah_attr.sl = cmd.alt_dest.sl; 1339 attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits; 1340 attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate; 1341 attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0; 1342 attr->alt_ah_attr.port_num = cmd.alt_dest.port_num; 1343 1344 ret = qp->device->modify_qp(qp, attr, cmd.attr_mask, &udata); 1345 1346 put_qp_read(qp); 1347 1348 if (ret) 1349 goto out; 1350 1351 ret = in_len; 1352 1353 out: 1354 kfree(attr); 1355 1356 return ret; 1357 } 1358 1359 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file, 1360 const char __user *buf, int in_len, 1361 int out_len) 1362 { 1363 struct ib_uverbs_destroy_qp cmd; 1364 struct ib_uverbs_destroy_qp_resp resp; 1365 struct ib_uobject *uobj; 1366 struct ib_qp *qp; 1367 struct ib_uqp_object *obj; 1368 int ret = -EINVAL; 1369 1370 if (copy_from_user(&cmd, buf, sizeof cmd)) 1371 return -EFAULT; 1372 1373 memset(&resp, 0, sizeof resp); 1374 1375 uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext); 1376 if (!uobj) 1377 return -EINVAL; 1378 qp = uobj->object; 1379 obj = container_of(uobj, struct ib_uqp_object, uevent.uobject); 1380 1381 if (!list_empty(&obj->mcast_list)) { 1382 put_uobj_write(uobj); 1383 return -EBUSY; 1384 } 1385 1386 ret = ib_destroy_qp(qp); 1387 if (!ret) 1388 uobj->live = 0; 1389 1390 put_uobj_write(uobj); 1391 
1392 if (ret) 1393 return ret; 1394 1395 idr_remove_uobj(&ib_uverbs_qp_idr, uobj); 1396 1397 mutex_lock(&file->mutex); 1398 list_del(&uobj->list); 1399 mutex_unlock(&file->mutex); 1400 1401 ib_uverbs_release_uevent(file, &obj->uevent); 1402 1403 resp.events_reported = obj->uevent.events_reported; 1404 1405 put_uobj(uobj); 1406 1407 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1408 &resp, sizeof resp)) 1409 return -EFAULT; 1410 1411 return in_len; 1412 } 1413 1414 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, 1415 const char __user *buf, int in_len, 1416 int out_len) 1417 { 1418 struct ib_uverbs_post_send cmd; 1419 struct ib_uverbs_post_send_resp resp; 1420 struct ib_uverbs_send_wr *user_wr; 1421 struct ib_send_wr *wr = NULL, *last, *next, *bad_wr; 1422 struct ib_qp *qp; 1423 int i, sg_ind; 1424 int is_ud; 1425 ssize_t ret = -EINVAL; 1426 1427 if (copy_from_user(&cmd, buf, sizeof cmd)) 1428 return -EFAULT; 1429 1430 if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count + 1431 cmd.sge_count * sizeof (struct ib_uverbs_sge)) 1432 return -EINVAL; 1433 1434 if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr)) 1435 return -EINVAL; 1436 1437 user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL); 1438 if (!user_wr) 1439 return -ENOMEM; 1440 1441 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 1442 if (!qp) 1443 goto out; 1444 1445 is_ud = qp->qp_type == IB_QPT_UD; 1446 sg_ind = 0; 1447 last = NULL; 1448 for (i = 0; i < cmd.wr_count; ++i) { 1449 if (copy_from_user(user_wr, 1450 buf + sizeof cmd + i * cmd.wqe_size, 1451 cmd.wqe_size)) { 1452 ret = -EFAULT; 1453 goto out_put; 1454 } 1455 1456 if (user_wr->num_sge + sg_ind > cmd.sge_count) { 1457 ret = -EINVAL; 1458 goto out_put; 1459 } 1460 1461 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) + 1462 user_wr->num_sge * sizeof (struct ib_sge), 1463 GFP_KERNEL); 1464 if (!next) { 1465 ret = -ENOMEM; 1466 goto out_put; 1467 } 1468 1469 if (!last) 1470 wr = next; 1471 else 1472 last->next = next; 
1473 last = next; 1474 1475 next->next = NULL; 1476 next->wr_id = user_wr->wr_id; 1477 next->num_sge = user_wr->num_sge; 1478 next->opcode = user_wr->opcode; 1479 next->send_flags = user_wr->send_flags; 1480 next->imm_data = (__be32 __force) user_wr->imm_data; 1481 1482 if (is_ud) { 1483 next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah, 1484 file->ucontext); 1485 if (!next->wr.ud.ah) { 1486 ret = -EINVAL; 1487 goto out_put; 1488 } 1489 next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn; 1490 next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey; 1491 } else { 1492 switch (next->opcode) { 1493 case IB_WR_RDMA_WRITE: 1494 case IB_WR_RDMA_WRITE_WITH_IMM: 1495 case IB_WR_RDMA_READ: 1496 next->wr.rdma.remote_addr = 1497 user_wr->wr.rdma.remote_addr; 1498 next->wr.rdma.rkey = 1499 user_wr->wr.rdma.rkey; 1500 break; 1501 case IB_WR_ATOMIC_CMP_AND_SWP: 1502 case IB_WR_ATOMIC_FETCH_AND_ADD: 1503 next->wr.atomic.remote_addr = 1504 user_wr->wr.atomic.remote_addr; 1505 next->wr.atomic.compare_add = 1506 user_wr->wr.atomic.compare_add; 1507 next->wr.atomic.swap = user_wr->wr.atomic.swap; 1508 next->wr.atomic.rkey = user_wr->wr.atomic.rkey; 1509 break; 1510 default: 1511 break; 1512 } 1513 } 1514 1515 if (next->num_sge) { 1516 next->sg_list = (void *) next + 1517 ALIGN(sizeof *next, sizeof (struct ib_sge)); 1518 if (copy_from_user(next->sg_list, 1519 buf + sizeof cmd + 1520 cmd.wr_count * cmd.wqe_size + 1521 sg_ind * sizeof (struct ib_sge), 1522 next->num_sge * sizeof (struct ib_sge))) { 1523 ret = -EFAULT; 1524 goto out_put; 1525 } 1526 sg_ind += next->num_sge; 1527 } else 1528 next->sg_list = NULL; 1529 } 1530 1531 resp.bad_wr = 0; 1532 ret = qp->device->post_send(qp, wr, &bad_wr); 1533 if (ret) 1534 for (next = wr; next; next = next->next) { 1535 ++resp.bad_wr; 1536 if (next == bad_wr) 1537 break; 1538 } 1539 1540 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1541 &resp, sizeof resp)) 1542 ret = -EFAULT; 1543 1544 out_put: 1545 put_qp_read(qp); 1546 1547 while 
(wr) { 1548 if (is_ud && wr->wr.ud.ah) 1549 put_ah_read(wr->wr.ud.ah); 1550 next = wr->next; 1551 kfree(wr); 1552 wr = next; 1553 } 1554 1555 out: 1556 kfree(user_wr); 1557 1558 return ret ? ret : in_len; 1559 } 1560 1561 static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf, 1562 int in_len, 1563 u32 wr_count, 1564 u32 sge_count, 1565 u32 wqe_size) 1566 { 1567 struct ib_uverbs_recv_wr *user_wr; 1568 struct ib_recv_wr *wr = NULL, *last, *next; 1569 int sg_ind; 1570 int i; 1571 int ret; 1572 1573 if (in_len < wqe_size * wr_count + 1574 sge_count * sizeof (struct ib_uverbs_sge)) 1575 return ERR_PTR(-EINVAL); 1576 1577 if (wqe_size < sizeof (struct ib_uverbs_recv_wr)) 1578 return ERR_PTR(-EINVAL); 1579 1580 user_wr = kmalloc(wqe_size, GFP_KERNEL); 1581 if (!user_wr) 1582 return ERR_PTR(-ENOMEM); 1583 1584 sg_ind = 0; 1585 last = NULL; 1586 for (i = 0; i < wr_count; ++i) { 1587 if (copy_from_user(user_wr, buf + i * wqe_size, 1588 wqe_size)) { 1589 ret = -EFAULT; 1590 goto err; 1591 } 1592 1593 if (user_wr->num_sge + sg_ind > sge_count) { 1594 ret = -EINVAL; 1595 goto err; 1596 } 1597 1598 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) + 1599 user_wr->num_sge * sizeof (struct ib_sge), 1600 GFP_KERNEL); 1601 if (!next) { 1602 ret = -ENOMEM; 1603 goto err; 1604 } 1605 1606 if (!last) 1607 wr = next; 1608 else 1609 last->next = next; 1610 last = next; 1611 1612 next->next = NULL; 1613 next->wr_id = user_wr->wr_id; 1614 next->num_sge = user_wr->num_sge; 1615 1616 if (next->num_sge) { 1617 next->sg_list = (void *) next + 1618 ALIGN(sizeof *next, sizeof (struct ib_sge)); 1619 if (copy_from_user(next->sg_list, 1620 buf + wr_count * wqe_size + 1621 sg_ind * sizeof (struct ib_sge), 1622 next->num_sge * sizeof (struct ib_sge))) { 1623 ret = -EFAULT; 1624 goto err; 1625 } 1626 sg_ind += next->num_sge; 1627 } else 1628 next->sg_list = NULL; 1629 } 1630 1631 kfree(user_wr); 1632 return wr; 1633 1634 err: 1635 kfree(user_wr); 1636 1637 while (wr) { 
		/* Unwind the partially-built chain on error. */
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

/*
 * Handle the POST_RECV uverbs command: unmarshal the userspace WR list
 * via ib_uverbs_unmarshall_recv(), post it to the QP's receive queue,
 * and report via resp.bad_wr how many WRs were consumed up to and
 * including the failing one.
 */
ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv      cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr              *wr, *next, *bad_wr;
	struct ib_qp                   *qp;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	/* Call the driver directly; the chain was validated during unmarshalling. */
	ret = qp->device->post_recv(qp, wr, &bad_wr);

	put_qp_read(qp);

	if (ret)
		/* Count WRs up to and including the one that failed. */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	/* The WR chain is kernel-private; always free it before returning. */
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}

/*
 * Handle the POST_SRQ_RECV uverbs command: identical flow to
 * ib_uverbs_post_recv() except the WR chain is posted to a shared
 * receive queue (SRQ) instead of a QP receive queue.
 */
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv      cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr                  *wr, *next, *bad_wr;
	struct ib_srq                      *srq;
	ssize_t                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	put_srq_read(srq);

	if (ret)
		/* Count WRs up to and including the one that failed. */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ?
ret : in_len; /* NOTE(review): completes "return ret ? ..." split at the chunk boundary */
}

/*
 * Handle the CREATE_AH uverbs command: allocate and register a new
 * uobject, unmarshal the userspace address-vector attributes, create
 * the address handle on the given PD, insert it into the AH idr, and
 * return its handle to userspace.
 *
 * Error unwinding runs in strict reverse order of construction
 * (err_copy -> err_destroy -> err_put -> err).
 */
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah	 cmd;
	struct ib_uverbs_create_ah_resp	 resp;
	struct ib_uobject		*uobj;
	struct ib_pd			*pd;
	struct ib_ah			*ah;
	struct ib_ah_attr		attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	/* Hold the uobject rwsem for writing for the whole construction. */
	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_key);
	down_write(&uobj->mutex);

	pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.dlid 	       = cmd.attr.dlid;
	attr.sl 	       = cmd.attr.sl;
	attr.src_path_bits     = cmd.attr.src_path_bits;
	attr.static_rate       = cmd.attr.static_rate;
	attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num 	       = cmd.attr.port_num;
	attr.grh.flow_label    = cmd.attr.grh.flow_label;
	attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject  = uobj;
	uobj->object = ah;

	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	/* Setting "live" makes the handle visible to idr lookups. */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
	ib_destroy_ah(ah);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

/*
 * Handle the DESTROY_AH uverbs command: take the AH uobject for
 * writing, destroy the handle, clear "live" so further lookups fail,
 * then remove it from the idr and the per-context list and drop the
 * final reference.
 */
ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_ah		   *ah;
	struct ib_uobject	   *uobj;
	int			    ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	ah = uobj->object;

	ret = ib_destroy_ah(ah);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

/*
 * Handle the ATTACH_MCAST uverbs command: attach the QP to the given
 * multicast GID/LID and remember the attachment on the QP's uobject so
 * destroy_qp can refuse while attachments remain.  Attaching the same
 * GID/LID twice is a silent success.
 */
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp                 *qp;
	struct ib_uqp_object         *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* Idempotent: an identical existing attachment returns success. */
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret =
ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		/* Track the attachment so destroy_qp can return -EBUSY. */
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	put_qp_read(qp);

	return ret ? ret : in_len;
}

/*
 * Handle the DETACH_MCAST uverbs command: detach the QP from the given
 * multicast GID/LID and remove the matching bookkeeping entry from the
 * QP uobject's attachment list.
 */
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object         *obj;
	struct ib_qp                 *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
	if (ret)
		goto out_put;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* Drop the bookkeeping entry that matches this GID/LID, if any. */
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			break;
		}

out_put:
	put_qp_read(qp);

	return ret ? ret : in_len;
}

/*
 * Handle the CREATE_SRQ uverbs command: allocate a uevent uobject,
 * create the shared receive queue on the given PD via the driver's
 * create_srq entry point (passing trailing command/response bytes as
 * udata), register it in the SRQ idr, and return handle plus the
 * possibly-adjusted max_wr/max_sge limits to userspace.
 */
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq      cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	struct ib_uevent_object         *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	struct ib_srq_init_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/* Hold the uobject rwsem for writing for the whole construction. */
	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &srq_lock_key);
	down_write(&obj->uobject.mutex);

	pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.attr.max_wr    = cmd.max_wr;
	attr.attr.max_sge   = cmd.max_sge;
	attr.attr.srq_limit = cmd.srq_limit;

	obj->events_reported     = 0;
	INIT_LIST_HEAD(&obj->event_list);

	srq = pd->device->create_srq(pd, &attr, &udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	/* Fill in the generic SRQ fields the driver does not set. */
	srq->device    	   = pd->device;
	srq->pd        	   = pd;
	srq->uobject       = &obj->uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;
	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uobject.object = srq;
	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uobject.id;
	/* Report back the limits as possibly adjusted by the driver. */
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;

	if (copy_to_user((void __user *)
(unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->srq_list);
	mutex_unlock(&file->mutex);

	/* Setting "live" makes the handle visible to idr lookups. */
	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uobject);

err_destroy:
	ib_destroy_srq(srq);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(&obj->uobject);
	return ret;
}

/*
 * Handle the MODIFY_SRQ uverbs command: look up the SRQ and call the
 * driver's modify_srq entry point with the caller's attr_mask,
 * passing trailing command bytes through as udata.
 */
ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata             udata;
	struct ib_srq              *srq;
	struct ib_srq_attr          attr;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	/* Only max_wr/srq_limit are modifiable; attr_mask selects which. */
	attr.max_wr    = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	put_srq_read(srq);

	return ret ? ret : in_len;
}

/*
 * Handle the QUERY_SRQ uverbs command: query the SRQ's current
 * max_wr/max_sge/srq_limit via ib_query_srq() and copy them to
 * userspace.
 */
ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq      cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr              attr;
	struct ib_srq                   *srq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	put_srq_read(srq);

	if (ret)
		return ret;

	/* Zero the response so padding never leaks kernel stack to userspace. */
	memset(&resp, 0, sizeof resp);

	resp.max_wr    = attr.max_wr;
	resp.max_sge   = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

/*
 * Handle the DESTROY_SRQ uverbs command: take the SRQ uobject for
 * writing, destroy the SRQ, clear "live", remove the uobject from the
 * idr and per-context list, and report the number of async events
 * already delivered for it back to userspace.
 */
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_srq               	 *srq;
	struct ib_uevent_object        	 *obj;
	int                         	  ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);

	ret = ib_destroy_srq(srq);
	if (!ret)
		/* Clearing "live" makes subsequent idr lookups of this handle fail. */
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, obj);

	memset(&resp, 0, sizeof resp);
	resp.events_reported = obj->events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

	return ret ? ret : in_len;
}