/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: uverbs_cmd.c 2708 2005-06-24 17:27:21Z roland $
 */

#include <linux/file.h>
#include <linux/fs.h>

#include <asm/uaccess.h>

#include "uverbs.h"

#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)		\
        do {							\
                (udata)->inbuf  = (void __user *) (ibuf);	\
                (udata)->outbuf = (void __user *) (obuf);	\
                (udata)->inlen  = (ilen);			\
                (udata)->outlen = (olen);			\
        } while (0)

ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
                const char __user *buf,
                int in_len, int out_len)
{
        struct ib_uverbs_get_context cmd;
        struct ib_uverbs_get_context_resp resp;
        struct ib_udata udata;
        struct ib_device *ibdev = file->device->ib_dev;
        struct ib_ucontext *ucontext;
        struct file *filp;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        mutex_lock(&file->mutex);

        if (file->ucontext) {
                ret = -EINVAL;
                goto err;
        }

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        ucontext = ibdev->alloc_ucontext(ibdev, &udata);
        if (IS_ERR(ucontext)) {
                ret = PTR_ERR(ucontext);
                goto err;
        }

        ucontext->device = ibdev;
        INIT_LIST_HEAD(&ucontext->pd_list);
        INIT_LIST_HEAD(&ucontext->mr_list);
        INIT_LIST_HEAD(&ucontext->mw_list);
        INIT_LIST_HEAD(&ucontext->cq_list);
        INIT_LIST_HEAD(&ucontext->qp_list);
        INIT_LIST_HEAD(&ucontext->srq_list);
        INIT_LIST_HEAD(&ucontext->ah_list);

        resp.num_comp_vectors = file->device->num_comp_vectors;

        filp = ib_uverbs_alloc_event_file(file, 1, &resp.async_fd);
        if (IS_ERR(filp)) {
                ret = PTR_ERR(filp);
                goto err_free;
        }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_file;
        }

        file->async_file = filp->private_data;

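        /*
         * Register an asynchronous event handler so that device events
         * are forwarded to the async event file created above.
         */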
        INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
                              ib_uverbs_event_handler);
        ret = ib_register_event_handler(&file->event_handler);
        if (ret)
                goto err_file;

        kref_get(&file->async_file->ref);
        kref_get(&file->ref);
        file->ucontext = ucontext;

        fd_install(resp.async_fd, filp);

        mutex_unlock(&file->mutex);

        return in_len;

err_file:
        put_unused_fd(resp.async_fd);
        fput(filp);

err_free:
        ibdev->dealloc_ucontext(ucontext);

err:
        mutex_unlock(&file->mutex);
        return ret;
}

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
                const char __user *buf,
                int in_len, int out_len)
{
        struct ib_uverbs_query_device cmd;
        struct ib_uverbs_query_device_resp resp;
        struct ib_device_attr attr;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        ret = ib_query_device(file->device->ib_dev, &attr);
        if (ret)
                return ret;

        memset(&resp, 0, sizeof resp);

        resp.fw_ver = attr.fw_ver;
        resp.node_guid = file->device->ib_dev->node_guid;
        resp.sys_image_guid = attr.sys_image_guid;
        resp.max_mr_size = attr.max_mr_size;
        resp.page_size_cap = attr.page_size_cap;
        resp.vendor_id = attr.vendor_id;
        resp.vendor_part_id = attr.vendor_part_id;
        resp.hw_ver = attr.hw_ver;
        resp.max_qp = attr.max_qp;
        resp.max_qp_wr = attr.max_qp_wr;
        resp.device_cap_flags = attr.device_cap_flags;
        resp.max_sge = attr.max_sge;
        resp.max_sge_rd = attr.max_sge_rd;
        resp.max_cq = attr.max_cq;
        resp.max_cqe = attr.max_cqe;
        resp.max_mr = attr.max_mr;
        resp.max_pd = attr.max_pd;
        resp.max_qp_rd_atom = attr.max_qp_rd_atom;
        resp.max_ee_rd_atom = attr.max_ee_rd_atom;
        resp.max_res_rd_atom = attr.max_res_rd_atom;
        resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom;
        resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom;
        resp.atomic_cap = attr.atomic_cap;
        resp.max_ee = attr.max_ee;
        resp.max_rdd = attr.max_rdd;
        resp.max_mw = attr.max_mw;
        resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp;
        resp.max_raw_ethy_qp = attr.max_raw_ethy_qp;
        resp.max_mcast_grp = attr.max_mcast_grp;
        resp.max_mcast_qp_attach = attr.max_mcast_qp_attach;
        resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
        resp.max_ah = attr.max_ah;
        resp.max_fmr = attr.max_fmr;
        resp.max_map_per_fmr = attr.max_map_per_fmr;
        resp.max_srq = attr.max_srq;
        resp.max_srq_wr = attr.max_srq_wr;
        resp.max_srq_sge = attr.max_srq_sge;
        resp.max_pkeys = attr.max_pkeys;
        resp.local_ca_ack_delay = attr.local_ca_ack_delay;
        resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}
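
/*
 * Return the attributes of one device port (state, MTU, LIDs, table
 * sizes, etc.) to userspace.
 */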
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
                const char __user *buf,
                int in_len, int out_len)
{
        struct ib_uverbs_query_port cmd;
        struct ib_uverbs_query_port_resp resp;
        struct ib_port_attr attr;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
        if (ret)
                return ret;

        memset(&resp, 0, sizeof resp);

        resp.state = attr.state;
        resp.max_mtu = attr.max_mtu;
        resp.active_mtu = attr.active_mtu;
        resp.gid_tbl_len = attr.gid_tbl_len;
        resp.port_cap_flags = attr.port_cap_flags;
        resp.max_msg_sz = attr.max_msg_sz;
        resp.bad_pkey_cntr = attr.bad_pkey_cntr;
        resp.qkey_viol_cntr = attr.qkey_viol_cntr;
        resp.pkey_tbl_len = attr.pkey_tbl_len;
        resp.lid = attr.lid;
        resp.sm_lid = attr.sm_lid;
        resp.lmc = attr.lmc;
        resp.max_vl_num = attr.max_vl_num;
        resp.sm_sl = attr.sm_sl;
        resp.subnet_timeout = attr.subnet_timeout;
        resp.init_type_reply = attr.init_type_reply;
        resp.active_width = attr.active_width;
        resp.active_speed = attr.active_speed;
        resp.phys_state = attr.phys_state;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}

ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
                const char __user *buf,
                int in_len, int out_len)
{
        struct ib_uverbs_alloc_pd cmd;
        struct ib_uverbs_alloc_pd_resp resp;
        struct ib_udata udata;
        struct ib_uobject *uobj;
        struct ib_pd *pd;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        uobj->context = file->ucontext;

        pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
                                            file->ucontext, &udata);
        if (IS_ERR(pd)) {
                ret = PTR_ERR(pd);
                goto err;
        }

        pd->device = file->device->ib_dev;
        pd->uobject = uobj;
        atomic_set(&pd->usecnt, 0);

        mutex_lock(&ib_uverbs_idr_mutex);

retry:
        if (!idr_pre_get(&ib_uverbs_pd_idr, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto err_up;
        }

        ret = idr_get_new(&ib_uverbs_pd_idr, pd, &uobj->id);

        if (ret == -EAGAIN)
                goto retry;
        if (ret)
                goto err_up;

        memset(&resp, 0, sizeof resp);
        resp.pd_handle = uobj->id;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_idr;
        }

        mutex_lock(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->pd_list);
        mutex_unlock(&file->mutex);

        mutex_unlock(&ib_uverbs_idr_mutex);

        return in_len;

err_idr:
        idr_remove(&ib_uverbs_pd_idr, uobj->id);

err_up:
        mutex_unlock(&ib_uverbs_idr_mutex);
        ib_dealloc_pd(pd);

err:
        kfree(uobj);
        return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
                const char __user *buf,
                int in_len, int out_len)
{
        struct ib_uverbs_dealloc_pd cmd;
        struct ib_pd *pd;
        struct ib_uobject *uobj;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        mutex_lock(&ib_uverbs_idr_mutex);

        pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
        if (!pd || pd->uobject->context != file->ucontext)
                goto out;

        uobj = pd->uobject;

        ret = ib_dealloc_pd(pd);
        if (ret)
                goto out;

        idr_remove(&ib_uverbs_pd_idr, cmd.pd_handle);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        kfree(uobj);

out:
        mutex_unlock(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}
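
/*
 * Register a user memory region: pin the pages with ib_umem_get(),
 * let the driver set up the MR via its reg_user_mr() method, and hand
 * the resulting handle and lkey/rkey back to userspace.
 */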
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
                const char __user *buf, int in_len,
                int out_len)
{
        struct ib_uverbs_reg_mr cmd;
        struct ib_uverbs_reg_mr_resp resp;
        struct ib_udata udata;
        struct ib_umem_object *obj;
        struct ib_pd *pd;
        struct ib_mr *mr;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
                return -EINVAL;

        /*
         * Local write permission is required if remote write or
         * remote atomic permission is also requested.
         */
        if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
            !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE))
                return -EINVAL;

        obj = kmalloc(sizeof *obj, GFP_KERNEL);
        if (!obj)
                return -ENOMEM;

        obj->uobject.context = file->ucontext;

        /*
         * We ask for writable memory if any access flags other than
         * "remote read" are set.  "Local write" and "remote write"
         * obviously require write access.  "Remote atomic" can do
         * things like fetch and add, which will modify memory, and
         * "MW bind" can change permissions by binding a window.
         */
        ret = ib_umem_get(file->device->ib_dev, &obj->umem,
                          (void *) (unsigned long) cmd.start, cmd.length,
                          !!(cmd.access_flags & ~IB_ACCESS_REMOTE_READ));
        if (ret)
                goto err_free;

        obj->umem.virt_base = cmd.hca_va;

        mutex_lock(&ib_uverbs_idr_mutex);

        pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
        if (!pd || pd->uobject->context != file->ucontext) {
                ret = -EINVAL;
                goto err_up;
        }

        if (!pd->device->reg_user_mr) {
                ret = -ENOSYS;
                goto err_up;
        }

        mr = pd->device->reg_user_mr(pd, &obj->umem, cmd.access_flags, &udata);
        if (IS_ERR(mr)) {
                ret = PTR_ERR(mr);
                goto err_up;
        }

        mr->device = pd->device;
        mr->pd = pd;
        mr->uobject = &obj->uobject;
        atomic_inc(&pd->usecnt);
        atomic_set(&mr->usecnt, 0);

        memset(&resp, 0, sizeof resp);
        resp.lkey = mr->lkey;
        resp.rkey = mr->rkey;

retry:
        if (!idr_pre_get(&ib_uverbs_mr_idr, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto err_unreg;
        }

        ret = idr_get_new(&ib_uverbs_mr_idr, mr, &obj->uobject.id);

        if (ret == -EAGAIN)
                goto retry;
        if (ret)
                goto err_unreg;

        resp.mr_handle = obj->uobject.id;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_idr;
        }

        mutex_lock(&file->mutex);
        list_add_tail(&obj->uobject.list, &file->ucontext->mr_list);
        mutex_unlock(&file->mutex);

        mutex_unlock(&ib_uverbs_idr_mutex);

        return in_len;

err_idr:
        idr_remove(&ib_uverbs_mr_idr, obj->uobject.id);

err_unreg:
        ib_dereg_mr(mr);
        atomic_dec(&pd->usecnt);

err_up:
        mutex_unlock(&ib_uverbs_idr_mutex);

        ib_umem_release(file->device->ib_dev, &obj->umem);

err_free:
        kfree(obj);
        return ret;
}
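
/*
 * Deregister a memory region and release the pinned user pages
 * backing it.
 */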
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
                const char __user *buf, int in_len,
                int out_len)
{
        struct ib_uverbs_dereg_mr cmd;
        struct ib_mr *mr;
        struct ib_umem_object *memobj;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        mutex_lock(&ib_uverbs_idr_mutex);

        mr = idr_find(&ib_uverbs_mr_idr, cmd.mr_handle);
        if (!mr || mr->uobject->context != file->ucontext)
                goto out;

        memobj = container_of(mr->uobject, struct ib_umem_object, uobject);

        ret = ib_dereg_mr(mr);
        if (ret)
                goto out;

        idr_remove(&ib_uverbs_mr_idr, cmd.mr_handle);

        mutex_lock(&file->mutex);
        list_del(&memobj->uobject.list);
        mutex_unlock(&file->mutex);

        ib_umem_release(file->device->ib_dev, &memobj->umem);
        kfree(memobj);

out:
        mutex_unlock(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}

ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
                const char __user *buf, int in_len,
                int out_len)
{
        struct ib_uverbs_create_comp_channel cmd;
        struct ib_uverbs_create_comp_channel_resp resp;
        struct file *filp;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        filp = ib_uverbs_alloc_event_file(file, 0, &resp.fd);
        if (IS_ERR(filp))
                return PTR_ERR(filp);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                put_unused_fd(resp.fd);
                fput(filp);
                return -EFAULT;
        }

        fd_install(resp.fd, filp);
        return in_len;
}
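
/*
 * Create a completion queue.  If the request names a completion
 * channel, completion events for this CQ are delivered through that
 * channel's event file.
 */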
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
                const char __user *buf, int in_len,
                int out_len)
{
        struct ib_uverbs_create_cq cmd;
        struct ib_uverbs_create_cq_resp resp;
        struct ib_udata udata;
        struct ib_ucq_object *uobj;
        struct ib_uverbs_event_file *ev_file = NULL;
        struct ib_cq *cq;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        if (cmd.comp_vector >= file->device->num_comp_vectors)
                return -EINVAL;

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        if (cmd.comp_channel >= 0) {
                ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
                if (!ev_file) {
                        ret = -EINVAL;
                        goto err;
                }
        }

        uobj->uobject.user_handle = cmd.user_handle;
        uobj->uobject.context = file->ucontext;
        uobj->uverbs_file = file;
        uobj->comp_events_reported = 0;
        uobj->async_events_reported = 0;
        INIT_LIST_HEAD(&uobj->comp_list);
        INIT_LIST_HEAD(&uobj->async_list);

        cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
                                             file->ucontext, &udata);
        if (IS_ERR(cq)) {
                ret = PTR_ERR(cq);
                goto err;
        }

        cq->device = file->device->ib_dev;
        cq->uobject = &uobj->uobject;
        cq->comp_handler = ib_uverbs_comp_handler;
        cq->event_handler = ib_uverbs_cq_event_handler;
        cq->cq_context = ev_file;
        atomic_set(&cq->usecnt, 0);

        mutex_lock(&ib_uverbs_idr_mutex);

retry:
        if (!idr_pre_get(&ib_uverbs_cq_idr, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto err_up;
        }

        ret = idr_get_new(&ib_uverbs_cq_idr, cq, &uobj->uobject.id);

        if (ret == -EAGAIN)
                goto retry;
        if (ret)
                goto err_up;

        memset(&resp, 0, sizeof resp);
        resp.cq_handle = uobj->uobject.id;
        resp.cqe = cq->cqe;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_idr;
        }

        mutex_lock(&file->mutex);
        list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list);
        mutex_unlock(&file->mutex);

        mutex_unlock(&ib_uverbs_idr_mutex);

        return in_len;

err_idr:
        idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id);

err_up:
        mutex_unlock(&ib_uverbs_idr_mutex);
        ib_destroy_cq(cq);

err:
        if (ev_file)
                ib_uverbs_release_ucq(file, ev_file, uobj);
        kfree(uobj);
        return ret;
}

ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
                const char __user *buf, int in_len,
                int out_len)
{
        struct ib_uverbs_resize_cq cmd;
        struct ib_uverbs_resize_cq_resp resp;
        struct ib_udata udata;
        struct ib_cq *cq;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        mutex_lock(&ib_uverbs_idr_mutex);

        cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
        if (!cq || cq->uobject->context != file->ucontext || !cq->device->resize_cq)
                goto out;

        ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
        if (ret)
                goto out;

        memset(&resp, 0, sizeof resp);
        resp.cqe = cq->cqe;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out:
        mutex_unlock(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}
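
/*
 * Poll up to cmd.ne completions from a CQ and copy the work
 * completions back to userspace.
 */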
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
                const char __user *buf, int in_len,
                int out_len)
{
        struct ib_uverbs_poll_cq cmd;
        struct ib_uverbs_poll_cq_resp *resp;
        struct ib_cq *cq;
        struct ib_wc *wc;
        int ret = 0;
        int i;
        int rsize;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        wc = kmalloc(cmd.ne * sizeof *wc, GFP_KERNEL);
        if (!wc)
                return -ENOMEM;

        rsize = sizeof *resp + cmd.ne * sizeof(struct ib_uverbs_wc);
        resp = kmalloc(rsize, GFP_KERNEL);
        if (!resp) {
                ret = -ENOMEM;
                goto out_wc;
        }

        mutex_lock(&ib_uverbs_idr_mutex);
        cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
        if (!cq || cq->uobject->context != file->ucontext) {
                ret = -EINVAL;
                goto out;
        }

        resp->count = ib_poll_cq(cq, cmd.ne, wc);

        for (i = 0; i < resp->count; i++) {
                resp->wc[i].wr_id = wc[i].wr_id;
                resp->wc[i].status = wc[i].status;
                resp->wc[i].opcode = wc[i].opcode;
                resp->wc[i].vendor_err = wc[i].vendor_err;
                resp->wc[i].byte_len = wc[i].byte_len;
                resp->wc[i].imm_data = (__u32 __force) wc[i].imm_data;
                resp->wc[i].qp_num = wc[i].qp_num;
                resp->wc[i].src_qp = wc[i].src_qp;
                resp->wc[i].wc_flags = wc[i].wc_flags;
                resp->wc[i].pkey_index = wc[i].pkey_index;
                resp->wc[i].slid = wc[i].slid;
                resp->wc[i].sl = wc[i].sl;
                resp->wc[i].dlid_path_bits = wc[i].dlid_path_bits;
                resp->wc[i].port_num = wc[i].port_num;
        }

        if (copy_to_user((void __user *) (unsigned long) cmd.response, resp, rsize))
                ret = -EFAULT;

out:
        mutex_unlock(&ib_uverbs_idr_mutex);
        kfree(resp);

out_wc:
        kfree(wc);
        return ret ? ret : in_len;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
                const char __user *buf, int in_len,
                int out_len)
{
        struct ib_uverbs_req_notify_cq cmd;
        struct ib_cq *cq;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        mutex_lock(&ib_uverbs_idr_mutex);
        cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
        if (cq && cq->uobject->context == file->ucontext) {
                ib_req_notify_cq(cq, cmd.solicited_only ?
                                 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
                ret = in_len;
        }
        mutex_unlock(&ib_uverbs_idr_mutex);

        return ret;
}

ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
                const char __user *buf, int in_len,
                int out_len)
{
        struct ib_uverbs_destroy_cq cmd;
        struct ib_uverbs_destroy_cq_resp resp;
        struct ib_cq *cq;
        struct ib_ucq_object *uobj;
        struct ib_uverbs_event_file *ev_file;
        u64 user_handle;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        memset(&resp, 0, sizeof resp);

        mutex_lock(&ib_uverbs_idr_mutex);

        cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
        if (!cq || cq->uobject->context != file->ucontext)
                goto out;

        user_handle = cq->uobject->user_handle;
        uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);
        ev_file = cq->cq_context;

        ret = ib_destroy_cq(cq);
        if (ret)
                goto out;

        idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle);

        mutex_lock(&file->mutex);
        list_del(&uobj->uobject.list);
        mutex_unlock(&file->mutex);

        ib_uverbs_release_ucq(file, ev_file, uobj);

        resp.comp_events_reported = uobj->comp_events_reported;
        resp.async_events_reported = uobj->async_events_reported;

        kfree(uobj);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out:
        mutex_unlock(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}
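
/*
 * Create a queue pair on a PD, wiring it to its send and receive CQs
 * (and optional SRQ), then return the QP number, handle and actual
 * capabilities to userspace.
 */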
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
                const char __user *buf, int in_len,
                int out_len)
{
        struct ib_uverbs_create_qp cmd;
        struct ib_uverbs_create_qp_resp resp;
        struct ib_udata udata;
        struct ib_uqp_object *uobj;
        struct ib_pd *pd;
        struct ib_cq *scq, *rcq;
        struct ib_srq *srq;
        struct ib_qp *qp;
        struct ib_qp_init_attr attr;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        mutex_lock(&ib_uverbs_idr_mutex);

        pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
        scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle);
        rcq = idr_find(&ib_uverbs_cq_idr, cmd.recv_cq_handle);
        srq = cmd.is_srq ? idr_find(&ib_uverbs_srq_idr, cmd.srq_handle) : NULL;

        if (!pd || pd->uobject->context != file->ucontext ||
            !scq || scq->uobject->context != file->ucontext ||
            !rcq || rcq->uobject->context != file->ucontext ||
            (cmd.is_srq && (!srq || srq->uobject->context != file->ucontext))) {
                ret = -EINVAL;
                goto err_up;
        }

        attr.event_handler = ib_uverbs_qp_event_handler;
        attr.qp_context = file;
        attr.send_cq = scq;
        attr.recv_cq = rcq;
        attr.srq = srq;
        attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
        attr.qp_type = cmd.qp_type;

        attr.cap.max_send_wr = cmd.max_send_wr;
        attr.cap.max_recv_wr = cmd.max_recv_wr;
        attr.cap.max_send_sge = cmd.max_send_sge;
        attr.cap.max_recv_sge = cmd.max_recv_sge;
        attr.cap.max_inline_data = cmd.max_inline_data;

        uobj->uevent.uobject.user_handle = cmd.user_handle;
        uobj->uevent.uobject.context = file->ucontext;
        uobj->uevent.events_reported = 0;
        INIT_LIST_HEAD(&uobj->uevent.event_list);
        INIT_LIST_HEAD(&uobj->mcast_list);

        qp = pd->device->create_qp(pd, &attr, &udata);
        if (IS_ERR(qp)) {
                ret = PTR_ERR(qp);
                goto err_up;
        }

        qp->device = pd->device;
        qp->pd = pd;
        qp->send_cq = attr.send_cq;
        qp->recv_cq = attr.recv_cq;
        qp->srq = attr.srq;
        qp->uobject = &uobj->uevent.uobject;
        qp->event_handler = attr.event_handler;
        qp->qp_context = attr.qp_context;
        qp->qp_type = attr.qp_type;
        atomic_inc(&pd->usecnt);
        atomic_inc(&attr.send_cq->usecnt);
        atomic_inc(&attr.recv_cq->usecnt);
        if (attr.srq)
                atomic_inc(&attr.srq->usecnt);

        memset(&resp, 0, sizeof resp);
        resp.qpn = qp->qp_num;

retry:
        if (!idr_pre_get(&ib_uverbs_qp_idr, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto err_destroy;
        }

        ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->uevent.uobject.id);

        if (ret == -EAGAIN)
                goto retry;
        if (ret)
                goto err_destroy;

        resp.qp_handle = uobj->uevent.uobject.id;
        resp.max_recv_sge = attr.cap.max_recv_sge;
        resp.max_send_sge = attr.cap.max_send_sge;
        resp.max_recv_wr = attr.cap.max_recv_wr;
        resp.max_send_wr = attr.cap.max_send_wr;
        resp.max_inline_data = attr.cap.max_inline_data;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_idr;
        }

        mutex_lock(&file->mutex);
        list_add_tail(&uobj->uevent.uobject.list, &file->ucontext->qp_list);
        mutex_unlock(&file->mutex);

        mutex_unlock(&ib_uverbs_idr_mutex);

        return in_len;

err_idr:
        idr_remove(&ib_uverbs_qp_idr, uobj->uevent.uobject.id);

err_destroy:
        ib_destroy_qp(qp);
        atomic_dec(&pd->usecnt);
        atomic_dec(&attr.send_cq->usecnt);
        atomic_dec(&attr.recv_cq->usecnt);
        if (attr.srq)
                atomic_dec(&attr.srq->usecnt);

err_up:
        mutex_unlock(&ib_uverbs_idr_mutex);

        kfree(uobj);
        return ret;
}
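
/*
 * Query a QP's current attributes and initial capabilities and copy
 * them back to userspace.
 */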
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
                const char __user *buf, int in_len,
                int out_len)
{
        struct ib_uverbs_query_qp cmd;
        struct ib_uverbs_query_qp_resp resp;
        struct ib_qp *qp;
        struct ib_qp_attr *attr;
        struct ib_qp_init_attr *init_attr;
        int ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
        if (!attr || !init_attr) {
                ret = -ENOMEM;
                goto out;
        }

        mutex_lock(&ib_uverbs_idr_mutex);

        qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
        if (qp && qp->uobject->context == file->ucontext)
                ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
        else
                ret = -EINVAL;

        mutex_unlock(&ib_uverbs_idr_mutex);

        if (ret)
                goto out;

        memset(&resp, 0, sizeof resp);

        resp.qp_state = attr->qp_state;
        resp.cur_qp_state = attr->cur_qp_state;
        resp.path_mtu = attr->path_mtu;
        resp.path_mig_state = attr->path_mig_state;
        resp.qkey = attr->qkey;
        resp.rq_psn = attr->rq_psn;
        resp.sq_psn = attr->sq_psn;
        resp.dest_qp_num = attr->dest_qp_num;
        resp.qp_access_flags = attr->qp_access_flags;
        resp.pkey_index = attr->pkey_index;
        resp.alt_pkey_index = attr->alt_pkey_index;
        resp.en_sqd_async_notify = attr->en_sqd_async_notify;
        resp.max_rd_atomic = attr->max_rd_atomic;
        resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
        resp.min_rnr_timer = attr->min_rnr_timer;
        resp.port_num = attr->port_num;
        resp.timeout = attr->timeout;
        resp.retry_cnt = attr->retry_cnt;
        resp.rnr_retry = attr->rnr_retry;
        resp.alt_port_num = attr->alt_port_num;
        resp.alt_timeout = attr->alt_timeout;

        memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
        resp.dest.flow_label = attr->ah_attr.grh.flow_label;
        resp.dest.sgid_index = attr->ah_attr.grh.sgid_index;
        resp.dest.hop_limit = attr->ah_attr.grh.hop_limit;
        resp.dest.traffic_class = attr->ah_attr.grh.traffic_class;
        resp.dest.dlid = attr->ah_attr.dlid;
        resp.dest.sl = attr->ah_attr.sl;
        resp.dest.src_path_bits = attr->ah_attr.src_path_bits;
        resp.dest.static_rate = attr->ah_attr.static_rate;
        resp.dest.is_global = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
        resp.dest.port_num = attr->ah_attr.port_num;

        memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
        resp.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label;
        resp.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index;
        resp.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit;
        resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
        resp.alt_dest.dlid = attr->alt_ah_attr.dlid;
        resp.alt_dest.sl = attr->alt_ah_attr.sl;
        resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
        resp.alt_dest.static_rate = attr->alt_ah_attr.static_rate;
        resp.alt_dest.is_global = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
        resp.alt_dest.port_num = attr->alt_ah_attr.port_num;

        resp.max_send_wr = init_attr->cap.max_send_wr;
        resp.max_recv_wr = init_attr->cap.max_recv_wr;
        resp.max_send_sge = init_attr->cap.max_send_sge;
        resp.max_recv_sge = init_attr->cap.max_recv_sge;
        resp.max_inline_data = init_attr->cap.max_inline_data;
        resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out:
        kfree(attr);
        kfree(init_attr);

        return ret ? ret : in_len;
}
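
/*
 * Modify a QP: copy the requested attributes (including the primary
 * and alternate path address vectors) from the command and apply them
 * with ib_modify_qp() according to cmd.attr_mask.
 */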
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
                const char __user *buf, int in_len,
                int out_len)
{
        struct ib_uverbs_modify_qp cmd;
        struct ib_qp *qp;
        struct ib_qp_attr *attr;
        int ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr)
                return -ENOMEM;

        mutex_lock(&ib_uverbs_idr_mutex);

        qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
        if (!qp || qp->uobject->context != file->ucontext) {
                ret = -EINVAL;
                goto out;
        }

        attr->qp_state = cmd.qp_state;
        attr->cur_qp_state = cmd.cur_qp_state;
        attr->path_mtu = cmd.path_mtu;
        attr->path_mig_state = cmd.path_mig_state;
        attr->qkey = cmd.qkey;
        attr->rq_psn = cmd.rq_psn;
        attr->sq_psn = cmd.sq_psn;
        attr->dest_qp_num = cmd.dest_qp_num;
        attr->qp_access_flags = cmd.qp_access_flags;
        attr->pkey_index = cmd.pkey_index;
        attr->alt_pkey_index = cmd.alt_pkey_index;
        attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
        attr->max_rd_atomic = cmd.max_rd_atomic;
        attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
        attr->min_rnr_timer = cmd.min_rnr_timer;
        attr->port_num = cmd.port_num;
        attr->timeout = cmd.timeout;
        attr->retry_cnt = cmd.retry_cnt;
        attr->rnr_retry = cmd.rnr_retry;
        attr->alt_port_num = cmd.alt_port_num;
        attr->alt_timeout = cmd.alt_timeout;

        memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
        attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
        attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
        attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
        attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
        attr->ah_attr.dlid = cmd.dest.dlid;
        attr->ah_attr.sl = cmd.dest.sl;
        attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
        attr->ah_attr.static_rate = cmd.dest.static_rate;
        attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
        attr->ah_attr.port_num = cmd.dest.port_num;

        memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
        attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
        attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
        attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
        attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
        attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
        attr->alt_ah_attr.sl = cmd.alt_dest.sl;
        attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
        attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
        attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
        attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;

        ret = ib_modify_qp(qp, attr, cmd.attr_mask);
        if (ret)
                goto out;

        ret = in_len;

out:
        mutex_unlock(&ib_uverbs_idr_mutex);
        kfree(attr);

        return ret;
}
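
/*
 * Destroy a QP.  Fails with -EBUSY while multicast groups are still
 * attached to it.
 */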
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
                const char __user *buf, int in_len,
                int out_len)
{
        struct ib_uverbs_destroy_qp cmd;
        struct ib_uverbs_destroy_qp_resp resp;
        struct ib_qp *qp;
        struct ib_uqp_object *uobj;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        memset(&resp, 0, sizeof resp);

        mutex_lock(&ib_uverbs_idr_mutex);

        qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
        if (!qp || qp->uobject->context != file->ucontext)
                goto out;

        uobj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

        if (!list_empty(&uobj->mcast_list)) {
                ret = -EBUSY;
                goto out;
        }

        ret = ib_destroy_qp(qp);
        if (ret)
                goto out;

        idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle);

        mutex_lock(&file->mutex);
        list_del(&uobj->uevent.uobject.list);
        mutex_unlock(&file->mutex);

        ib_uverbs_release_uevent(file, &uobj->uevent);

        resp.events_reported = uobj->uevent.events_reported;

        kfree(uobj);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out:
        mutex_unlock(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}
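
/*
 * Post send work requests: rebuild the ib_send_wr chain and its
 * scatter/gather lists from the userspace command buffer and hand the
 * chain to the driver's post_send method.  On failure, the position
 * of the failing request is reported back in resp.bad_wr.
 */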
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
                const char __user *buf, int in_len,
                int out_len)
{
        struct ib_uverbs_post_send cmd;
        struct ib_uverbs_post_send_resp resp;
        struct ib_uverbs_send_wr *user_wr;
        struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
        struct ib_qp *qp;
        int i, sg_ind;
        ssize_t ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
            cmd.sge_count * sizeof (struct ib_uverbs_sge))
                return -EINVAL;

        if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
                return -EINVAL;

        user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
        if (!user_wr)
                return -ENOMEM;

        mutex_lock(&ib_uverbs_idr_mutex);

        qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
        if (!qp || qp->uobject->context != file->ucontext)
                goto out;

        sg_ind = 0;
        last = NULL;
        for (i = 0; i < cmd.wr_count; ++i) {
                if (copy_from_user(user_wr,
                                   buf + sizeof cmd + i * cmd.wqe_size,
                                   cmd.wqe_size)) {
                        ret = -EFAULT;
                        goto out;
                }

                if (user_wr->num_sge + sg_ind > cmd.sge_count) {
                        ret = -EINVAL;
                        goto out;
                }

                next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
                               user_wr->num_sge * sizeof (struct ib_sge),
                               GFP_KERNEL);
                if (!next) {
                        ret = -ENOMEM;
                        goto out;
                }

                if (!last)
                        wr = next;
                else
                        last->next = next;
                last = next;

                next->next = NULL;
                next->wr_id = user_wr->wr_id;
                next->num_sge = user_wr->num_sge;
                next->opcode = user_wr->opcode;
                next->send_flags = user_wr->send_flags;
                next->imm_data = (__be32 __force) user_wr->imm_data;

                if (qp->qp_type == IB_QPT_UD) {
                        next->wr.ud.ah = idr_find(&ib_uverbs_ah_idr,
                                                  user_wr->wr.ud.ah);
                        if (!next->wr.ud.ah) {
                                ret = -EINVAL;
                                goto out;
                        }
                        next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn;
                        next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
                } else {
                        switch (next->opcode) {
                        case IB_WR_RDMA_WRITE:
                        case IB_WR_RDMA_WRITE_WITH_IMM:
                        case IB_WR_RDMA_READ:
                                next->wr.rdma.remote_addr =
                                        user_wr->wr.rdma.remote_addr;
                                next->wr.rdma.rkey =
                                        user_wr->wr.rdma.rkey;
                                break;
                        case IB_WR_ATOMIC_CMP_AND_SWP:
                        case IB_WR_ATOMIC_FETCH_AND_ADD:
                                next->wr.atomic.remote_addr =
                                        user_wr->wr.atomic.remote_addr;
                                next->wr.atomic.compare_add =
                                        user_wr->wr.atomic.compare_add;
                                next->wr.atomic.swap = user_wr->wr.atomic.swap;
                                next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
                                break;
                        default:
                                break;
                        }
                }

                if (next->num_sge) {
                        next->sg_list = (void *) next +
                                ALIGN(sizeof *next, sizeof (struct ib_sge));
                        if (copy_from_user(next->sg_list,
                                           buf + sizeof cmd +
                                           cmd.wr_count * cmd.wqe_size +
                                           sg_ind * sizeof (struct ib_sge),
                                           next->num_sge * sizeof (struct ib_sge))) {
                                ret = -EFAULT;
                                goto out;
                        }
                        sg_ind += next->num_sge;
                } else
                        next->sg_list = NULL;
        }

        resp.bad_wr = 0;
        ret = qp->device->post_send(qp, wr, &bad_wr);
        if (ret)
                for (next = wr; next; next = next->next) {
                        ++resp.bad_wr;
                        if (next == bad_wr)
                                break;
                }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out:
        mutex_unlock(&ib_uverbs_idr_mutex);

        while (wr) {
                next = wr->next;
                kfree(wr);
                wr = next;
        }

        kfree(user_wr);

        return ret ? ret : in_len;
}
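
/*
 * Common helper for post_recv and post_srq_recv: rebuild a chain of
 * ib_recv_wr structures (and their scatter/gather lists) from a
 * userspace command buffer.  Returns the head of the chain or an
 * ERR_PTR on failure.
 */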
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
                int in_len,
                u32 wr_count,
                u32 sge_count,
                u32 wqe_size)
{
        struct ib_uverbs_recv_wr *user_wr;
        struct ib_recv_wr *wr = NULL, *last, *next;
        int sg_ind;
        int i;
        int ret;

        if (in_len < wqe_size * wr_count +
            sge_count * sizeof (struct ib_uverbs_sge))
                return ERR_PTR(-EINVAL);

        if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
                return ERR_PTR(-EINVAL);

        user_wr = kmalloc(wqe_size, GFP_KERNEL);
        if (!user_wr)
                return ERR_PTR(-ENOMEM);

        sg_ind = 0;
        last = NULL;
        for (i = 0; i < wr_count; ++i) {
                if (copy_from_user(user_wr, buf + i * wqe_size,
                                   wqe_size)) {
                        ret = -EFAULT;
                        goto err;
                }

                if (user_wr->num_sge + sg_ind > sge_count) {
                        ret = -EINVAL;
                        goto err;
                }

                next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
                               user_wr->num_sge * sizeof (struct ib_sge),
                               GFP_KERNEL);
                if (!next) {
                        ret = -ENOMEM;
                        goto err;
                }

                if (!last)
                        wr = next;
                else
                        last->next = next;
                last = next;

                next->next = NULL;
                next->wr_id = user_wr->wr_id;
                next->num_sge = user_wr->num_sge;

                if (next->num_sge) {
                        next->sg_list = (void *) next +
                                ALIGN(sizeof *next, sizeof (struct ib_sge));
                        if (copy_from_user(next->sg_list,
                                           buf + wr_count * wqe_size +
                                           sg_ind * sizeof (struct ib_sge),
                                           next->num_sge * sizeof (struct ib_sge))) {
                                ret = -EFAULT;
                                goto err;
                        }
                        sg_ind += next->num_sge;
                } else
                        next->sg_list = NULL;
        }

        kfree(user_wr);
        return wr;

err:
        kfree(user_wr);

        while (wr) {
                next = wr->next;
                kfree(wr);
                wr = next;
        }

        return ERR_PTR(ret);
}

ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
                const char __user *buf, int in_len,
                int out_len)
{
        struct ib_uverbs_post_recv cmd;
        struct ib_uverbs_post_recv_resp resp;
        struct ib_recv_wr *wr, *next, *bad_wr;
        struct ib_qp *qp;
        ssize_t ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
                                       in_len - sizeof cmd, cmd.wr_count,
                                       cmd.sge_count, cmd.wqe_size);
        if (IS_ERR(wr))
                return PTR_ERR(wr);

        mutex_lock(&ib_uverbs_idr_mutex);

        qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
        if (!qp || qp->uobject->context != file->ucontext)
                goto out;

        resp.bad_wr = 0;
        ret = qp->device->post_recv(qp, wr, &bad_wr);
        if (ret)
                for (next = wr; next; next = next->next) {
                        ++resp.bad_wr;
                        if (next == bad_wr)
                                break;
                }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out:
        mutex_unlock(&ib_uverbs_idr_mutex);

        while (wr) {
                next = wr->next;
                kfree(wr);
                wr = next;
        }

        return ret ? ret : in_len;
}

ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
                const char __user *buf, int in_len,
                int out_len)
{
        struct ib_uverbs_post_srq_recv cmd;
        struct ib_uverbs_post_srq_recv_resp resp;
        struct ib_recv_wr *wr, *next, *bad_wr;
        struct ib_srq *srq;
        ssize_t ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
                                       in_len - sizeof cmd, cmd.wr_count,
                                       cmd.sge_count, cmd.wqe_size);
        if (IS_ERR(wr))
                return PTR_ERR(wr);

        mutex_lock(&ib_uverbs_idr_mutex);

        srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
        if (!srq || srq->uobject->context != file->ucontext)
                goto out;

        resp.bad_wr = 0;
        ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
        if (ret)
                for (next = wr; next; next = next->next) {
                        ++resp.bad_wr;
                        if (next == bad_wr)
                                break;
                }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out:
        mutex_unlock(&ib_uverbs_idr_mutex);

        while (wr) {
                next = wr->next;
                kfree(wr);
                wr = next;
        }

        return ret ? ret : in_len;
}
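
/*
 * Create an address handle from the address vector supplied by
 * userspace and return its handle.
 */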
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
                const char __user *buf, int in_len,
                int out_len)
{
        struct ib_uverbs_create_ah cmd;
        struct ib_uverbs_create_ah_resp resp;
        struct ib_uobject *uobj;
        struct ib_pd *pd;
        struct ib_ah *ah;
        struct ib_ah_attr attr;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        mutex_lock(&ib_uverbs_idr_mutex);

        pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
        if (!pd || pd->uobject->context != file->ucontext) {
                ret = -EINVAL;
                goto err_up;
        }

        uobj->user_handle = cmd.user_handle;
        uobj->context = file->ucontext;

        attr.dlid = cmd.attr.dlid;
        attr.sl = cmd.attr.sl;
        attr.src_path_bits = cmd.attr.src_path_bits;
        attr.static_rate = cmd.attr.static_rate;
        attr.ah_flags = cmd.attr.is_global ? IB_AH_GRH : 0;
        attr.port_num = cmd.attr.port_num;
        attr.grh.flow_label = cmd.attr.grh.flow_label;
        attr.grh.sgid_index = cmd.attr.grh.sgid_index;
        attr.grh.hop_limit = cmd.attr.grh.hop_limit;
        attr.grh.traffic_class = cmd.attr.grh.traffic_class;
        memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

        ah = ib_create_ah(pd, &attr);
        if (IS_ERR(ah)) {
                ret = PTR_ERR(ah);
                goto err_up;
        }

        ah->uobject = uobj;

retry:
        if (!idr_pre_get(&ib_uverbs_ah_idr, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto err_destroy;
        }

        ret = idr_get_new(&ib_uverbs_ah_idr, ah, &uobj->id);

        if (ret == -EAGAIN)
                goto retry;
        if (ret)
                goto err_destroy;

        resp.ah_handle = uobj->id;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_idr;
        }

        mutex_lock(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->ah_list);
        mutex_unlock(&file->mutex);

        mutex_unlock(&ib_uverbs_idr_mutex);

        return in_len;

err_idr:
        idr_remove(&ib_uverbs_ah_idr, uobj->id);

err_destroy:
        ib_destroy_ah(ah);

err_up:
        mutex_unlock(&ib_uverbs_idr_mutex);

        kfree(uobj);
        return ret;
}

ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
                const char __user *buf, int in_len, int out_len)
{
        struct ib_uverbs_destroy_ah cmd;
        struct ib_ah *ah;
        struct ib_uobject *uobj;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        mutex_lock(&ib_uverbs_idr_mutex);

        ah = idr_find(&ib_uverbs_ah_idr, cmd.ah_handle);
        if (!ah || ah->uobject->context != file->ucontext)
                goto out;

        uobj = ah->uobject;

        ret = ib_destroy_ah(ah);
        if (ret)
                goto out;

        idr_remove(&ib_uverbs_ah_idr, cmd.ah_handle);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        kfree(uobj);

out:
        mutex_unlock(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}
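
/*
 * Attach a QP to a multicast group.  Attachments are tracked on the
 * QP's mcast_list so they can be found again on detach and block QP
 * destruction while still present; attaching the same group twice is
 * a no-op.
 */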
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
                const char __user *buf, int in_len,
                int out_len)
{
        struct ib_uverbs_attach_mcast cmd;
        struct ib_qp *qp;
        struct ib_uqp_object *uobj;
        struct ib_uverbs_mcast_entry *mcast;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        mutex_lock(&ib_uverbs_idr_mutex);

        qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
        if (!qp || qp->uobject->context != file->ucontext)
                goto out;

        uobj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

        list_for_each_entry(mcast, &uobj->mcast_list, list)
                if (cmd.mlid == mcast->lid &&
                    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
                        ret = 0;
                        goto out;
                }

        mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
        if (!mcast) {
                ret = -ENOMEM;
                goto out;
        }

        mcast->lid = cmd.mlid;
        memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

        ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
        if (!ret) {
                uobj = container_of(qp->uobject, struct ib_uqp_object,
                                    uevent.uobject);
                list_add_tail(&mcast->list, &uobj->mcast_list);
        } else
                kfree(mcast);

out:
        mutex_unlock(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}

ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
                const char __user *buf, int in_len,
                int out_len)
{
        struct ib_uverbs_detach_mcast cmd;
        struct ib_uqp_object *uobj;
        struct ib_qp *qp;
        struct ib_uverbs_mcast_entry *mcast;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        mutex_lock(&ib_uverbs_idr_mutex);

        qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
        if (!qp || qp->uobject->context != file->ucontext)
                goto out;

        ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
        if (ret)
                goto out;

        uobj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

        list_for_each_entry(mcast, &uobj->mcast_list, list)
                if (cmd.mlid == mcast->lid &&
                    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
                        list_del(&mcast->list);
                        kfree(mcast);
                        break;
                }

out:
        mutex_unlock(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}
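
/*
 * Create a shared receive queue on a PD and return its handle and
 * actual limits to userspace.
 */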
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
                const char __user *buf, int in_len,
                int out_len)
{
        struct ib_uverbs_create_srq cmd;
        struct ib_uverbs_create_srq_resp resp;
        struct ib_udata udata;
        struct ib_uevent_object *uobj;
        struct ib_pd *pd;
        struct ib_srq *srq;
        struct ib_srq_init_attr attr;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        mutex_lock(&ib_uverbs_idr_mutex);

        pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);

        if (!pd || pd->uobject->context != file->ucontext) {
                ret = -EINVAL;
                goto err_up;
        }

        attr.event_handler = ib_uverbs_srq_event_handler;
        attr.srq_context = file;
        attr.attr.max_wr = cmd.max_wr;
        attr.attr.max_sge = cmd.max_sge;
        attr.attr.srq_limit = cmd.srq_limit;

        uobj->uobject.user_handle = cmd.user_handle;
        uobj->uobject.context = file->ucontext;
        uobj->events_reported = 0;
        INIT_LIST_HEAD(&uobj->event_list);

        srq = pd->device->create_srq(pd, &attr, &udata);
        if (IS_ERR(srq)) {
                ret = PTR_ERR(srq);
                goto err_up;
        }

        srq->device = pd->device;
        srq->pd = pd;
        srq->uobject = &uobj->uobject;
        srq->event_handler = attr.event_handler;
        srq->srq_context = attr.srq_context;
        atomic_inc(&pd->usecnt);
        atomic_set(&srq->usecnt, 0);

        memset(&resp, 0, sizeof resp);

retry:
        if (!idr_pre_get(&ib_uverbs_srq_idr, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto err_destroy;
        }

        ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->uobject.id);

        if (ret == -EAGAIN)
                goto retry;
        if (ret)
                goto err_destroy;

        resp.srq_handle = uobj->uobject.id;
        resp.max_wr = attr.attr.max_wr;
        resp.max_sge = attr.attr.max_sge;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_idr;
        }

        mutex_lock(&file->mutex);
        list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list);
        mutex_unlock(&file->mutex);

        mutex_unlock(&ib_uverbs_idr_mutex);

        return in_len;

err_idr:
        idr_remove(&ib_uverbs_srq_idr, uobj->uobject.id);

err_destroy:
        ib_destroy_srq(srq);
        atomic_dec(&pd->usecnt);

err_up:
        mutex_unlock(&ib_uverbs_idr_mutex);

        kfree(uobj);
        return ret;
}

ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
                const char __user *buf, int in_len,
                int out_len)
{
        struct ib_uverbs_modify_srq cmd;
        struct ib_srq *srq;
        struct ib_srq_attr attr;
        int ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        mutex_lock(&ib_uverbs_idr_mutex);

        srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
        if (!srq || srq->uobject->context != file->ucontext) {
                ret = -EINVAL;
                goto out;
        }

        attr.max_wr = cmd.max_wr;
        attr.srq_limit = cmd.srq_limit;

        ret = ib_modify_srq(srq, &attr, cmd.attr_mask);

out:
        mutex_unlock(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}
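
/*
 * Query an SRQ's current attributes (max_wr, max_sge, srq_limit) and
 * copy them back to userspace.
 */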
ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
                const char __user *buf,
                int in_len, int out_len)
{
        struct ib_uverbs_query_srq cmd;
        struct ib_uverbs_query_srq_resp resp;
        struct ib_srq_attr attr;
        struct ib_srq *srq;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        mutex_lock(&ib_uverbs_idr_mutex);

        srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
        if (srq && srq->uobject->context == file->ucontext)
                ret = ib_query_srq(srq, &attr);
        else
                ret = -EINVAL;

        mutex_unlock(&ib_uverbs_idr_mutex);

        if (ret)
                goto out;

        memset(&resp, 0, sizeof resp);

        resp.max_wr = attr.max_wr;
        resp.max_sge = attr.max_sge;
        resp.srq_limit = attr.srq_limit;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out:
        return ret ? ret : in_len;
}

ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
                const char __user *buf, int in_len,
                int out_len)
{
        struct ib_uverbs_destroy_srq cmd;
        struct ib_uverbs_destroy_srq_resp resp;
        struct ib_srq *srq;
        struct ib_uevent_object *uobj;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        mutex_lock(&ib_uverbs_idr_mutex);

        memset(&resp, 0, sizeof resp);

        srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
        if (!srq || srq->uobject->context != file->ucontext)
                goto out;

        uobj = container_of(srq->uobject, struct ib_uevent_object, uobject);

        ret = ib_destroy_srq(srq);
        if (ret)
                goto out;

        idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle);

        mutex_lock(&file->mutex);
        list_del(&uobj->uobject.list);
        mutex_unlock(&file->mutex);

        ib_uverbs_release_uevent(file, uobj);

        resp.events_reported = uobj->events_reported;

        kfree(uobj);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out:
        mutex_unlock(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}