1 /* 2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 4 * Copyright (c) 2005 PathScale, Inc. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 
33 * 34 * $Id: uverbs_cmd.c 2708 2005-06-24 17:27:21Z roland $ 35 */ 36 37 #include <linux/file.h> 38 #include <linux/fs.h> 39 40 #include <asm/uaccess.h> 41 42 #include "uverbs.h" 43 44 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ 45 do { \ 46 (udata)->inbuf = (void __user *) (ibuf); \ 47 (udata)->outbuf = (void __user *) (obuf); \ 48 (udata)->inlen = (ilen); \ 49 (udata)->outlen = (olen); \ 50 } while (0) 51 52 ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, 53 const char __user *buf, 54 int in_len, int out_len) 55 { 56 struct ib_uverbs_get_context cmd; 57 struct ib_uverbs_get_context_resp resp; 58 struct ib_udata udata; 59 struct ib_device *ibdev = file->device->ib_dev; 60 struct ib_ucontext *ucontext; 61 struct file *filp; 62 int ret; 63 64 if (out_len < sizeof resp) 65 return -ENOSPC; 66 67 if (copy_from_user(&cmd, buf, sizeof cmd)) 68 return -EFAULT; 69 70 down(&file->mutex); 71 72 if (file->ucontext) { 73 ret = -EINVAL; 74 goto err; 75 } 76 77 INIT_UDATA(&udata, buf + sizeof cmd, 78 (unsigned long) cmd.response + sizeof resp, 79 in_len - sizeof cmd, out_len - sizeof resp); 80 81 ucontext = ibdev->alloc_ucontext(ibdev, &udata); 82 if (IS_ERR(ucontext)) 83 return PTR_ERR(file->ucontext); 84 85 ucontext->device = ibdev; 86 INIT_LIST_HEAD(&ucontext->pd_list); 87 INIT_LIST_HEAD(&ucontext->mr_list); 88 INIT_LIST_HEAD(&ucontext->mw_list); 89 INIT_LIST_HEAD(&ucontext->cq_list); 90 INIT_LIST_HEAD(&ucontext->qp_list); 91 INIT_LIST_HEAD(&ucontext->srq_list); 92 INIT_LIST_HEAD(&ucontext->ah_list); 93 94 resp.num_comp_vectors = file->device->num_comp_vectors; 95 96 filp = ib_uverbs_alloc_event_file(file, 1, &resp.async_fd); 97 if (IS_ERR(filp)) { 98 ret = PTR_ERR(filp); 99 goto err_free; 100 } 101 102 if (copy_to_user((void __user *) (unsigned long) cmd.response, 103 &resp, sizeof resp)) { 104 ret = -EFAULT; 105 goto err_file; 106 } 107 108 file->async_file = filp->private_data; 109 110 INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev, 
111 ib_uverbs_event_handler); 112 ret = ib_register_event_handler(&file->event_handler); 113 if (ret) 114 goto err_file; 115 116 kref_get(&file->async_file->ref); 117 kref_get(&file->ref); 118 file->ucontext = ucontext; 119 120 fd_install(resp.async_fd, filp); 121 122 up(&file->mutex); 123 124 return in_len; 125 126 err_file: 127 put_unused_fd(resp.async_fd); 128 fput(filp); 129 130 err_free: 131 ibdev->dealloc_ucontext(ucontext); 132 133 err: 134 up(&file->mutex); 135 return ret; 136 } 137 138 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, 139 const char __user *buf, 140 int in_len, int out_len) 141 { 142 struct ib_uverbs_query_device cmd; 143 struct ib_uverbs_query_device_resp resp; 144 struct ib_device_attr attr; 145 int ret; 146 147 if (out_len < sizeof resp) 148 return -ENOSPC; 149 150 if (copy_from_user(&cmd, buf, sizeof cmd)) 151 return -EFAULT; 152 153 ret = ib_query_device(file->device->ib_dev, &attr); 154 if (ret) 155 return ret; 156 157 memset(&resp, 0, sizeof resp); 158 159 resp.fw_ver = attr.fw_ver; 160 resp.node_guid = attr.node_guid; 161 resp.sys_image_guid = attr.sys_image_guid; 162 resp.max_mr_size = attr.max_mr_size; 163 resp.page_size_cap = attr.page_size_cap; 164 resp.vendor_id = attr.vendor_id; 165 resp.vendor_part_id = attr.vendor_part_id; 166 resp.hw_ver = attr.hw_ver; 167 resp.max_qp = attr.max_qp; 168 resp.max_qp_wr = attr.max_qp_wr; 169 resp.device_cap_flags = attr.device_cap_flags; 170 resp.max_sge = attr.max_sge; 171 resp.max_sge_rd = attr.max_sge_rd; 172 resp.max_cq = attr.max_cq; 173 resp.max_cqe = attr.max_cqe; 174 resp.max_mr = attr.max_mr; 175 resp.max_pd = attr.max_pd; 176 resp.max_qp_rd_atom = attr.max_qp_rd_atom; 177 resp.max_ee_rd_atom = attr.max_ee_rd_atom; 178 resp.max_res_rd_atom = attr.max_res_rd_atom; 179 resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom; 180 resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom; 181 resp.atomic_cap = attr.atomic_cap; 182 resp.max_ee = attr.max_ee; 183 resp.max_rdd = 
attr.max_rdd; 184 resp.max_mw = attr.max_mw; 185 resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp; 186 resp.max_raw_ethy_qp = attr.max_raw_ethy_qp; 187 resp.max_mcast_grp = attr.max_mcast_grp; 188 resp.max_mcast_qp_attach = attr.max_mcast_qp_attach; 189 resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach; 190 resp.max_ah = attr.max_ah; 191 resp.max_fmr = attr.max_fmr; 192 resp.max_map_per_fmr = attr.max_map_per_fmr; 193 resp.max_srq = attr.max_srq; 194 resp.max_srq_wr = attr.max_srq_wr; 195 resp.max_srq_sge = attr.max_srq_sge; 196 resp.max_pkeys = attr.max_pkeys; 197 resp.local_ca_ack_delay = attr.local_ca_ack_delay; 198 resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt; 199 200 if (copy_to_user((void __user *) (unsigned long) cmd.response, 201 &resp, sizeof resp)) 202 return -EFAULT; 203 204 return in_len; 205 } 206 207 ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file, 208 const char __user *buf, 209 int in_len, int out_len) 210 { 211 struct ib_uverbs_query_port cmd; 212 struct ib_uverbs_query_port_resp resp; 213 struct ib_port_attr attr; 214 int ret; 215 216 if (out_len < sizeof resp) 217 return -ENOSPC; 218 219 if (copy_from_user(&cmd, buf, sizeof cmd)) 220 return -EFAULT; 221 222 ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr); 223 if (ret) 224 return ret; 225 226 memset(&resp, 0, sizeof resp); 227 228 resp.state = attr.state; 229 resp.max_mtu = attr.max_mtu; 230 resp.active_mtu = attr.active_mtu; 231 resp.gid_tbl_len = attr.gid_tbl_len; 232 resp.port_cap_flags = attr.port_cap_flags; 233 resp.max_msg_sz = attr.max_msg_sz; 234 resp.bad_pkey_cntr = attr.bad_pkey_cntr; 235 resp.qkey_viol_cntr = attr.qkey_viol_cntr; 236 resp.pkey_tbl_len = attr.pkey_tbl_len; 237 resp.lid = attr.lid; 238 resp.sm_lid = attr.sm_lid; 239 resp.lmc = attr.lmc; 240 resp.max_vl_num = attr.max_vl_num; 241 resp.sm_sl = attr.sm_sl; 242 resp.subnet_timeout = attr.subnet_timeout; 243 resp.init_type_reply = attr.init_type_reply; 244 resp.active_width = 
attr.active_width; 245 resp.active_speed = attr.active_speed; 246 resp.phys_state = attr.phys_state; 247 248 if (copy_to_user((void __user *) (unsigned long) cmd.response, 249 &resp, sizeof resp)) 250 return -EFAULT; 251 252 return in_len; 253 } 254 255 ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file, 256 const char __user *buf, 257 int in_len, int out_len) 258 { 259 struct ib_uverbs_alloc_pd cmd; 260 struct ib_uverbs_alloc_pd_resp resp; 261 struct ib_udata udata; 262 struct ib_uobject *uobj; 263 struct ib_pd *pd; 264 int ret; 265 266 if (out_len < sizeof resp) 267 return -ENOSPC; 268 269 if (copy_from_user(&cmd, buf, sizeof cmd)) 270 return -EFAULT; 271 272 INIT_UDATA(&udata, buf + sizeof cmd, 273 (unsigned long) cmd.response + sizeof resp, 274 in_len - sizeof cmd, out_len - sizeof resp); 275 276 uobj = kmalloc(sizeof *uobj, GFP_KERNEL); 277 if (!uobj) 278 return -ENOMEM; 279 280 uobj->context = file->ucontext; 281 282 pd = file->device->ib_dev->alloc_pd(file->device->ib_dev, 283 file->ucontext, &udata); 284 if (IS_ERR(pd)) { 285 ret = PTR_ERR(pd); 286 goto err; 287 } 288 289 pd->device = file->device->ib_dev; 290 pd->uobject = uobj; 291 atomic_set(&pd->usecnt, 0); 292 293 down(&ib_uverbs_idr_mutex); 294 295 retry: 296 if (!idr_pre_get(&ib_uverbs_pd_idr, GFP_KERNEL)) { 297 ret = -ENOMEM; 298 goto err_up; 299 } 300 301 ret = idr_get_new(&ib_uverbs_pd_idr, pd, &uobj->id); 302 303 if (ret == -EAGAIN) 304 goto retry; 305 if (ret) 306 goto err_up; 307 308 memset(&resp, 0, sizeof resp); 309 resp.pd_handle = uobj->id; 310 311 if (copy_to_user((void __user *) (unsigned long) cmd.response, 312 &resp, sizeof resp)) { 313 ret = -EFAULT; 314 goto err_idr; 315 } 316 317 down(&file->mutex); 318 list_add_tail(&uobj->list, &file->ucontext->pd_list); 319 up(&file->mutex); 320 321 up(&ib_uverbs_idr_mutex); 322 323 return in_len; 324 325 err_idr: 326 idr_remove(&ib_uverbs_pd_idr, uobj->id); 327 328 err_up: 329 up(&ib_uverbs_idr_mutex); 330 ib_dealloc_pd(pd); 331 332 err: 333 
kfree(uobj); 334 return ret; 335 } 336 337 ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file, 338 const char __user *buf, 339 int in_len, int out_len) 340 { 341 struct ib_uverbs_dealloc_pd cmd; 342 struct ib_pd *pd; 343 struct ib_uobject *uobj; 344 int ret = -EINVAL; 345 346 if (copy_from_user(&cmd, buf, sizeof cmd)) 347 return -EFAULT; 348 349 down(&ib_uverbs_idr_mutex); 350 351 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); 352 if (!pd || pd->uobject->context != file->ucontext) 353 goto out; 354 355 uobj = pd->uobject; 356 357 ret = ib_dealloc_pd(pd); 358 if (ret) 359 goto out; 360 361 idr_remove(&ib_uverbs_pd_idr, cmd.pd_handle); 362 363 down(&file->mutex); 364 list_del(&uobj->list); 365 up(&file->mutex); 366 367 kfree(uobj); 368 369 out: 370 up(&ib_uverbs_idr_mutex); 371 372 return ret ? ret : in_len; 373 } 374 375 ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file, 376 const char __user *buf, int in_len, 377 int out_len) 378 { 379 struct ib_uverbs_reg_mr cmd; 380 struct ib_uverbs_reg_mr_resp resp; 381 struct ib_udata udata; 382 struct ib_umem_object *obj; 383 struct ib_pd *pd; 384 struct ib_mr *mr; 385 int ret; 386 387 if (out_len < sizeof resp) 388 return -ENOSPC; 389 390 if (copy_from_user(&cmd, buf, sizeof cmd)) 391 return -EFAULT; 392 393 INIT_UDATA(&udata, buf + sizeof cmd, 394 (unsigned long) cmd.response + sizeof resp, 395 in_len - sizeof cmd, out_len - sizeof resp); 396 397 if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)) 398 return -EINVAL; 399 400 /* 401 * Local write permission is required if remote write or 402 * remote atomic permission is also requested. 
403 */ 404 if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) && 405 !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE)) 406 return -EINVAL; 407 408 obj = kmalloc(sizeof *obj, GFP_KERNEL); 409 if (!obj) 410 return -ENOMEM; 411 412 obj->uobject.context = file->ucontext; 413 414 /* 415 * We ask for writable memory if any access flags other than 416 * "remote read" are set. "Local write" and "remote write" 417 * obviously require write access. "Remote atomic" can do 418 * things like fetch and add, which will modify memory, and 419 * "MW bind" can change permissions by binding a window. 420 */ 421 ret = ib_umem_get(file->device->ib_dev, &obj->umem, 422 (void *) (unsigned long) cmd.start, cmd.length, 423 !!(cmd.access_flags & ~IB_ACCESS_REMOTE_READ)); 424 if (ret) 425 goto err_free; 426 427 obj->umem.virt_base = cmd.hca_va; 428 429 down(&ib_uverbs_idr_mutex); 430 431 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); 432 if (!pd || pd->uobject->context != file->ucontext) { 433 ret = -EINVAL; 434 goto err_up; 435 } 436 437 if (!pd->device->reg_user_mr) { 438 ret = -ENOSYS; 439 goto err_up; 440 } 441 442 mr = pd->device->reg_user_mr(pd, &obj->umem, cmd.access_flags, &udata); 443 if (IS_ERR(mr)) { 444 ret = PTR_ERR(mr); 445 goto err_up; 446 } 447 448 mr->device = pd->device; 449 mr->pd = pd; 450 mr->uobject = &obj->uobject; 451 atomic_inc(&pd->usecnt); 452 atomic_set(&mr->usecnt, 0); 453 454 memset(&resp, 0, sizeof resp); 455 resp.lkey = mr->lkey; 456 resp.rkey = mr->rkey; 457 458 retry: 459 if (!idr_pre_get(&ib_uverbs_mr_idr, GFP_KERNEL)) { 460 ret = -ENOMEM; 461 goto err_unreg; 462 } 463 464 ret = idr_get_new(&ib_uverbs_mr_idr, mr, &obj->uobject.id); 465 466 if (ret == -EAGAIN) 467 goto retry; 468 if (ret) 469 goto err_unreg; 470 471 resp.mr_handle = obj->uobject.id; 472 473 if (copy_to_user((void __user *) (unsigned long) cmd.response, 474 &resp, sizeof resp)) { 475 ret = -EFAULT; 476 goto err_idr; 477 } 478 479 down(&file->mutex); 480 
list_add_tail(&obj->uobject.list, &file->ucontext->mr_list); 481 up(&file->mutex); 482 483 up(&ib_uverbs_idr_mutex); 484 485 return in_len; 486 487 err_idr: 488 idr_remove(&ib_uverbs_mr_idr, obj->uobject.id); 489 490 err_unreg: 491 ib_dereg_mr(mr); 492 atomic_dec(&pd->usecnt); 493 494 err_up: 495 up(&ib_uverbs_idr_mutex); 496 497 ib_umem_release(file->device->ib_dev, &obj->umem); 498 499 err_free: 500 kfree(obj); 501 return ret; 502 } 503 504 ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file, 505 const char __user *buf, int in_len, 506 int out_len) 507 { 508 struct ib_uverbs_dereg_mr cmd; 509 struct ib_mr *mr; 510 struct ib_umem_object *memobj; 511 int ret = -EINVAL; 512 513 if (copy_from_user(&cmd, buf, sizeof cmd)) 514 return -EFAULT; 515 516 down(&ib_uverbs_idr_mutex); 517 518 mr = idr_find(&ib_uverbs_mr_idr, cmd.mr_handle); 519 if (!mr || mr->uobject->context != file->ucontext) 520 goto out; 521 522 memobj = container_of(mr->uobject, struct ib_umem_object, uobject); 523 524 ret = ib_dereg_mr(mr); 525 if (ret) 526 goto out; 527 528 idr_remove(&ib_uverbs_mr_idr, cmd.mr_handle); 529 530 down(&file->mutex); 531 list_del(&memobj->uobject.list); 532 up(&file->mutex); 533 534 ib_umem_release(file->device->ib_dev, &memobj->umem); 535 kfree(memobj); 536 537 out: 538 up(&ib_uverbs_idr_mutex); 539 540 return ret ? 
ret : in_len; 541 } 542 543 ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file, 544 const char __user *buf, int in_len, 545 int out_len) 546 { 547 struct ib_uverbs_create_comp_channel cmd; 548 struct ib_uverbs_create_comp_channel_resp resp; 549 struct file *filp; 550 551 if (out_len < sizeof resp) 552 return -ENOSPC; 553 554 if (copy_from_user(&cmd, buf, sizeof cmd)) 555 return -EFAULT; 556 557 filp = ib_uverbs_alloc_event_file(file, 0, &resp.fd); 558 if (IS_ERR(filp)) 559 return PTR_ERR(filp); 560 561 if (copy_to_user((void __user *) (unsigned long) cmd.response, 562 &resp, sizeof resp)) { 563 put_unused_fd(resp.fd); 564 fput(filp); 565 return -EFAULT; 566 } 567 568 fd_install(resp.fd, filp); 569 return in_len; 570 } 571 572 ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file, 573 const char __user *buf, int in_len, 574 int out_len) 575 { 576 struct ib_uverbs_create_cq cmd; 577 struct ib_uverbs_create_cq_resp resp; 578 struct ib_udata udata; 579 struct ib_ucq_object *uobj; 580 struct ib_uverbs_event_file *ev_file = NULL; 581 struct ib_cq *cq; 582 int ret; 583 584 if (out_len < sizeof resp) 585 return -ENOSPC; 586 587 if (copy_from_user(&cmd, buf, sizeof cmd)) 588 return -EFAULT; 589 590 INIT_UDATA(&udata, buf + sizeof cmd, 591 (unsigned long) cmd.response + sizeof resp, 592 in_len - sizeof cmd, out_len - sizeof resp); 593 594 if (cmd.comp_vector >= file->device->num_comp_vectors) 595 return -EINVAL; 596 597 uobj = kmalloc(sizeof *uobj, GFP_KERNEL); 598 if (!uobj) 599 return -ENOMEM; 600 601 if (cmd.comp_channel >= 0) { 602 ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel); 603 if (!ev_file) { 604 ret = -EINVAL; 605 goto err; 606 } 607 } 608 609 uobj->uobject.user_handle = cmd.user_handle; 610 uobj->uobject.context = file->ucontext; 611 uobj->uverbs_file = file; 612 uobj->comp_events_reported = 0; 613 uobj->async_events_reported = 0; 614 INIT_LIST_HEAD(&uobj->comp_list); 615 INIT_LIST_HEAD(&uobj->async_list); 616 617 cq = 
file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe, 618 file->ucontext, &udata); 619 if (IS_ERR(cq)) { 620 ret = PTR_ERR(cq); 621 goto err; 622 } 623 624 cq->device = file->device->ib_dev; 625 cq->uobject = &uobj->uobject; 626 cq->comp_handler = ib_uverbs_comp_handler; 627 cq->event_handler = ib_uverbs_cq_event_handler; 628 cq->cq_context = ev_file; 629 atomic_set(&cq->usecnt, 0); 630 631 down(&ib_uverbs_idr_mutex); 632 633 retry: 634 if (!idr_pre_get(&ib_uverbs_cq_idr, GFP_KERNEL)) { 635 ret = -ENOMEM; 636 goto err_up; 637 } 638 639 ret = idr_get_new(&ib_uverbs_cq_idr, cq, &uobj->uobject.id); 640 641 if (ret == -EAGAIN) 642 goto retry; 643 if (ret) 644 goto err_up; 645 646 memset(&resp, 0, sizeof resp); 647 resp.cq_handle = uobj->uobject.id; 648 resp.cqe = cq->cqe; 649 650 if (copy_to_user((void __user *) (unsigned long) cmd.response, 651 &resp, sizeof resp)) { 652 ret = -EFAULT; 653 goto err_idr; 654 } 655 656 down(&file->mutex); 657 list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list); 658 up(&file->mutex); 659 660 up(&ib_uverbs_idr_mutex); 661 662 return in_len; 663 664 err_idr: 665 idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id); 666 667 err_up: 668 up(&ib_uverbs_idr_mutex); 669 ib_destroy_cq(cq); 670 671 err: 672 if (ev_file) 673 ib_uverbs_release_ucq(file, ev_file, uobj); 674 kfree(uobj); 675 return ret; 676 } 677 678 ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file, 679 const char __user *buf, int in_len, 680 int out_len) 681 { 682 struct ib_uverbs_poll_cq cmd; 683 struct ib_uverbs_poll_cq_resp *resp; 684 struct ib_cq *cq; 685 struct ib_wc *wc; 686 int ret = 0; 687 int i; 688 int rsize; 689 690 if (copy_from_user(&cmd, buf, sizeof cmd)) 691 return -EFAULT; 692 693 wc = kmalloc(cmd.ne * sizeof *wc, GFP_KERNEL); 694 if (!wc) 695 return -ENOMEM; 696 697 rsize = sizeof *resp + cmd.ne * sizeof(struct ib_uverbs_wc); 698 resp = kmalloc(rsize, GFP_KERNEL); 699 if (!resp) { 700 ret = -ENOMEM; 701 goto out_wc; 702 } 703 704 
down(&ib_uverbs_idr_mutex); 705 cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle); 706 if (!cq || cq->uobject->context != file->ucontext) { 707 ret = -EINVAL; 708 goto out; 709 } 710 711 resp->count = ib_poll_cq(cq, cmd.ne, wc); 712 713 for (i = 0; i < resp->count; i++) { 714 resp->wc[i].wr_id = wc[i].wr_id; 715 resp->wc[i].status = wc[i].status; 716 resp->wc[i].opcode = wc[i].opcode; 717 resp->wc[i].vendor_err = wc[i].vendor_err; 718 resp->wc[i].byte_len = wc[i].byte_len; 719 resp->wc[i].imm_data = (__u32 __force) wc[i].imm_data; 720 resp->wc[i].qp_num = wc[i].qp_num; 721 resp->wc[i].src_qp = wc[i].src_qp; 722 resp->wc[i].wc_flags = wc[i].wc_flags; 723 resp->wc[i].pkey_index = wc[i].pkey_index; 724 resp->wc[i].slid = wc[i].slid; 725 resp->wc[i].sl = wc[i].sl; 726 resp->wc[i].dlid_path_bits = wc[i].dlid_path_bits; 727 resp->wc[i].port_num = wc[i].port_num; 728 } 729 730 if (copy_to_user((void __user *) (unsigned long) cmd.response, resp, rsize)) 731 ret = -EFAULT; 732 733 out: 734 up(&ib_uverbs_idr_mutex); 735 kfree(resp); 736 737 out_wc: 738 kfree(wc); 739 return ret ? ret : in_len; 740 } 741 742 ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file, 743 const char __user *buf, int in_len, 744 int out_len) 745 { 746 struct ib_uverbs_req_notify_cq cmd; 747 struct ib_cq *cq; 748 int ret = -EINVAL; 749 750 if (copy_from_user(&cmd, buf, sizeof cmd)) 751 return -EFAULT; 752 753 down(&ib_uverbs_idr_mutex); 754 cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle); 755 if (cq && cq->uobject->context == file->ucontext) { 756 ib_req_notify_cq(cq, cmd.solicited_only ? 
757 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP); 758 ret = in_len; 759 } 760 up(&ib_uverbs_idr_mutex); 761 762 return ret; 763 } 764 765 ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file, 766 const char __user *buf, int in_len, 767 int out_len) 768 { 769 struct ib_uverbs_destroy_cq cmd; 770 struct ib_uverbs_destroy_cq_resp resp; 771 struct ib_cq *cq; 772 struct ib_ucq_object *uobj; 773 struct ib_uverbs_event_file *ev_file; 774 u64 user_handle; 775 int ret = -EINVAL; 776 777 if (copy_from_user(&cmd, buf, sizeof cmd)) 778 return -EFAULT; 779 780 memset(&resp, 0, sizeof resp); 781 782 down(&ib_uverbs_idr_mutex); 783 784 cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle); 785 if (!cq || cq->uobject->context != file->ucontext) 786 goto out; 787 788 user_handle = cq->uobject->user_handle; 789 uobj = container_of(cq->uobject, struct ib_ucq_object, uobject); 790 ev_file = cq->cq_context; 791 792 ret = ib_destroy_cq(cq); 793 if (ret) 794 goto out; 795 796 idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle); 797 798 down(&file->mutex); 799 list_del(&uobj->uobject.list); 800 up(&file->mutex); 801 802 ib_uverbs_release_ucq(file, ev_file, uobj); 803 804 resp.comp_events_reported = uobj->comp_events_reported; 805 resp.async_events_reported = uobj->async_events_reported; 806 807 kfree(uobj); 808 809 if (copy_to_user((void __user *) (unsigned long) cmd.response, 810 &resp, sizeof resp)) 811 ret = -EFAULT; 812 813 out: 814 up(&ib_uverbs_idr_mutex); 815 816 return ret ? 
ret : in_len; 817 } 818 819 ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, 820 const char __user *buf, int in_len, 821 int out_len) 822 { 823 struct ib_uverbs_create_qp cmd; 824 struct ib_uverbs_create_qp_resp resp; 825 struct ib_udata udata; 826 struct ib_uqp_object *uobj; 827 struct ib_pd *pd; 828 struct ib_cq *scq, *rcq; 829 struct ib_srq *srq; 830 struct ib_qp *qp; 831 struct ib_qp_init_attr attr; 832 int ret; 833 834 if (out_len < sizeof resp) 835 return -ENOSPC; 836 837 if (copy_from_user(&cmd, buf, sizeof cmd)) 838 return -EFAULT; 839 840 INIT_UDATA(&udata, buf + sizeof cmd, 841 (unsigned long) cmd.response + sizeof resp, 842 in_len - sizeof cmd, out_len - sizeof resp); 843 844 uobj = kmalloc(sizeof *uobj, GFP_KERNEL); 845 if (!uobj) 846 return -ENOMEM; 847 848 down(&ib_uverbs_idr_mutex); 849 850 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); 851 scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle); 852 rcq = idr_find(&ib_uverbs_cq_idr, cmd.recv_cq_handle); 853 srq = cmd.is_srq ? idr_find(&ib_uverbs_srq_idr, cmd.srq_handle) : NULL; 854 855 if (!pd || pd->uobject->context != file->ucontext || 856 !scq || scq->uobject->context != file->ucontext || 857 !rcq || rcq->uobject->context != file->ucontext || 858 (cmd.is_srq && (!srq || srq->uobject->context != file->ucontext))) { 859 ret = -EINVAL; 860 goto err_up; 861 } 862 863 attr.event_handler = ib_uverbs_qp_event_handler; 864 attr.qp_context = file; 865 attr.send_cq = scq; 866 attr.recv_cq = rcq; 867 attr.srq = srq; 868 attr.sq_sig_type = cmd.sq_sig_all ? 
IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; 869 attr.qp_type = cmd.qp_type; 870 871 attr.cap.max_send_wr = cmd.max_send_wr; 872 attr.cap.max_recv_wr = cmd.max_recv_wr; 873 attr.cap.max_send_sge = cmd.max_send_sge; 874 attr.cap.max_recv_sge = cmd.max_recv_sge; 875 attr.cap.max_inline_data = cmd.max_inline_data; 876 877 uobj->uevent.uobject.user_handle = cmd.user_handle; 878 uobj->uevent.uobject.context = file->ucontext; 879 uobj->uevent.events_reported = 0; 880 INIT_LIST_HEAD(&uobj->uevent.event_list); 881 INIT_LIST_HEAD(&uobj->mcast_list); 882 883 qp = pd->device->create_qp(pd, &attr, &udata); 884 if (IS_ERR(qp)) { 885 ret = PTR_ERR(qp); 886 goto err_up; 887 } 888 889 qp->device = pd->device; 890 qp->pd = pd; 891 qp->send_cq = attr.send_cq; 892 qp->recv_cq = attr.recv_cq; 893 qp->srq = attr.srq; 894 qp->uobject = &uobj->uevent.uobject; 895 qp->event_handler = attr.event_handler; 896 qp->qp_context = attr.qp_context; 897 qp->qp_type = attr.qp_type; 898 atomic_inc(&pd->usecnt); 899 atomic_inc(&attr.send_cq->usecnt); 900 atomic_inc(&attr.recv_cq->usecnt); 901 if (attr.srq) 902 atomic_inc(&attr.srq->usecnt); 903 904 memset(&resp, 0, sizeof resp); 905 resp.qpn = qp->qp_num; 906 907 retry: 908 if (!idr_pre_get(&ib_uverbs_qp_idr, GFP_KERNEL)) { 909 ret = -ENOMEM; 910 goto err_destroy; 911 } 912 913 ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->uevent.uobject.id); 914 915 if (ret == -EAGAIN) 916 goto retry; 917 if (ret) 918 goto err_destroy; 919 920 resp.qp_handle = uobj->uevent.uobject.id; 921 resp.max_recv_sge = attr.cap.max_recv_sge; 922 resp.max_send_sge = attr.cap.max_send_sge; 923 resp.max_recv_wr = attr.cap.max_recv_wr; 924 resp.max_send_wr = attr.cap.max_send_wr; 925 resp.max_inline_data = attr.cap.max_inline_data; 926 927 if (copy_to_user((void __user *) (unsigned long) cmd.response, 928 &resp, sizeof resp)) { 929 ret = -EFAULT; 930 goto err_idr; 931 } 932 933 down(&file->mutex); 934 list_add_tail(&uobj->uevent.uobject.list, &file->ucontext->qp_list); 935 
up(&file->mutex); 936 937 up(&ib_uverbs_idr_mutex); 938 939 return in_len; 940 941 err_idr: 942 idr_remove(&ib_uverbs_qp_idr, uobj->uevent.uobject.id); 943 944 err_destroy: 945 ib_destroy_qp(qp); 946 atomic_dec(&pd->usecnt); 947 atomic_dec(&attr.send_cq->usecnt); 948 atomic_dec(&attr.recv_cq->usecnt); 949 if (attr.srq) 950 atomic_dec(&attr.srq->usecnt); 951 952 err_up: 953 up(&ib_uverbs_idr_mutex); 954 955 kfree(uobj); 956 return ret; 957 } 958 959 ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, 960 const char __user *buf, int in_len, 961 int out_len) 962 { 963 struct ib_uverbs_modify_qp cmd; 964 struct ib_qp *qp; 965 struct ib_qp_attr *attr; 966 int ret; 967 968 if (copy_from_user(&cmd, buf, sizeof cmd)) 969 return -EFAULT; 970 971 attr = kmalloc(sizeof *attr, GFP_KERNEL); 972 if (!attr) 973 return -ENOMEM; 974 975 down(&ib_uverbs_idr_mutex); 976 977 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 978 if (!qp || qp->uobject->context != file->ucontext) { 979 ret = -EINVAL; 980 goto out; 981 } 982 983 attr->qp_state = cmd.qp_state; 984 attr->cur_qp_state = cmd.cur_qp_state; 985 attr->path_mtu = cmd.path_mtu; 986 attr->path_mig_state = cmd.path_mig_state; 987 attr->qkey = cmd.qkey; 988 attr->rq_psn = cmd.rq_psn; 989 attr->sq_psn = cmd.sq_psn; 990 attr->dest_qp_num = cmd.dest_qp_num; 991 attr->qp_access_flags = cmd.qp_access_flags; 992 attr->pkey_index = cmd.pkey_index; 993 attr->alt_pkey_index = cmd.pkey_index; 994 attr->en_sqd_async_notify = cmd.en_sqd_async_notify; 995 attr->max_rd_atomic = cmd.max_rd_atomic; 996 attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic; 997 attr->min_rnr_timer = cmd.min_rnr_timer; 998 attr->port_num = cmd.port_num; 999 attr->timeout = cmd.timeout; 1000 attr->retry_cnt = cmd.retry_cnt; 1001 attr->rnr_retry = cmd.rnr_retry; 1002 attr->alt_port_num = cmd.alt_port_num; 1003 attr->alt_timeout = cmd.alt_timeout; 1004 1005 memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16); 1006 attr->ah_attr.grh.flow_label = cmd.dest.flow_label; 
1007 attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index; 1008 attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit; 1009 attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class; 1010 attr->ah_attr.dlid = cmd.dest.dlid; 1011 attr->ah_attr.sl = cmd.dest.sl; 1012 attr->ah_attr.src_path_bits = cmd.dest.src_path_bits; 1013 attr->ah_attr.static_rate = cmd.dest.static_rate; 1014 attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0; 1015 attr->ah_attr.port_num = cmd.dest.port_num; 1016 1017 memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16); 1018 attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label; 1019 attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index; 1020 attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit; 1021 attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class; 1022 attr->alt_ah_attr.dlid = cmd.alt_dest.dlid; 1023 attr->alt_ah_attr.sl = cmd.alt_dest.sl; 1024 attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits; 1025 attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate; 1026 attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? 
IB_AH_GRH : 0; 1027 attr->alt_ah_attr.port_num = cmd.alt_dest.port_num; 1028 1029 ret = ib_modify_qp(qp, attr, cmd.attr_mask); 1030 if (ret) 1031 goto out; 1032 1033 ret = in_len; 1034 1035 out: 1036 up(&ib_uverbs_idr_mutex); 1037 kfree(attr); 1038 1039 return ret; 1040 } 1041 1042 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file, 1043 const char __user *buf, int in_len, 1044 int out_len) 1045 { 1046 struct ib_uverbs_destroy_qp cmd; 1047 struct ib_uverbs_destroy_qp_resp resp; 1048 struct ib_qp *qp; 1049 struct ib_uqp_object *uobj; 1050 int ret = -EINVAL; 1051 1052 if (copy_from_user(&cmd, buf, sizeof cmd)) 1053 return -EFAULT; 1054 1055 memset(&resp, 0, sizeof resp); 1056 1057 down(&ib_uverbs_idr_mutex); 1058 1059 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 1060 if (!qp || qp->uobject->context != file->ucontext) 1061 goto out; 1062 1063 uobj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject); 1064 1065 if (!list_empty(&uobj->mcast_list)) { 1066 ret = -EBUSY; 1067 goto out; 1068 } 1069 1070 ret = ib_destroy_qp(qp); 1071 if (ret) 1072 goto out; 1073 1074 idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle); 1075 1076 down(&file->mutex); 1077 list_del(&uobj->uevent.uobject.list); 1078 up(&file->mutex); 1079 1080 ib_uverbs_release_uevent(file, &uobj->uevent); 1081 1082 resp.events_reported = uobj->uevent.events_reported; 1083 1084 kfree(uobj); 1085 1086 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1087 &resp, sizeof resp)) 1088 ret = -EFAULT; 1089 1090 out: 1091 up(&ib_uverbs_idr_mutex); 1092 1093 return ret ? 
ret : in_len; 1094 } 1095 1096 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, 1097 const char __user *buf, int in_len, 1098 int out_len) 1099 { 1100 struct ib_uverbs_post_send cmd; 1101 struct ib_uverbs_post_send_resp resp; 1102 struct ib_uverbs_send_wr *user_wr; 1103 struct ib_send_wr *wr = NULL, *last, *next, *bad_wr; 1104 struct ib_qp *qp; 1105 int i, sg_ind; 1106 ssize_t ret = -EINVAL; 1107 1108 if (copy_from_user(&cmd, buf, sizeof cmd)) 1109 return -EFAULT; 1110 1111 if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count + 1112 cmd.sge_count * sizeof (struct ib_uverbs_sge)) 1113 return -EINVAL; 1114 1115 if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr)) 1116 return -EINVAL; 1117 1118 user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL); 1119 if (!user_wr) 1120 return -ENOMEM; 1121 1122 down(&ib_uverbs_idr_mutex); 1123 1124 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 1125 if (!qp || qp->uobject->context != file->ucontext) 1126 goto out; 1127 1128 sg_ind = 0; 1129 last = NULL; 1130 for (i = 0; i < cmd.wr_count; ++i) { 1131 if (copy_from_user(user_wr, 1132 buf + sizeof cmd + i * cmd.wqe_size, 1133 cmd.wqe_size)) { 1134 ret = -EFAULT; 1135 goto out; 1136 } 1137 1138 if (user_wr->num_sge + sg_ind > cmd.sge_count) { 1139 ret = -EINVAL; 1140 goto out; 1141 } 1142 1143 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) + 1144 user_wr->num_sge * sizeof (struct ib_sge), 1145 GFP_KERNEL); 1146 if (!next) { 1147 ret = -ENOMEM; 1148 goto out; 1149 } 1150 1151 if (!last) 1152 wr = next; 1153 else 1154 last->next = next; 1155 last = next; 1156 1157 next->next = NULL; 1158 next->wr_id = user_wr->wr_id; 1159 next->num_sge = user_wr->num_sge; 1160 next->opcode = user_wr->opcode; 1161 next->send_flags = user_wr->send_flags; 1162 next->imm_data = (__be32 __force) user_wr->imm_data; 1163 1164 if (qp->qp_type == IB_QPT_UD) { 1165 next->wr.ud.ah = idr_find(&ib_uverbs_ah_idr, 1166 user_wr->wr.ud.ah); 1167 if (!next->wr.ud.ah) { 1168 ret = -EINVAL; 1169 goto out; 1170 
} 1171 next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn; 1172 next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey; 1173 } else { 1174 switch (next->opcode) { 1175 case IB_WR_RDMA_WRITE: 1176 case IB_WR_RDMA_WRITE_WITH_IMM: 1177 case IB_WR_RDMA_READ: 1178 next->wr.rdma.remote_addr = 1179 user_wr->wr.rdma.remote_addr; 1180 next->wr.rdma.rkey = 1181 user_wr->wr.rdma.rkey; 1182 break; 1183 case IB_WR_ATOMIC_CMP_AND_SWP: 1184 case IB_WR_ATOMIC_FETCH_AND_ADD: 1185 next->wr.atomic.remote_addr = 1186 user_wr->wr.atomic.remote_addr; 1187 next->wr.atomic.compare_add = 1188 user_wr->wr.atomic.compare_add; 1189 next->wr.atomic.swap = user_wr->wr.atomic.swap; 1190 next->wr.atomic.rkey = user_wr->wr.atomic.rkey; 1191 break; 1192 default: 1193 break; 1194 } 1195 } 1196 1197 if (next->num_sge) { 1198 next->sg_list = (void *) next + 1199 ALIGN(sizeof *next, sizeof (struct ib_sge)); 1200 if (copy_from_user(next->sg_list, 1201 buf + sizeof cmd + 1202 cmd.wr_count * cmd.wqe_size + 1203 sg_ind * sizeof (struct ib_sge), 1204 next->num_sge * sizeof (struct ib_sge))) { 1205 ret = -EFAULT; 1206 goto out; 1207 } 1208 sg_ind += next->num_sge; 1209 } else 1210 next->sg_list = NULL; 1211 } 1212 1213 resp.bad_wr = 0; 1214 ret = qp->device->post_send(qp, wr, &bad_wr); 1215 if (ret) 1216 for (next = wr; next; next = next->next) { 1217 ++resp.bad_wr; 1218 if (next == bad_wr) 1219 break; 1220 } 1221 1222 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1223 &resp, sizeof resp)) 1224 ret = -EFAULT; 1225 1226 out: 1227 up(&ib_uverbs_idr_mutex); 1228 1229 while (wr) { 1230 next = wr->next; 1231 kfree(wr); 1232 wr = next; 1233 } 1234 1235 kfree(user_wr); 1236 1237 return ret ? 
ret : in_len; 1238 } 1239 1240 static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf, 1241 int in_len, 1242 u32 wr_count, 1243 u32 sge_count, 1244 u32 wqe_size) 1245 { 1246 struct ib_uverbs_recv_wr *user_wr; 1247 struct ib_recv_wr *wr = NULL, *last, *next; 1248 int sg_ind; 1249 int i; 1250 int ret; 1251 1252 if (in_len < wqe_size * wr_count + 1253 sge_count * sizeof (struct ib_uverbs_sge)) 1254 return ERR_PTR(-EINVAL); 1255 1256 if (wqe_size < sizeof (struct ib_uverbs_recv_wr)) 1257 return ERR_PTR(-EINVAL); 1258 1259 user_wr = kmalloc(wqe_size, GFP_KERNEL); 1260 if (!user_wr) 1261 return ERR_PTR(-ENOMEM); 1262 1263 sg_ind = 0; 1264 last = NULL; 1265 for (i = 0; i < wr_count; ++i) { 1266 if (copy_from_user(user_wr, buf + i * wqe_size, 1267 wqe_size)) { 1268 ret = -EFAULT; 1269 goto err; 1270 } 1271 1272 if (user_wr->num_sge + sg_ind > sge_count) { 1273 ret = -EINVAL; 1274 goto err; 1275 } 1276 1277 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) + 1278 user_wr->num_sge * sizeof (struct ib_sge), 1279 GFP_KERNEL); 1280 if (!next) { 1281 ret = -ENOMEM; 1282 goto err; 1283 } 1284 1285 if (!last) 1286 wr = next; 1287 else 1288 last->next = next; 1289 last = next; 1290 1291 next->next = NULL; 1292 next->wr_id = user_wr->wr_id; 1293 next->num_sge = user_wr->num_sge; 1294 1295 if (next->num_sge) { 1296 next->sg_list = (void *) next + 1297 ALIGN(sizeof *next, sizeof (struct ib_sge)); 1298 if (copy_from_user(next->sg_list, 1299 buf + wr_count * wqe_size + 1300 sg_ind * sizeof (struct ib_sge), 1301 next->num_sge * sizeof (struct ib_sge))) { 1302 ret = -EFAULT; 1303 goto err; 1304 } 1305 sg_ind += next->num_sge; 1306 } else 1307 next->sg_list = NULL; 1308 } 1309 1310 kfree(user_wr); 1311 return wr; 1312 1313 err: 1314 kfree(user_wr); 1315 1316 while (wr) { 1317 next = wr->next; 1318 kfree(wr); 1319 wr = next; 1320 } 1321 1322 return ERR_PTR(ret); 1323 } 1324 1325 ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file, 1326 const char __user 
*buf, int in_len, 1327 int out_len) 1328 { 1329 struct ib_uverbs_post_recv cmd; 1330 struct ib_uverbs_post_recv_resp resp; 1331 struct ib_recv_wr *wr, *next, *bad_wr; 1332 struct ib_qp *qp; 1333 ssize_t ret = -EINVAL; 1334 1335 if (copy_from_user(&cmd, buf, sizeof cmd)) 1336 return -EFAULT; 1337 1338 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd, 1339 in_len - sizeof cmd, cmd.wr_count, 1340 cmd.sge_count, cmd.wqe_size); 1341 if (IS_ERR(wr)) 1342 return PTR_ERR(wr); 1343 1344 down(&ib_uverbs_idr_mutex); 1345 1346 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 1347 if (!qp || qp->uobject->context != file->ucontext) 1348 goto out; 1349 1350 resp.bad_wr = 0; 1351 ret = qp->device->post_recv(qp, wr, &bad_wr); 1352 if (ret) 1353 for (next = wr; next; next = next->next) { 1354 ++resp.bad_wr; 1355 if (next == bad_wr) 1356 break; 1357 } 1358 1359 1360 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1361 &resp, sizeof resp)) 1362 ret = -EFAULT; 1363 1364 out: 1365 up(&ib_uverbs_idr_mutex); 1366 1367 while (wr) { 1368 next = wr->next; 1369 kfree(wr); 1370 wr = next; 1371 } 1372 1373 return ret ? 
ret : in_len;
}

/*
 * Post a list of receive work requests to a shared receive queue on
 * behalf of user space.  The WR list is unmarshalled from the user
 * buffer by ib_uverbs_unmarshall_recv(); on a driver post failure,
 * resp.bad_wr reports how many WRs were consumed up to and including
 * the one that failed.
 *
 * Returns in_len on success, otherwise a negative errno.
 */
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_srq *srq;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Convert the user WR array into a kernel ib_recv_wr chain. */
	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	down(&ib_uverbs_idr_mutex);

	/* Look up the SRQ handle and make sure it belongs to this context. */
	srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
	if (!srq || srq->uobject->context != file->ucontext)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
	if (ret)
		/* Count WRs up to and including the one that failed. */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}


	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	up(&ib_uverbs_idr_mutex);

	/* Free the unmarshalled WR chain in all cases. */
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ?
ret : in_len;
}

/*
 * Create an address handle on behalf of user space.
 *
 * The user-supplied AH attributes are copied field by field into a
 * kernel ib_ah_attr, the AH is created on the given PD, registered in
 * the AH idr (with the standard idr_pre_get/idr_get_new -EAGAIN retry
 * loop), and finally linked into the context's ah_list.  Error paths
 * unwind in reverse order via the err_* labels.
 *
 * Returns in_len on success, otherwise a negative errno.
 */
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah cmd;
	struct ib_uverbs_create_ah_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_ah *ah;
	struct ib_ah_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	down(&ib_uverbs_idr_mutex);

	/* Look up the PD handle and make sure it belongs to this context. */
	pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
	if (!pd || pd->uobject->context != file->ucontext) {
		ret = -EINVAL;
		goto err_up;
	}

	uobj->user_handle = cmd.user_handle;
	uobj->context = file->ucontext;

	/*
	 * Translate the uverbs ABI attribute struct into the kernel
	 * ib_ah_attr; the GRH fields are meaningful when is_global is set.
	 */
	attr.dlid = cmd.attr.dlid;
	attr.sl = cmd.attr.sl;
	attr.src_path_bits = cmd.attr.src_path_bits;
	attr.static_rate = cmd.attr.static_rate;
	attr.ah_flags = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num = cmd.attr.port_num;
	attr.grh.flow_label = cmd.attr.grh.flow_label;
	attr.grh.sgid_index = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_up;
	}

	ah->uobject = uobj;

retry:
	/* Standard idr two-step: preload, then allocate; retry on -EAGAIN. */
	if (!idr_pre_get(&ib_uverbs_ah_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_destroy;
	}

	ret = idr_get_new(&ib_uverbs_ah_idr, ah, &uobj->id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_idr;
	}

	/* Publish the AH on this context's list only after full success. */
	down(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	up(&file->mutex);

	up(&ib_uverbs_idr_mutex);

	return in_len;

err_idr:
	idr_remove(&ib_uverbs_ah_idr, uobj->id);

err_destroy:
	ib_destroy_ah(ah);

err_up:
	up(&ib_uverbs_idr_mutex);

	kfree(uobj);
	return ret;
}

/*
 * Destroy an address handle on behalf of user space: look up the
 * handle, verify ownership, destroy the AH, then remove it from the
 * idr and the context's list before freeing the uobject.
 *
 * Returns in_len on success, otherwise a negative errno.
 */
ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_ah *ah;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	/* Look up the AH handle and make sure it belongs to this context. */
	ah = idr_find(&ib_uverbs_ah_idr, cmd.ah_handle);
	if (!ah || ah->uobject->context != file->ucontext)
		goto out;

	/* Save the uobject pointer: ah is invalid after ib_destroy_ah(). */
	uobj = ah->uobject;

	ret = ib_destroy_ah(ah);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_ah_idr, cmd.ah_handle);

	down(&file->mutex);
	list_del(&uobj->list);
up(&file->mutex); 1550 1551 kfree(uobj); 1552 1553 out: 1554 up(&ib_uverbs_idr_mutex); 1555 1556 return ret ? ret : in_len; 1557 } 1558 1559 ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file, 1560 const char __user *buf, int in_len, 1561 int out_len) 1562 { 1563 struct ib_uverbs_attach_mcast cmd; 1564 struct ib_qp *qp; 1565 struct ib_uqp_object *uobj; 1566 struct ib_uverbs_mcast_entry *mcast; 1567 int ret = -EINVAL; 1568 1569 if (copy_from_user(&cmd, buf, sizeof cmd)) 1570 return -EFAULT; 1571 1572 down(&ib_uverbs_idr_mutex); 1573 1574 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 1575 if (!qp || qp->uobject->context != file->ucontext) 1576 goto out; 1577 1578 uobj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject); 1579 1580 list_for_each_entry(mcast, &uobj->mcast_list, list) 1581 if (cmd.mlid == mcast->lid && 1582 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) { 1583 ret = 0; 1584 goto out; 1585 } 1586 1587 mcast = kmalloc(sizeof *mcast, GFP_KERNEL); 1588 if (!mcast) { 1589 ret = -ENOMEM; 1590 goto out; 1591 } 1592 1593 mcast->lid = cmd.mlid; 1594 memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw); 1595 1596 ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid); 1597 if (!ret) { 1598 uobj = container_of(qp->uobject, struct ib_uqp_object, 1599 uevent.uobject); 1600 list_add_tail(&mcast->list, &uobj->mcast_list); 1601 } else 1602 kfree(mcast); 1603 1604 out: 1605 up(&ib_uverbs_idr_mutex); 1606 1607 return ret ? 
ret : in_len;
}

/*
 * Detach a QP from a multicast group on behalf of user space.
 *
 * After a successful hardware detach, the matching tracking entry is
 * removed from the uobject's mcast_list (added by attach_mcast) and
 * freed.
 *
 * Returns in_len on success, otherwise a negative errno.
 */
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object *uobj;
	struct ib_qp *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	/* Look up the QP handle and make sure it belongs to this context. */
	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
	if (!qp || qp->uobject->context != file->ucontext)
		goto out;

	/*
	 * cmd.gid is a raw 16-byte array in the ABI struct; the cast to
	 * union ib_gid relies on their layouts matching.
	 */
	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
	if (ret)
		goto out;

	uobj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* Drop the tracking entry that attach_mcast added for this group. */
	list_for_each_entry(mcast, &uobj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			break;
		}

out:
	up(&ib_uverbs_idr_mutex);

	return ret ?
ret : in_len;
}

/*
 * Create a shared receive queue on behalf of user space.
 *
 * The SRQ is created through the device's create_srq method (with a
 * udata block so the driver can exchange provider-private data with
 * user space), registered in the SRQ idr via the standard
 * idr_pre_get/idr_get_new -EAGAIN retry loop, and linked into the
 * context's srq_list.  The PD's usecnt is bumped while the SRQ exists.
 * Error paths unwind in reverse order via the err_* labels.
 *
 * Returns in_len on success, otherwise a negative errno.
 */
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	struct ib_uevent_object *uobj;
	struct ib_pd *pd;
	struct ib_srq *srq;
	struct ib_srq_init_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Provider-private in/out areas follow the fixed command/response. */
	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	down(&ib_uverbs_idr_mutex);

	/* Look up the PD handle and make sure it belongs to this context. */
	pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);

	if (!pd || pd->uobject->context != file->ucontext) {
		ret = -EINVAL;
		goto err_up;
	}

	attr.event_handler = ib_uverbs_srq_event_handler;
	attr.srq_context = file;
	attr.attr.max_wr = cmd.max_wr;
	attr.attr.max_sge = cmd.max_sge;
	attr.attr.srq_limit = cmd.srq_limit;

	uobj->uobject.user_handle = cmd.user_handle;
	uobj->uobject.context = file->ucontext;
	uobj->events_reported = 0;
	INIT_LIST_HEAD(&uobj->event_list);

	srq = pd->device->create_srq(pd, &attr, &udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_up;
	}

	/* Fill in the common ib_srq fields the driver does not set. */
	srq->device = pd->device;
	srq->pd = pd;
	srq->uobject = &uobj->uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context = attr.srq_context;
	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	memset(&resp, 0, sizeof resp);

retry:
	/* Standard idr two-step: preload, then allocate; retry on -EAGAIN. */
	if (!idr_pre_get(&ib_uverbs_srq_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_destroy;
	}

	ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->uobject.id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_destroy;

	resp.srq_handle = uobj->uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_idr;
	}

	/* Publish the SRQ on this context's list only after full success. */
	down(&file->mutex);
	list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list);
	up(&file->mutex);

	up(&ib_uverbs_idr_mutex);

	return in_len;

err_idr:
	idr_remove(&ib_uverbs_srq_idr, uobj->uobject.id);

err_destroy:
	ib_destroy_srq(srq);
	atomic_dec(&pd->usecnt);

err_up:
	up(&ib_uverbs_idr_mutex);

	kfree(uobj);
	return ret;
}

/*
 * Modify an SRQ's attributes (max_wr and/or srq_limit, as selected by
 * cmd.attr_mask) on behalf of user space.
 *
 * Returns in_len on success, otherwise a negative errno.
 */
ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_srq *srq;
	struct ib_srq_attr attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	/* Look up the SRQ handle and make sure it belongs to this context. */
	srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
	if (!srq || srq->uobject->context != file->ucontext) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * NOTE(review): attr.max_sge is left uninitialized here —
	 * presumably attr_mask can never select it on this path, but
	 * drivers must not read fields outside attr_mask; verify.
	 */
	attr.max_wr = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = ib_modify_srq(srq, &attr, cmd.attr_mask);

out:
	up(&ib_uverbs_idr_mutex);

	return ret ?
ret : in_len;
}

/*
 * Destroy a shared receive queue on behalf of user space.
 *
 * After a successful ib_destroy_srq() the handle is removed from the
 * idr and the context's list, pending async events for the SRQ are
 * released, and the number of events already reported to user space is
 * returned in the response so the library can wait for them.
 *
 * Returns in_len on success, otherwise a negative errno.
 */
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_srq *srq;
	struct ib_uevent_object *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	memset(&resp, 0, sizeof resp);

	/* Look up the SRQ handle and make sure it belongs to this context. */
	srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
	if (!srq || srq->uobject->context != file->ucontext)
		goto out;

	uobj = container_of(srq->uobject, struct ib_uevent_object, uobject);

	ret = ib_destroy_srq(srq);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle);

	down(&file->mutex);
	list_del(&uobj->uobject.list);
	up(&file->mutex);

	ib_uverbs_release_uevent(file, uobj);

	/* Read events_reported before uobj is freed below. */
	resp.events_reported = uobj->events_reported;

	kfree(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	up(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}