1 /* 2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 4 * Copyright (c) 2005 PathScale, Inc. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 
33 * 34 * $Id: uverbs_cmd.c 2708 2005-06-24 17:27:21Z roland $ 35 */ 36 37 #include <linux/file.h> 38 #include <linux/fs.h> 39 40 #include <asm/uaccess.h> 41 42 #include "uverbs.h" 43 44 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ 45 do { \ 46 (udata)->inbuf = (void __user *) (ibuf); \ 47 (udata)->outbuf = (void __user *) (obuf); \ 48 (udata)->inlen = (ilen); \ 49 (udata)->outlen = (olen); \ 50 } while (0) 51 52 ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, 53 const char __user *buf, 54 int in_len, int out_len) 55 { 56 struct ib_uverbs_get_context cmd; 57 struct ib_uverbs_get_context_resp resp; 58 struct ib_udata udata; 59 struct ib_device *ibdev = file->device->ib_dev; 60 struct ib_ucontext *ucontext; 61 struct file *filp; 62 int ret; 63 64 if (out_len < sizeof resp) 65 return -ENOSPC; 66 67 if (copy_from_user(&cmd, buf, sizeof cmd)) 68 return -EFAULT; 69 70 down(&file->mutex); 71 72 if (file->ucontext) { 73 ret = -EINVAL; 74 goto err; 75 } 76 77 INIT_UDATA(&udata, buf + sizeof cmd, 78 (unsigned long) cmd.response + sizeof resp, 79 in_len - sizeof cmd, out_len - sizeof resp); 80 81 ucontext = ibdev->alloc_ucontext(ibdev, &udata); 82 if (IS_ERR(ucontext)) 83 return PTR_ERR(file->ucontext); 84 85 ucontext->device = ibdev; 86 INIT_LIST_HEAD(&ucontext->pd_list); 87 INIT_LIST_HEAD(&ucontext->mr_list); 88 INIT_LIST_HEAD(&ucontext->mw_list); 89 INIT_LIST_HEAD(&ucontext->cq_list); 90 INIT_LIST_HEAD(&ucontext->qp_list); 91 INIT_LIST_HEAD(&ucontext->srq_list); 92 INIT_LIST_HEAD(&ucontext->ah_list); 93 94 resp.num_comp_vectors = file->device->num_comp_vectors; 95 96 filp = ib_uverbs_alloc_event_file(file, 1, &resp.async_fd); 97 if (IS_ERR(filp)) { 98 ret = PTR_ERR(filp); 99 goto err_free; 100 } 101 102 if (copy_to_user((void __user *) (unsigned long) cmd.response, 103 &resp, sizeof resp)) { 104 ret = -EFAULT; 105 goto err_file; 106 } 107 108 file->async_file = filp->private_data; 109 110 INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev, 
111 ib_uverbs_event_handler); 112 ret = ib_register_event_handler(&file->event_handler); 113 if (ret) 114 goto err_file; 115 116 kref_get(&file->async_file->ref); 117 kref_get(&file->ref); 118 file->ucontext = ucontext; 119 120 fd_install(resp.async_fd, filp); 121 122 up(&file->mutex); 123 124 return in_len; 125 126 err_file: 127 put_unused_fd(resp.async_fd); 128 fput(filp); 129 130 err_free: 131 ibdev->dealloc_ucontext(ucontext); 132 133 err: 134 up(&file->mutex); 135 return ret; 136 } 137 138 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, 139 const char __user *buf, 140 int in_len, int out_len) 141 { 142 struct ib_uverbs_query_device cmd; 143 struct ib_uverbs_query_device_resp resp; 144 struct ib_device_attr attr; 145 int ret; 146 147 if (out_len < sizeof resp) 148 return -ENOSPC; 149 150 if (copy_from_user(&cmd, buf, sizeof cmd)) 151 return -EFAULT; 152 153 ret = ib_query_device(file->device->ib_dev, &attr); 154 if (ret) 155 return ret; 156 157 memset(&resp, 0, sizeof resp); 158 159 resp.fw_ver = attr.fw_ver; 160 resp.node_guid = attr.node_guid; 161 resp.sys_image_guid = attr.sys_image_guid; 162 resp.max_mr_size = attr.max_mr_size; 163 resp.page_size_cap = attr.page_size_cap; 164 resp.vendor_id = attr.vendor_id; 165 resp.vendor_part_id = attr.vendor_part_id; 166 resp.hw_ver = attr.hw_ver; 167 resp.max_qp = attr.max_qp; 168 resp.max_qp_wr = attr.max_qp_wr; 169 resp.device_cap_flags = attr.device_cap_flags; 170 resp.max_sge = attr.max_sge; 171 resp.max_sge_rd = attr.max_sge_rd; 172 resp.max_cq = attr.max_cq; 173 resp.max_cqe = attr.max_cqe; 174 resp.max_mr = attr.max_mr; 175 resp.max_pd = attr.max_pd; 176 resp.max_qp_rd_atom = attr.max_qp_rd_atom; 177 resp.max_ee_rd_atom = attr.max_ee_rd_atom; 178 resp.max_res_rd_atom = attr.max_res_rd_atom; 179 resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom; 180 resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom; 181 resp.atomic_cap = attr.atomic_cap; 182 resp.max_ee = attr.max_ee; 183 resp.max_rdd = 
attr.max_rdd; 184 resp.max_mw = attr.max_mw; 185 resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp; 186 resp.max_raw_ethy_qp = attr.max_raw_ethy_qp; 187 resp.max_mcast_grp = attr.max_mcast_grp; 188 resp.max_mcast_qp_attach = attr.max_mcast_qp_attach; 189 resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach; 190 resp.max_ah = attr.max_ah; 191 resp.max_fmr = attr.max_fmr; 192 resp.max_map_per_fmr = attr.max_map_per_fmr; 193 resp.max_srq = attr.max_srq; 194 resp.max_srq_wr = attr.max_srq_wr; 195 resp.max_srq_sge = attr.max_srq_sge; 196 resp.max_pkeys = attr.max_pkeys; 197 resp.local_ca_ack_delay = attr.local_ca_ack_delay; 198 resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt; 199 200 if (copy_to_user((void __user *) (unsigned long) cmd.response, 201 &resp, sizeof resp)) 202 return -EFAULT; 203 204 return in_len; 205 } 206 207 ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file, 208 const char __user *buf, 209 int in_len, int out_len) 210 { 211 struct ib_uverbs_query_port cmd; 212 struct ib_uverbs_query_port_resp resp; 213 struct ib_port_attr attr; 214 int ret; 215 216 if (out_len < sizeof resp) 217 return -ENOSPC; 218 219 if (copy_from_user(&cmd, buf, sizeof cmd)) 220 return -EFAULT; 221 222 ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr); 223 if (ret) 224 return ret; 225 226 memset(&resp, 0, sizeof resp); 227 228 resp.state = attr.state; 229 resp.max_mtu = attr.max_mtu; 230 resp.active_mtu = attr.active_mtu; 231 resp.gid_tbl_len = attr.gid_tbl_len; 232 resp.port_cap_flags = attr.port_cap_flags; 233 resp.max_msg_sz = attr.max_msg_sz; 234 resp.bad_pkey_cntr = attr.bad_pkey_cntr; 235 resp.qkey_viol_cntr = attr.qkey_viol_cntr; 236 resp.pkey_tbl_len = attr.pkey_tbl_len; 237 resp.lid = attr.lid; 238 resp.sm_lid = attr.sm_lid; 239 resp.lmc = attr.lmc; 240 resp.max_vl_num = attr.max_vl_num; 241 resp.sm_sl = attr.sm_sl; 242 resp.subnet_timeout = attr.subnet_timeout; 243 resp.init_type_reply = attr.init_type_reply; 244 resp.active_width = 
attr.active_width;
	resp.active_speed    = attr.active_speed;
	resp.phys_state      = attr.phys_state;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

/*
 * Allocate a protection domain for the caller's ucontext, assign it a
 * handle in the PD idr, and add it to the per-context pd_list.
 * Returns in_len on success, negative errno on failure.
 */
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd      cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata                udata;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	int                            ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	uobj->context = file->ucontext;

	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
					    file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device  = file->device->ib_dev;
	pd->uobject = uobj;
	atomic_set(&pd->usecnt, 0);

	down(&ib_uverbs_idr_mutex);

	/* idr_get_new() can drop its preallocation; retry on -EAGAIN. */
retry:
	if (!idr_pre_get(&ib_uverbs_pd_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_up;
	}

	ret = idr_get_new(&ib_uverbs_pd_idr, pd, &uobj->id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_up;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_idr;
	}

	down(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	up(&file->mutex);

	up(&ib_uverbs_idr_mutex);

	return in_len;

err_idr:
	idr_remove(&ib_uverbs_pd_idr, uobj->id);

err_up:
	up(&ib_uverbs_idr_mutex);
	ib_dealloc_pd(pd);

err:
	kfree(uobj);
	return ret;
}

/*
 * Destroy a protection domain: look the handle up under the idr mutex,
 * verify it belongs to the caller's context, then tear down idr entry,
 * list membership and the uobject.
 */
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_pd               *pd;
	struct ib_uobject          *uobj;
	int                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
	if (!pd || pd->uobject->context != file->ucontext)
		goto out;

	uobj = pd->uobject;

	ret = ib_dealloc_pd(pd);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_pd_idr, cmd.pd_handle);

	down(&file->mutex);
	list_del(&uobj->list);
	up(&file->mutex);

	kfree(uobj);

out:
	up(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}

/*
 * Register a userspace memory region: pin the pages, hand them to the
 * driver's reg_user_mr, and assign the new MR a handle.
 */
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr      cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata              udata;
	struct ib_umem_object       *obj;
	struct ib_pd                *pd;
	struct ib_mr                *mr;
	int                          ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	/* Virtual start and HCA virtual address must share a page offset. */
	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->uobject.context = file->ucontext;

	/*
	 * We ask for writable memory if any access flags other than
	 * "remote read" are set.  "Local write" and "remote write"
	 * obviously require write access.  "Remote atomic" can do
	 * things like fetch and add, which will modify memory, and
	 * "MW bind" can change permissions by binding a window.
	 */
	ret = ib_umem_get(file->device->ib_dev, &obj->umem,
			  (void *) (unsigned long) cmd.start, cmd.length,
			  !!(cmd.access_flags & ~IB_ACCESS_REMOTE_READ));
	if (ret)
		goto err_free;

	obj->umem.virt_base = cmd.hca_va;

	down(&ib_uverbs_idr_mutex);

	pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
	if (!pd || pd->uobject->context != file->ucontext) {
		ret = -EINVAL;
		goto err_up;
	}

	/* Not every device supports userspace MRs. */
	if (!pd->device->reg_user_mr) {
		ret = -ENOSYS;
		goto err_up;
	}

	mr = pd->device->reg_user_mr(pd, &obj->umem, cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_up;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->uobject = &obj->uobject;
	atomic_inc(&pd->usecnt);
	atomic_set(&mr->usecnt, 0);

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	/* idr_get_new() can drop its preallocation; retry on -EAGAIN. */
retry:
	if (!idr_pre_get(&ib_uverbs_mr_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_unreg;
	}

	ret = idr_get_new(&ib_uverbs_mr_idr, mr, &obj->uobject.id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_unreg;

	resp.mr_handle = obj->uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_idr;
	}

	down(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->mr_list);
	up(&file->mutex);

	up(&ib_uverbs_idr_mutex);

	return in_len;

err_idr:
	idr_remove(&ib_uverbs_mr_idr, obj->uobject.id);

err_unreg:
	ib_dereg_mr(mr);

err_up:
	up(&ib_uverbs_idr_mutex);

	ib_umem_release(file->device->ib_dev, &obj->umem);

err_free:
	kfree(obj);
	return ret;
}

/*
 * Deregister a memory region and release the pinned userspace pages.
 */
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr             *mr;
	struct ib_umem_object    *memobj;
	int                       ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	mr = idr_find(&ib_uverbs_mr_idr, cmd.mr_handle);
	if (!mr || mr->uobject->context != file->ucontext)
		goto out;

	memobj = container_of(mr->uobject, struct ib_umem_object, uobject);

	ret = ib_dereg_mr(mr);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_mr_idr, cmd.mr_handle);

	down(&file->mutex);
	list_del(&memobj->uobject.list);
	up(&file->mutex);

	ib_umem_release(file->device->ib_dev, &memobj->umem);
	kfree(memobj);

out:
	up(&ib_uverbs_idr_mutex);

	return ret ?
ret : in_len;
}

/*
 * Create a completion-event channel: an anonymous event file whose fd
 * is returned to userspace and later referenced by create_cq.
 */
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel      cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct file                              *filp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	filp = ib_uverbs_alloc_event_file(file, 0, &resp.fd);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	/* Only install the fd once the handle has reached userspace. */
	fd_install(resp.fd, filp);
	return in_len;
}

/*
 * Create a completion queue, optionally bound to a completion channel,
 * and assign it a handle in the CQ idr.
 */
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq      cmd;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata                 udata;
	struct ib_ucq_object           *uobj;
	struct ib_uverbs_event_file    *ev_file = NULL;
	struct ib_cq                   *cq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if (cmd.comp_vector >= file->device->num_comp_vectors)
		return -EINVAL;

	/*
	 * NOTE(review): a negative comp_channel means "no channel", but
	 * the lookup result is never checked here — a bogus fd leaves
	 * ev_file NULL and the error is silently ignored.  Confirm
	 * whether -EINVAL should be returned for a failed lookup.
	 */
	if (cmd.comp_channel >= 0)
		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	uobj->uobject.user_handle   = cmd.user_handle;
	uobj->uobject.context       = file->ucontext;
	uobj->uverbs_file           = file;
	uobj->comp_events_reported  = 0;
	uobj->async_events_reported = 0;
	INIT_LIST_HEAD(&uobj->comp_list);
	INIT_LIST_HEAD(&uobj->async_list);

	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
					     file->ucontext, &udata);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err;
	}

	cq->device        = file->device->ib_dev;
	cq->uobject       = &uobj->uobject;
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = ev_file;
	atomic_set(&cq->usecnt, 0);

	down(&ib_uverbs_idr_mutex);

	/* idr_get_new() can drop its preallocation; retry on -EAGAIN. */
retry:
	if (!idr_pre_get(&ib_uverbs_cq_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_up;
	}

	ret = idr_get_new(&ib_uverbs_cq_idr, cq, &uobj->uobject.id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_up;

	memset(&resp, 0, sizeof resp);
	resp.cq_handle = uobj->uobject.id;
	resp.cqe       = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_idr;
	}

	down(&file->mutex);
	list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list);
	up(&file->mutex);

	up(&ib_uverbs_idr_mutex);

	return in_len;

err_idr:
	idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id);

err_up:
	up(&ib_uverbs_idr_mutex);
	ib_destroy_cq(cq);

err:
	kfree(uobj);
	return ret;
}

/*
 * Poll up to cmd.ne completions off a CQ and copy them to userspace.
 */
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq       cmd;
	struct ib_uverbs_poll_cq_resp *resp;
	struct ib_cq                  *cq;
	struct ib_wc                  *wc;
	int                            ret = 0;
	int                            i;
	int                            rsize;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/*
	 * NOTE(review): cmd.ne comes straight from userspace, so both
	 * multiplications below can overflow and under-allocate.
	 * An upper bound on cmd.ne (or an overflow-checked allocation)
	 * should be added — confirm against later upstream fixes.
	 */
	wc = kmalloc(cmd.ne * sizeof *wc, GFP_KERNEL);
	if (!wc)
		return -ENOMEM;

	rsize = sizeof *resp + cmd.ne * sizeof(struct ib_uverbs_wc);
	resp = kmalloc(rsize, GFP_KERNEL);
	if (!resp) {
		ret = -ENOMEM;
		goto out_wc;
	}

	down(&ib_uverbs_idr_mutex);
	cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
	if (!cq || cq->uobject->context != file->ucontext) {
		ret = -EINVAL;
		goto
out;
	}

	resp->count = ib_poll_cq(cq, cmd.ne, wc);

	/* Translate kernel ib_wc entries into the fixed userspace layout. */
	for (i = 0; i < resp->count; i++) {
		resp->wc[i].wr_id          = wc[i].wr_id;
		resp->wc[i].status         = wc[i].status;
		resp->wc[i].opcode         = wc[i].opcode;
		resp->wc[i].vendor_err     = wc[i].vendor_err;
		resp->wc[i].byte_len       = wc[i].byte_len;
		resp->wc[i].imm_data       = wc[i].imm_data;
		resp->wc[i].qp_num         = wc[i].qp_num;
		resp->wc[i].src_qp         = wc[i].src_qp;
		resp->wc[i].wc_flags       = wc[i].wc_flags;
		resp->wc[i].pkey_index     = wc[i].pkey_index;
		resp->wc[i].slid           = wc[i].slid;
		resp->wc[i].sl             = wc[i].sl;
		resp->wc[i].dlid_path_bits = wc[i].dlid_path_bits;
		resp->wc[i].port_num       = wc[i].port_num;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response, resp, rsize))
		ret = -EFAULT;

out:
	up(&ib_uverbs_idr_mutex);
	kfree(resp);

out_wc:
	kfree(wc);
	return ret ? ret : in_len;
}

/*
 * Request a completion notification on a CQ, either for solicited
 * completions only or for the next completion of any kind.
 */
ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq                  *cq;
	int                            ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);
	cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
	if (cq && cq->uobject->context == file->ucontext) {
		ib_req_notify_cq(cq, cmd.solicited_only ?
				 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
		ret = in_len;
	}
	up(&ib_uverbs_idr_mutex);

	return ret;
}

/*
 * Destroy a CQ and report back how many completion/async events had
 * been delivered for it, so userspace can drain its event queues.
 */
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq      cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_cq                    *cq;
	struct ib_ucq_object            *uobj;
	struct ib_uverbs_event_file     *ev_file;
	u64                              user_handle;
	int                              ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	down(&ib_uverbs_idr_mutex);

	cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
	if (!cq || cq->uobject->context != file->ucontext)
		goto out;

	/* NOTE(review): user_handle is captured but never used below. */
	user_handle = cq->uobject->user_handle;
	uobj        = container_of(cq->uobject, struct ib_ucq_object, uobject);
	ev_file     = cq->cq_context;

	ret = ib_destroy_cq(cq);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle);

	down(&file->mutex);
	list_del(&uobj->uobject.list);
	up(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, uobj);

	resp.comp_events_reported  = uobj->comp_events_reported;
	resp.async_events_reported = uobj->async_events_reported;

	kfree(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	up(&ib_uverbs_idr_mutex);

	return ret ?
ret : in_len;
}

/*
 * Create a queue pair over an existing PD, send/recv CQ and (optional)
 * SRQ, all validated to belong to the caller's context while holding
 * the idr mutex.  The new QP gets a handle in the QP idr.
 */
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp      cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uevent_object        *uobj;
	struct ib_pd                   *pd;
	struct ib_cq                   *scq, *rcq;
	struct ib_srq                  *srq;
	struct ib_qp                   *qp;
	struct ib_qp_init_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	down(&ib_uverbs_idr_mutex);

	pd  = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
	scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle);
	rcq = idr_find(&ib_uverbs_cq_idr, cmd.recv_cq_handle);
	srq = cmd.is_srq ? idr_find(&ib_uverbs_srq_idr, cmd.srq_handle) : NULL;

	/* Every referenced object must exist and belong to this context. */
	if (!pd  || pd->uobject->context  != file->ucontext ||
	    !scq || scq->uobject->context != file->ucontext ||
	    !rcq || rcq->uobject->context != file->ucontext ||
	    (cmd.is_srq && (!srq || srq->uobject->context != file->ucontext))) {
		ret = -EINVAL;
		goto err_up;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd.qp_type;

	attr.cap.max_send_wr     = cmd.max_send_wr;
	attr.cap.max_recv_wr     = cmd.max_recv_wr;
	attr.cap.max_send_sge    = cmd.max_send_sge;
	attr.cap.max_recv_sge    = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	uobj->uobject.user_handle = cmd.user_handle;
	uobj->uobject.context     = file->ucontext;
	uobj->events_reported     = 0;
	INIT_LIST_HEAD(&uobj->event_list);

	qp = pd->device->create_qp(pd, &attr, &udata);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_up;
	}

	qp->device        = pd->device;
	qp->pd            = pd;
	qp->send_cq       = attr.send_cq;
	qp->recv_cq       = attr.recv_cq;
	qp->srq           = attr.srq;
	qp->uobject       = &uobj->uobject;
	qp->event_handler = attr.event_handler;
	qp->qp_context    = attr.qp_context;
	qp->qp_type       = attr.qp_type;
	atomic_inc(&pd->usecnt);
	atomic_inc(&attr.send_cq->usecnt);
	atomic_inc(&attr.recv_cq->usecnt);
	if (attr.srq)
		atomic_inc(&attr.srq->usecnt);

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;

	/* idr_get_new() can drop its preallocation; retry on -EAGAIN. */
retry:
	if (!idr_pre_get(&ib_uverbs_qp_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_destroy;
	}

	ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->uobject.id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_destroy;

	resp.qp_handle = uobj->uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_idr;
	}

	down(&file->mutex);
	list_add_tail(&uobj->uobject.list, &file->ucontext->qp_list);
	up(&file->mutex);

	up(&ib_uverbs_idr_mutex);

	return in_len;

err_idr:
	idr_remove(&ib_uverbs_qp_idr, uobj->uobject.id);

err_destroy:
	ib_destroy_qp(qp);

err_up:
	up(&ib_uverbs_idr_mutex);

	kfree(uobj);
	return ret;
}

/*
 * Modify a QP's attributes (state transitions, path, timeouts, ...).
 */
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
941 const char __user *buf, int in_len, 942 int out_len) 943 { 944 struct ib_uverbs_modify_qp cmd; 945 struct ib_qp *qp; 946 struct ib_qp_attr *attr; 947 int ret; 948 949 if (copy_from_user(&cmd, buf, sizeof cmd)) 950 return -EFAULT; 951 952 attr = kmalloc(sizeof *attr, GFP_KERNEL); 953 if (!attr) 954 return -ENOMEM; 955 956 down(&ib_uverbs_idr_mutex); 957 958 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 959 if (!qp || qp->uobject->context != file->ucontext) { 960 ret = -EINVAL; 961 goto out; 962 } 963 964 attr->qp_state = cmd.qp_state; 965 attr->cur_qp_state = cmd.cur_qp_state; 966 attr->path_mtu = cmd.path_mtu; 967 attr->path_mig_state = cmd.path_mig_state; 968 attr->qkey = cmd.qkey; 969 attr->rq_psn = cmd.rq_psn; 970 attr->sq_psn = cmd.sq_psn; 971 attr->dest_qp_num = cmd.dest_qp_num; 972 attr->qp_access_flags = cmd.qp_access_flags; 973 attr->pkey_index = cmd.pkey_index; 974 attr->alt_pkey_index = cmd.pkey_index; 975 attr->en_sqd_async_notify = cmd.en_sqd_async_notify; 976 attr->max_rd_atomic = cmd.max_rd_atomic; 977 attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic; 978 attr->min_rnr_timer = cmd.min_rnr_timer; 979 attr->port_num = cmd.port_num; 980 attr->timeout = cmd.timeout; 981 attr->retry_cnt = cmd.retry_cnt; 982 attr->rnr_retry = cmd.rnr_retry; 983 attr->alt_port_num = cmd.alt_port_num; 984 attr->alt_timeout = cmd.alt_timeout; 985 986 memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16); 987 attr->ah_attr.grh.flow_label = cmd.dest.flow_label; 988 attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index; 989 attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit; 990 attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class; 991 attr->ah_attr.dlid = cmd.dest.dlid; 992 attr->ah_attr.sl = cmd.dest.sl; 993 attr->ah_attr.src_path_bits = cmd.dest.src_path_bits; 994 attr->ah_attr.static_rate = cmd.dest.static_rate; 995 attr->ah_attr.ah_flags = cmd.dest.is_global ? 
IB_AH_GRH : 0; 996 attr->ah_attr.port_num = cmd.dest.port_num; 997 998 memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16); 999 attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label; 1000 attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index; 1001 attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit; 1002 attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class; 1003 attr->alt_ah_attr.dlid = cmd.alt_dest.dlid; 1004 attr->alt_ah_attr.sl = cmd.alt_dest.sl; 1005 attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits; 1006 attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate; 1007 attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0; 1008 attr->alt_ah_attr.port_num = cmd.alt_dest.port_num; 1009 1010 ret = ib_modify_qp(qp, attr, cmd.attr_mask); 1011 if (ret) 1012 goto out; 1013 1014 ret = in_len; 1015 1016 out: 1017 up(&ib_uverbs_idr_mutex); 1018 kfree(attr); 1019 1020 return ret; 1021 } 1022 1023 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file, 1024 const char __user *buf, int in_len, 1025 int out_len) 1026 { 1027 struct ib_uverbs_destroy_qp cmd; 1028 struct ib_uverbs_destroy_qp_resp resp; 1029 struct ib_qp *qp; 1030 struct ib_uevent_object *uobj; 1031 int ret = -EINVAL; 1032 1033 if (copy_from_user(&cmd, buf, sizeof cmd)) 1034 return -EFAULT; 1035 1036 memset(&resp, 0, sizeof resp); 1037 1038 down(&ib_uverbs_idr_mutex); 1039 1040 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 1041 if (!qp || qp->uobject->context != file->ucontext) 1042 goto out; 1043 1044 uobj = container_of(qp->uobject, struct ib_uevent_object, uobject); 1045 1046 ret = ib_destroy_qp(qp); 1047 if (ret) 1048 goto out; 1049 1050 idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle); 1051 1052 down(&file->mutex); 1053 list_del(&uobj->uobject.list); 1054 up(&file->mutex); 1055 1056 ib_uverbs_release_uevent(file, uobj); 1057 1058 resp.events_reported = uobj->events_reported; 1059 1060 kfree(uobj); 1061 1062 if (copy_to_user((void __user *) 
(unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	up(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}

/*
 * Post a chain of send work requests to a QP.  The userspace layout is
 * wr_count WQEs of wqe_size bytes followed by sge_count scatter/gather
 * entries; each WQE is unmarshalled into a kernel ib_send_wr with its
 * sg_list stored inline after the structure.  On a failed post, the
 * index of the bad WR is reported back via resp.bad_wr.
 */
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send      cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr       *user_wr;
	struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp                   *qp;
	int                             i, sg_ind;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* The whole WQE array plus all SGEs must fit in the input buffer. */
	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	down(&ib_uverbs_idr_mutex);

	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
	if (!qp || qp->uobject->context != file->ucontext)
		goto out;

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out;
		}

		/* One allocation: the WR plus room for its inline sg_list. */
		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto out;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;
		next->opcode     = user_wr->opcode;
		next->send_flags = user_wr->send_flags;
		next->imm_data   = user_wr->imm_data;

		if (qp->qp_type == IB_QPT_UD) {
			/* UD sends carry an AH handle instead of a path. */
			next->wr.ud.ah = idr_find(&ib_uverbs_ah_idr,
						  user_wr->wr.ud.ah);
			if (!next->wr.ud.ah) {
				ret = -EINVAL;
				goto out;
			}
			next->wr.ud.remote_qpn  = user_wr->wr.ud.remote_qpn;
			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
		} else {
			switch (next->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
			case IB_WR_RDMA_READ:
				next->wr.rdma.remote_addr =
					user_wr->wr.rdma.remote_addr;
				next->wr.rdma.rkey        =
					user_wr->wr.rdma.rkey;
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				next->wr.atomic.remote_addr =
					user_wr->wr.atomic.remote_addr;
				next->wr.atomic.compare_add =
					user_wr->wr.atomic.compare_add;
				next->wr.atomic.swap = user_wr->wr.atomic.swap;
				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
				break;
			default:
				break;
			}
		}

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp, wr, &bad_wr);
	if (ret)
		/* Count how many WRs were consumed before the failure. */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	up(&ib_uverbs_idr_mutex);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	kfree(user_wr);

	return ret ?
ret : in_len; 1214 } 1215 1216 static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf, 1217 int in_len, 1218 u32 wr_count, 1219 u32 sge_count, 1220 u32 wqe_size) 1221 { 1222 struct ib_uverbs_recv_wr *user_wr; 1223 struct ib_recv_wr *wr = NULL, *last, *next; 1224 int sg_ind; 1225 int i; 1226 int ret; 1227 1228 if (in_len < wqe_size * wr_count + 1229 sge_count * sizeof (struct ib_uverbs_sge)) 1230 return ERR_PTR(-EINVAL); 1231 1232 if (wqe_size < sizeof (struct ib_uverbs_recv_wr)) 1233 return ERR_PTR(-EINVAL); 1234 1235 user_wr = kmalloc(wqe_size, GFP_KERNEL); 1236 if (!user_wr) 1237 return ERR_PTR(-ENOMEM); 1238 1239 sg_ind = 0; 1240 last = NULL; 1241 for (i = 0; i < wr_count; ++i) { 1242 if (copy_from_user(user_wr, buf + i * wqe_size, 1243 wqe_size)) { 1244 ret = -EFAULT; 1245 goto err; 1246 } 1247 1248 if (user_wr->num_sge + sg_ind > sge_count) { 1249 ret = -EINVAL; 1250 goto err; 1251 } 1252 1253 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) + 1254 user_wr->num_sge * sizeof (struct ib_sge), 1255 GFP_KERNEL); 1256 if (!next) { 1257 ret = -ENOMEM; 1258 goto err; 1259 } 1260 1261 if (!last) 1262 wr = next; 1263 else 1264 last->next = next; 1265 last = next; 1266 1267 next->next = NULL; 1268 next->wr_id = user_wr->wr_id; 1269 next->num_sge = user_wr->num_sge; 1270 1271 if (next->num_sge) { 1272 next->sg_list = (void *) next + 1273 ALIGN(sizeof *next, sizeof (struct ib_sge)); 1274 if (copy_from_user(next->sg_list, 1275 buf + wr_count * wqe_size + 1276 sg_ind * sizeof (struct ib_sge), 1277 next->num_sge * sizeof (struct ib_sge))) { 1278 ret = -EFAULT; 1279 goto err; 1280 } 1281 sg_ind += next->num_sge; 1282 } else 1283 next->sg_list = NULL; 1284 } 1285 1286 kfree(user_wr); 1287 return wr; 1288 1289 err: 1290 kfree(user_wr); 1291 1292 while (wr) { 1293 next = wr->next; 1294 kfree(wr); 1295 wr = next; 1296 } 1297 1298 return ERR_PTR(ret); 1299 } 1300 1301 ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file, 1302 const char __user 
*buf, int in_len, 1303 int out_len) 1304 { 1305 struct ib_uverbs_post_recv cmd; 1306 struct ib_uverbs_post_recv_resp resp; 1307 struct ib_recv_wr *wr, *next, *bad_wr; 1308 struct ib_qp *qp; 1309 ssize_t ret = -EINVAL; 1310 1311 if (copy_from_user(&cmd, buf, sizeof cmd)) 1312 return -EFAULT; 1313 1314 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd, 1315 in_len - sizeof cmd, cmd.wr_count, 1316 cmd.sge_count, cmd.wqe_size); 1317 if (IS_ERR(wr)) 1318 return PTR_ERR(wr); 1319 1320 down(&ib_uverbs_idr_mutex); 1321 1322 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 1323 if (!qp || qp->uobject->context != file->ucontext) 1324 goto out; 1325 1326 resp.bad_wr = 0; 1327 ret = qp->device->post_recv(qp, wr, &bad_wr); 1328 if (ret) 1329 for (next = wr; next; next = next->next) { 1330 ++resp.bad_wr; 1331 if (next == bad_wr) 1332 break; 1333 } 1334 1335 1336 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1337 &resp, sizeof resp)) 1338 ret = -EFAULT; 1339 1340 out: 1341 up(&ib_uverbs_idr_mutex); 1342 1343 while (wr) { 1344 next = wr->next; 1345 kfree(wr); 1346 wr = next; 1347 } 1348 1349 return ret ? 
ret : in_len; 1350 } 1351 1352 ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file, 1353 const char __user *buf, int in_len, 1354 int out_len) 1355 { 1356 struct ib_uverbs_post_srq_recv cmd; 1357 struct ib_uverbs_post_srq_recv_resp resp; 1358 struct ib_recv_wr *wr, *next, *bad_wr; 1359 struct ib_srq *srq; 1360 ssize_t ret = -EINVAL; 1361 1362 if (copy_from_user(&cmd, buf, sizeof cmd)) 1363 return -EFAULT; 1364 1365 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd, 1366 in_len - sizeof cmd, cmd.wr_count, 1367 cmd.sge_count, cmd.wqe_size); 1368 if (IS_ERR(wr)) 1369 return PTR_ERR(wr); 1370 1371 down(&ib_uverbs_idr_mutex); 1372 1373 srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle); 1374 if (!srq || srq->uobject->context != file->ucontext) 1375 goto out; 1376 1377 resp.bad_wr = 0; 1378 ret = srq->device->post_srq_recv(srq, wr, &bad_wr); 1379 if (ret) 1380 for (next = wr; next; next = next->next) { 1381 ++resp.bad_wr; 1382 if (next == bad_wr) 1383 break; 1384 } 1385 1386 1387 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1388 &resp, sizeof resp)) 1389 ret = -EFAULT; 1390 1391 out: 1392 up(&ib_uverbs_idr_mutex); 1393 1394 while (wr) { 1395 next = wr->next; 1396 kfree(wr); 1397 wr = next; 1398 } 1399 1400 return ret ? 
ret : in_len; 1401 } 1402 1403 ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, 1404 const char __user *buf, int in_len, 1405 int out_len) 1406 { 1407 struct ib_uverbs_create_ah cmd; 1408 struct ib_uverbs_create_ah_resp resp; 1409 struct ib_uobject *uobj; 1410 struct ib_pd *pd; 1411 struct ib_ah *ah; 1412 struct ib_ah_attr attr; 1413 int ret; 1414 1415 if (out_len < sizeof resp) 1416 return -ENOSPC; 1417 1418 if (copy_from_user(&cmd, buf, sizeof cmd)) 1419 return -EFAULT; 1420 1421 uobj = kmalloc(sizeof *uobj, GFP_KERNEL); 1422 if (!uobj) 1423 return -ENOMEM; 1424 1425 down(&ib_uverbs_idr_mutex); 1426 1427 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); 1428 if (!pd || pd->uobject->context != file->ucontext) { 1429 ret = -EINVAL; 1430 goto err_up; 1431 } 1432 1433 uobj->user_handle = cmd.user_handle; 1434 uobj->context = file->ucontext; 1435 1436 attr.dlid = cmd.attr.dlid; 1437 attr.sl = cmd.attr.sl; 1438 attr.src_path_bits = cmd.attr.src_path_bits; 1439 attr.static_rate = cmd.attr.static_rate; 1440 attr.port_num = cmd.attr.port_num; 1441 attr.grh.flow_label = cmd.attr.grh.flow_label; 1442 attr.grh.sgid_index = cmd.attr.grh.sgid_index; 1443 attr.grh.hop_limit = cmd.attr.grh.hop_limit; 1444 attr.grh.traffic_class = cmd.attr.grh.traffic_class; 1445 memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16); 1446 1447 ah = ib_create_ah(pd, &attr); 1448 if (IS_ERR(ah)) { 1449 ret = PTR_ERR(ah); 1450 goto err_up; 1451 } 1452 1453 ah->uobject = uobj; 1454 1455 retry: 1456 if (!idr_pre_get(&ib_uverbs_ah_idr, GFP_KERNEL)) { 1457 ret = -ENOMEM; 1458 goto err_destroy; 1459 } 1460 1461 ret = idr_get_new(&ib_uverbs_ah_idr, ah, &uobj->id); 1462 1463 if (ret == -EAGAIN) 1464 goto retry; 1465 if (ret) 1466 goto err_destroy; 1467 1468 resp.ah_handle = uobj->id; 1469 1470 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1471 &resp, sizeof resp)) { 1472 ret = -EFAULT; 1473 goto err_idr; 1474 } 1475 1476 down(&file->mutex); 1477 list_add_tail(&uobj->list, 
&file->ucontext->ah_list); 1478 up(&file->mutex); 1479 1480 up(&ib_uverbs_idr_mutex); 1481 1482 return in_len; 1483 1484 err_idr: 1485 idr_remove(&ib_uverbs_ah_idr, uobj->id); 1486 1487 err_destroy: 1488 ib_destroy_ah(ah); 1489 1490 err_up: 1491 up(&ib_uverbs_idr_mutex); 1492 1493 kfree(uobj); 1494 return ret; 1495 } 1496 1497 ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file, 1498 const char __user *buf, int in_len, int out_len) 1499 { 1500 struct ib_uverbs_destroy_ah cmd; 1501 struct ib_ah *ah; 1502 struct ib_uobject *uobj; 1503 int ret = -EINVAL; 1504 1505 if (copy_from_user(&cmd, buf, sizeof cmd)) 1506 return -EFAULT; 1507 1508 down(&ib_uverbs_idr_mutex); 1509 1510 ah = idr_find(&ib_uverbs_ah_idr, cmd.ah_handle); 1511 if (!ah || ah->uobject->context != file->ucontext) 1512 goto out; 1513 1514 uobj = ah->uobject; 1515 1516 ret = ib_destroy_ah(ah); 1517 if (ret) 1518 goto out; 1519 1520 idr_remove(&ib_uverbs_ah_idr, cmd.ah_handle); 1521 1522 down(&file->mutex); 1523 list_del(&uobj->list); 1524 up(&file->mutex); 1525 1526 kfree(uobj); 1527 1528 out: 1529 up(&ib_uverbs_idr_mutex); 1530 1531 return ret ? ret : in_len; 1532 } 1533 1534 ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file, 1535 const char __user *buf, int in_len, 1536 int out_len) 1537 { 1538 struct ib_uverbs_attach_mcast cmd; 1539 struct ib_qp *qp; 1540 int ret = -EINVAL; 1541 1542 if (copy_from_user(&cmd, buf, sizeof cmd)) 1543 return -EFAULT; 1544 1545 down(&ib_uverbs_idr_mutex); 1546 1547 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 1548 if (qp && qp->uobject->context == file->ucontext) 1549 ret = ib_attach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid); 1550 1551 up(&ib_uverbs_idr_mutex); 1552 1553 return ret ? 
ret : in_len; 1554 } 1555 1556 ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file, 1557 const char __user *buf, int in_len, 1558 int out_len) 1559 { 1560 struct ib_uverbs_detach_mcast cmd; 1561 struct ib_qp *qp; 1562 int ret = -EINVAL; 1563 1564 if (copy_from_user(&cmd, buf, sizeof cmd)) 1565 return -EFAULT; 1566 1567 down(&ib_uverbs_idr_mutex); 1568 1569 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 1570 if (qp && qp->uobject->context == file->ucontext) 1571 ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid); 1572 1573 up(&ib_uverbs_idr_mutex); 1574 1575 return ret ? ret : in_len; 1576 } 1577 1578 ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file, 1579 const char __user *buf, int in_len, 1580 int out_len) 1581 { 1582 struct ib_uverbs_create_srq cmd; 1583 struct ib_uverbs_create_srq_resp resp; 1584 struct ib_udata udata; 1585 struct ib_uevent_object *uobj; 1586 struct ib_pd *pd; 1587 struct ib_srq *srq; 1588 struct ib_srq_init_attr attr; 1589 int ret; 1590 1591 if (out_len < sizeof resp) 1592 return -ENOSPC; 1593 1594 if (copy_from_user(&cmd, buf, sizeof cmd)) 1595 return -EFAULT; 1596 1597 INIT_UDATA(&udata, buf + sizeof cmd, 1598 (unsigned long) cmd.response + sizeof resp, 1599 in_len - sizeof cmd, out_len - sizeof resp); 1600 1601 uobj = kmalloc(sizeof *uobj, GFP_KERNEL); 1602 if (!uobj) 1603 return -ENOMEM; 1604 1605 down(&ib_uverbs_idr_mutex); 1606 1607 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); 1608 1609 if (!pd || pd->uobject->context != file->ucontext) { 1610 ret = -EINVAL; 1611 goto err_up; 1612 } 1613 1614 attr.event_handler = ib_uverbs_srq_event_handler; 1615 attr.srq_context = file; 1616 attr.attr.max_wr = cmd.max_wr; 1617 attr.attr.max_sge = cmd.max_sge; 1618 attr.attr.srq_limit = cmd.srq_limit; 1619 1620 uobj->uobject.user_handle = cmd.user_handle; 1621 uobj->uobject.context = file->ucontext; 1622 uobj->events_reported = 0; 1623 INIT_LIST_HEAD(&uobj->event_list); 1624 1625 srq = pd->device->create_srq(pd, &attr, 
&udata); 1626 if (IS_ERR(srq)) { 1627 ret = PTR_ERR(srq); 1628 goto err_up; 1629 } 1630 1631 srq->device = pd->device; 1632 srq->pd = pd; 1633 srq->uobject = &uobj->uobject; 1634 srq->event_handler = attr.event_handler; 1635 srq->srq_context = attr.srq_context; 1636 atomic_inc(&pd->usecnt); 1637 atomic_set(&srq->usecnt, 0); 1638 1639 memset(&resp, 0, sizeof resp); 1640 1641 retry: 1642 if (!idr_pre_get(&ib_uverbs_srq_idr, GFP_KERNEL)) { 1643 ret = -ENOMEM; 1644 goto err_destroy; 1645 } 1646 1647 ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->uobject.id); 1648 1649 if (ret == -EAGAIN) 1650 goto retry; 1651 if (ret) 1652 goto err_destroy; 1653 1654 resp.srq_handle = uobj->uobject.id; 1655 1656 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1657 &resp, sizeof resp)) { 1658 ret = -EFAULT; 1659 goto err_idr; 1660 } 1661 1662 down(&file->mutex); 1663 list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list); 1664 up(&file->mutex); 1665 1666 up(&ib_uverbs_idr_mutex); 1667 1668 return in_len; 1669 1670 err_idr: 1671 idr_remove(&ib_uverbs_srq_idr, uobj->uobject.id); 1672 1673 err_destroy: 1674 ib_destroy_srq(srq); 1675 1676 err_up: 1677 up(&ib_uverbs_idr_mutex); 1678 1679 kfree(uobj); 1680 return ret; 1681 } 1682 1683 ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, 1684 const char __user *buf, int in_len, 1685 int out_len) 1686 { 1687 struct ib_uverbs_modify_srq cmd; 1688 struct ib_srq *srq; 1689 struct ib_srq_attr attr; 1690 int ret; 1691 1692 if (copy_from_user(&cmd, buf, sizeof cmd)) 1693 return -EFAULT; 1694 1695 down(&ib_uverbs_idr_mutex); 1696 1697 srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle); 1698 if (!srq || srq->uobject->context != file->ucontext) { 1699 ret = -EINVAL; 1700 goto out; 1701 } 1702 1703 attr.max_wr = cmd.max_wr; 1704 attr.max_sge = cmd.max_sge; 1705 attr.srq_limit = cmd.srq_limit; 1706 1707 ret = ib_modify_srq(srq, &attr, cmd.attr_mask); 1708 1709 out: 1710 up(&ib_uverbs_idr_mutex); 1711 1712 return ret ? 
ret : in_len; 1713 } 1714 1715 ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file, 1716 const char __user *buf, int in_len, 1717 int out_len) 1718 { 1719 struct ib_uverbs_destroy_srq cmd; 1720 struct ib_uverbs_destroy_srq_resp resp; 1721 struct ib_srq *srq; 1722 struct ib_uevent_object *uobj; 1723 int ret = -EINVAL; 1724 1725 if (copy_from_user(&cmd, buf, sizeof cmd)) 1726 return -EFAULT; 1727 1728 down(&ib_uverbs_idr_mutex); 1729 1730 memset(&resp, 0, sizeof resp); 1731 1732 srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle); 1733 if (!srq || srq->uobject->context != file->ucontext) 1734 goto out; 1735 1736 uobj = container_of(srq->uobject, struct ib_uevent_object, uobject); 1737 1738 ret = ib_destroy_srq(srq); 1739 if (ret) 1740 goto out; 1741 1742 idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle); 1743 1744 down(&file->mutex); 1745 list_del(&uobj->uobject.list); 1746 up(&file->mutex); 1747 1748 ib_uverbs_release_uevent(file, uobj); 1749 1750 resp.events_reported = uobj->events_reported; 1751 1752 kfree(uobj); 1753 1754 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1755 &resp, sizeof resp)) 1756 ret = -EFAULT; 1757 1758 out: 1759 up(&ib_uverbs_idr_mutex); 1760 1761 return ret ? ret : in_len; 1762 } 1763