/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <asm/uaccess.h>

#include "uverbs.h"
#include "core_priv.h"

struct uverbs_lock_class {
	struct lock_class_key key;
	char name[16];
};

static struct uverbs_lock_class pd_lock_class = { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class = { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class = { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class = { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };

/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr operations.  When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to the
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */
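/*
 * A sketch of the interleaving the "live" flag guards against,
 * assuming the reader has already taken its kref under
 * ib_uverbs_idr_lock:
 *
 *	reader				destroyer
 *	------				---------
 *	idr_find() + kref_get()
 *					down_write(&uobj->mutex)
 *					destroy the hardware object
 *					uobj->live = 0
 *					up_write(&uobj->mutex)
 *	down_read(&uobj->mutex)
 *	uobj->live == 0, so up_read()
 *	and put_uobj()
 *
 * The kref keeps the ib_uobject storage itself valid until the last
 * reference is dropped.
 */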
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
	uobj->user_handle = user_handle;
	uobj->context = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
	uobj->live = 0;
}

static void release_uobj(struct kref *kref)
{
	kfree(container_of(kref, struct ib_uobject, ref));
}

static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}

static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}

static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}
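/*
 * Note: idr_preload() below pins preallocated idr nodes to this CPU
 * so that idr_alloc() can safely use GFP_NOWAIT while the
 * ib_uverbs_idr_lock spinlock is held.
 */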
static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&ib_uverbs_idr_lock);

	ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		uobj->id = ret;

	spin_unlock(&ib_uverbs_idr_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}

void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}

static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	spin_lock(&ib_uverbs_idr_lock);
	uobj = idr_find(idr, id);
	if (uobj) {
		if (uobj->context == context)
			kref_get(&uobj->ref);
		else
			uobj = NULL;
	}
	spin_unlock(&ib_uverbs_idr_lock);

	return uobj;
}

static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}

static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}

static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
			  int nested)
{
	struct ib_uobject *uobj;

	uobj = idr_read_uobj(idr, id, context, nested);
	return uobj ? uobj->object : NULL;
}

static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}

static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
	return uobj ? uobj->object : NULL;
}

static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}

static void put_qp_write(struct ib_qp *qp)
{
	put_uobj_write(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}
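/*
 * Unlike the other idr_read_*() helpers, idr_read_xrcd() hands the
 * ib_uobject back through *uobj: an XRCD may be shared, so struct
 * ib_xrcd carries no uobject back-pointer, and callers must pass the
 * uobject to put_xrcd_read() themselves.
 */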
static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
				     struct ib_uobject **uobj)
{
	*uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
	return *uobj ? (*uobj)->object : NULL;
}

static void put_xrcd_read(struct ib_uobject *uobj)
{
	put_uobj_read(uobj);
}

ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct ib_device_attr dev_attr;
#endif
	struct ib_ucontext *ucontext;
	struct file *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ib_dev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);
	INIT_LIST_HEAD(&ucontext->xrcd_list);
	INIT_LIST_HEAD(&ucontext->rule_list);
	rcu_read_lock();
	ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	ret = ib_query_device(ib_dev, &dev_attr);
	if (ret)
		goto err_free;
	if (!(dev_attr.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;

#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_event_file(file, ib_dev, 1);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}
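/*
 * Ordering note for ib_uverbs_get_context() above: the async fd is
 * only fd_install()ed after the response has been copied out, because
 * an installed fd cannot be taken back; until then the error paths
 * can still undo the reservation with put_unused_fd().
 */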
static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver = attr->fw_ver;
	resp->node_guid = ib_dev->node_guid;
	resp->sys_image_guid = attr->sys_image_guid;
	resp->max_mr_size = attr->max_mr_size;
	resp->page_size_cap = attr->page_size_cap;
	resp->vendor_id = attr->vendor_id;
	resp->vendor_part_id = attr->vendor_part_id;
	resp->hw_ver = attr->hw_ver;
	resp->max_qp = attr->max_qp;
	resp->max_qp_wr = attr->max_qp_wr;
	resp->device_cap_flags = attr->device_cap_flags;
	resp->max_sge = attr->max_sge;
	resp->max_sge_rd = attr->max_sge_rd;
	resp->max_cq = attr->max_cq;
	resp->max_cqe = attr->max_cqe;
	resp->max_mr = attr->max_mr;
	resp->max_pd = attr->max_pd;
	resp->max_qp_rd_atom = attr->max_qp_rd_atom;
	resp->max_ee_rd_atom = attr->max_ee_rd_atom;
	resp->max_res_rd_atom = attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
	resp->atomic_cap = attr->atomic_cap;
	resp->max_ee = attr->max_ee;
	resp->max_rdd = attr->max_rdd;
	resp->max_mw = attr->max_mw;
	resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
	resp->max_mcast_grp = attr->max_mcast_grp;
	resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
	resp->max_ah = attr->max_ah;
	resp->max_fmr = attr->max_fmr;
	resp->max_map_per_fmr = attr->max_map_per_fmr;
	resp->max_srq = attr->max_srq;
	resp->max_srq_wr = attr->max_srq_wr;
	resp->max_srq_sge = attr->max_srq_sge;
	resp->max_pkeys = attr->max_pkeys;
	resp->local_ca_ack_delay = attr->local_ca_ack_delay;
	resp->phys_port_cnt = ib_dev->phys_port_cnt;
}

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_device_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_device(ib_dev, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, ib_dev, &resp, &attr);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;
	resp.lid = attr.lid;
	resp.sm_lid = attr.sm_lid;
	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;
	resp.link_layer = rdma_port_get_link_layer(ib_dev,
						   cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
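/*
 * The object-creating handlers below all follow the same shape:
 * allocate and init_uobj() a new uobject, take its rwsem for writing,
 * create the hardware object, publish a handle with idr_add_uobj(),
 * copy the response to user space, link the uobject into the
 * per-context list, and only then set ->live and drop the rwsem.
 * Error paths unwind the same steps in reverse.
 */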
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
	down_write(&uobj->mutex);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device = ib_dev;
	pd->uobject = uobj;
	pd->local_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	pd = uobj->object;

	if (atomic_read(&pd->usecnt)) {
		ret = -EBUSY;
		goto err_put;
	}

	ret = pd->device->dealloc_pd(uobj->object);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
	if (ret)
		goto err_put;

	uobj->live = 0;
	put_uobj_write(uobj);

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;

err_put:
	put_uobj_write(uobj);
	return ret;
}
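/*
 * XRC domains can be shared between processes that open the same
 * file: the rb-tree below maps that file's inode to its ib_xrcd,
 * with xrcd->usecnt counting the inode's users.
 */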
struct xrcd_table_entry {
	struct rb_node node;
	struct ib_xrcd *xrcd;
	struct inode *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}
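/*
 * For ib_uverbs_open_xrcd() below, cmd.fd == -1 asks for a private
 * XRCD with no backing inode; otherwise cmd.fd names the file whose
 * inode keys the shared XRCD in the table above, with cmd.oflags
 * interpreted in the O_CREAT/O_EXCL style.
 */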
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata udata;
	struct ib_uxrcd_object *obj;
	struct ib_xrcd *xrcd = NULL;
	struct fd f = {NULL, 0};
	struct inode *inode = NULL;
	int ret = 0;
	int new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj) {
		ret = -ENOMEM;
		goto err_tree_mutex_unlock;
	}

	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

	down_write(&obj->uobject.mutex);

	if (!xrcd) {
		xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode = inode;
		xrcd->device = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_insert_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;
	up_write(&obj->uobject.mutex);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_insert_xrcd:
	idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
	ib_dealloc_xrcd(xrcd);

err:
	put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}

ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject *uobj;
	struct ib_xrcd *xrcd = NULL;
	struct inode *inode = NULL;
	struct ib_uxrcd_object *obj;
	int live;
	int ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->device->xrcd_tree_mutex);
	uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
	if (!uobj) {
		ret = -EINVAL;
		goto out;
	}

	xrcd = uobj->object;
	inode = xrcd->inode;
	obj = container_of(uobj, struct ib_uxrcd_object, uobject);
	if (atomic_read(&obj->refcnt)) {
		put_uobj_write(uobj);
		ret = -EBUSY;
		goto out;
	}

	if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
		ret = ib_dealloc_xrcd(uobj->object);
		if (!ret)
			uobj->live = 0;
	}

	live = uobj->live;
	if (inode && ret)
		atomic_inc(&xrcd->usecnt);

	put_uobj_write(uobj);

	if (ret)
		goto out;

	if (inode && !live)
		xrcd_table_delete(file->device, inode);

	idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);
	ret = in_len;

out:
	mutex_unlock(&file->device->xrcd_tree_mutex);
	return ret;
}
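/*
 * Not a command handler: presumably reached from the uverbs cleanup
 * path when a context is torn down with the XRCD still open.  Drops
 * one inode reference on the XRCD and destroys it once the last
 * opener is gone.
 */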
void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			    struct ib_xrcd *xrcd)
{
	struct inode *inode;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return;

	ib_dealloc_xrcd(xrcd);

	if (inode)
		xrcd_table_delete(dev, inode);
}

ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		struct ib_device_attr attr;

		ret = ib_query_device(pd->device, &attr);
		if (ret || !(attr.device_cap_flags &
			     IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device = pd->device;
	mr->pd = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);
	atomic_set(&mr->usecnt, 0);

	uobj->object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}
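/*
 * For ib_uverbs_rereg_mr() below, cmd.flags selects which attributes
 * change: IB_MR_REREG_TRANS (new start/length/hca_va, which must
 * share a page offset), IB_MR_REREG_PD and IB_MR_REREG_ACCESS may be
 * combined, but at least one must be set and none outside
 * IB_MR_REREG_SUPPORTED.
 */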
ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata udata;
	struct ib_pd *pd = NULL;
	struct ib_mr *mr;
	struct ib_pd *old_pd;
	int ret;
	struct ib_uobject *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle,
			      file->ucontext);

	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	if (atomic_read(&mr->usecnt)) {
		ret = -EBUSY;
		goto put_uobj_pd;
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		put_pd_read(pd);

put_uobjs:

	put_uobj_write(mr->uobject);

	return ret;
}

ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr *mr;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mw *mw;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mw = pd->device->alloc_mw(pd, cmd.mw_type);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device = pd->device;
	mw->pd = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;
	ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
	if (ret)
		goto err_unalloc;

	memset(&resp, 0, sizeof(resp));
	resp.rkey = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mw_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
	ib_dealloc_mw(mw);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_mw *mw;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mw = uobj->object;

	ret = ib_dealloc_mw(mw);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct file *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		return ret;
	resp.fd = ret;

	filp = ib_uverbs_alloc_event_file(file, ib_dev, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(resp.fd);
		return PTR_ERR(filp);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	fd_install(resp.fd, filp);
	return in_len;
}
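/*
 * create_cq() below is shared by the legacy write() command and the
 * extended command path: cmd_sz says how much of the ex command
 * structure the caller actually supplied (so optional fields such as
 * ->flags are only read when present), and the cb callback writes the
 * response back in whichever format the caller speaks.
 */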
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object *obj;
	struct ib_uverbs_event_file *ev_file = NULL;
	struct ib_cq *cq;
	int ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_uobj(&obj->uobject, cmd->user_handle, file->ucontext, &cq_lock_class);
	down_write(&obj->uobject.mutex);

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file = file;
	obj->comp_events_reported = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr,
			       file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device = ib_dev;
	cq->uobject = &obj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);

	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return obj;

err_cb:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);

	return ERR_PTR(ret);
}

static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_ex_create_cq cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata ucore;
	struct ib_udata uhw;
	struct ib_ucq_object *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, cmd.response, sizeof(cmd), sizeof(resp));

	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}
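/*
 * Extended commands report resp.response_length back to user space so
 * it can tell how much of the (growable) response structure this
 * kernel actually filled in.
 */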
static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq cmd;
	struct ib_ucq_object *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return 0;
}

ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq cmd;
	struct ib_uverbs_resize_cq_resp resp;
	struct ib_udata udata;
	struct ib_cq *cq;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}
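/*
 * struct ib_wc is kernel-internal, so copy_wc_to_user() below
 * translates it field by field into the fixed struct ib_uverbs_wc
 * ABI; note how the qp pointer becomes the qp_num user space knows.
 */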
static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id = wc->wr_id;
	tmp.status = wc->status;
	tmp.opcode = wc->opcode;
	tmp.vendor_err = wc->vendor_err;
	tmp.byte_len = wc->byte_len;
	tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
	tmp.qp_num = wc->qp->qp_num;
	tmp.src_qp = wc->src_qp;
	tmp.wc_flags = wc->wc_flags;
	tmp.pkey_index = wc->pkey_index;
	tmp.slid = wc->slid;
	tmp.sl = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num = wc->port_num;
	tmp.reserved = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user *header_ptr;
	u8 __user *data_ptr;
	struct ib_cq *cq;
	struct ib_wc wc;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	put_cq_read(cq);
	return ret;
}
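/*
 * ib_uverbs_poll_cq() above streams the completions directly behind
 * the response header and writes the header, with the final count,
 * only after polling finishes.
 */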
ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	put_cq_read(cq);

	return in_len;
}

ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject *uobj;
	struct ib_cq *cq;
	struct ib_ucq_object *obj;
	struct ib_uverbs_event_file *ev_file;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq = uobj->object;
	ev_file = cq->cq_context;
	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, obj);

	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
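/*
 * ABI quirk in ib_uverbs_create_qp() below: for IB_QPT_XRC_TGT QPs,
 * cmd.pd_handle actually carries an XRCD handle, and IB_QPT_XRC_INI
 * QPs have their receive queue sizes forced to zero, since the XRC
 * send and receive ends are separate QPs.
 */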
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_device *device;
	struct ib_pd *pd = NULL;
	struct ib_xrcd *xrcd = NULL;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_cq *scq = NULL, *rcq = NULL;
	struct ib_srq *srq = NULL;
	struct ib_qp *qp;
	struct ib_qp_init_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kzalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd.qp_type == IB_QPT_XRC_TGT) {
		xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd.qp_type == IB_QPT_XRC_INI) {
			cmd.max_recv_wr = cmd.max_recv_sge = 0;
		} else {
			if (cmd.is_srq) {
				srq = idr_read_srq(cmd.srq_handle, file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (cmd.recv_cq_handle != cmd.send_cq_handle) {
				rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
				if (!rcq) {
					ret = -EINVAL;
					goto err_put;
				}
			}
		}

		scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
		rcq = rcq ?: scq;
		pd = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd || !scq) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.xrcd = xrcd;
	attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd.qp_type;
	attr.create_flags = 0;

	attr.cap.max_send_wr = cmd.max_send_wr;
	attr.cap.max_recv_wr = cmd.max_recv_wr;
	attr.cap.max_send_sge = cmd.max_send_sge;
	attr.cap.max_recv_sge = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd.qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, &udata);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd.qp_type != IB_QPT_XRC_TGT) {
		qp->real_qp = qp;
		qp->device = device;
		qp->pd = pd;
		qp->send_cq = attr.send_cq;
		qp->recv_cq = attr.recv_cq;
		qp->srq = attr.srq;
		qp->event_handler = attr.event_handler;
		qp->qp_context = attr.qp_context;
		qp->qp_type = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;
	resp.max_recv_sge = attr.cap.max_recv_sge;
	resp.max_send_sge = attr.cap.max_send_sge;
	resp.max_recv_wr = attr.cap.max_recv_wr;
	resp.max_send_wr = attr.cap.max_send_wr;
	resp.max_inline_data = attr.cap.max_inline_data;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		put_xrcd_read(xrcd_uobj);
	}

	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	if (xrcd)
		put_xrcd_read(xrcd_uobj);
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
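/*
 * ib_uverbs_open_qp() below attaches to an existing XRC_TGT QP,
 * identified by cmd.qpn inside the XRCD, instead of creating a new
 * one; here too cmd.pd_handle carries the XRCD handle.
 */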
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_xrcd *xrcd;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_qp *qp;
	struct ib_qp_open_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
	if (!xrcd) {
		ret = -EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.qp_num = cmd.qpn;
	attr.qp_type = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_remove;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	put_xrcd_read(xrcd_uobj);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_remove:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	put_xrcd_read(xrcd_uobj);
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	struct ib_qp_init_attr *init_attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	put_qp_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state = attr->qp_state;
	resp.cur_qp_state = attr->cur_qp_state;
	resp.path_mtu = attr->path_mtu;
	resp.path_mig_state = attr->path_mig_state;
	resp.qkey = attr->qkey;
	resp.rq_psn = attr->rq_psn;
	resp.sq_psn = attr->sq_psn;
	resp.dest_qp_num = attr->dest_qp_num;
	resp.qp_access_flags = attr->qp_access_flags;
	resp.pkey_index = attr->pkey_index;
	resp.alt_pkey_index = attr->alt_pkey_index;
	resp.sq_draining = attr->sq_draining;
	resp.max_rd_atomic = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer = attr->min_rnr_timer;
	resp.port_num = attr->port_num;
	resp.timeout = attr->timeout;
	resp.retry_cnt = attr->retry_cnt;
	resp.rnr_retry = attr->rnr_retry;
	resp.alt_port_num = attr->alt_port_num;
	resp.alt_timeout = attr->alt_timeout;

	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
	resp.dest.flow_label = attr->ah_attr.grh.flow_label;
	resp.dest.sgid_index = attr->ah_attr.grh.sgid_index;
	resp.dest.hop_limit = attr->ah_attr.grh.hop_limit;
	resp.dest.traffic_class = attr->ah_attr.grh.traffic_class;
	resp.dest.dlid = attr->ah_attr.dlid;
	resp.dest.sl = attr->ah_attr.sl;
	resp.dest.src_path_bits = attr->ah_attr.src_path_bits;
	resp.dest.static_rate = attr->ah_attr.static_rate;
	resp.dest.is_global = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
	resp.dest.port_num = attr->ah_attr.port_num;

	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
	resp.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label;
	resp.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index;
	resp.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit;
	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
	resp.alt_dest.dlid = attr->alt_ah_attr.dlid;
	resp.alt_dest.sl = attr->alt_ah_attr.sl;
	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
	resp.alt_dest.static_rate = attr->alt_ah_attr.static_rate;
	resp.alt_dest.is_global = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
	resp.alt_dest.port_num = attr->alt_ah_attr.port_num;

	resp.max_send_wr = init_attr->cap.max_send_wr;
	resp.max_recv_wr = init_attr->cap.max_recv_wr;
	resp.max_send_sge = init_attr->cap.max_send_sge;
	resp.max_recv_sge = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}
/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}

ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_udata udata;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state = cmd.qp_state;
	attr->cur_qp_state = cmd.cur_qp_state;
	attr->path_mtu = cmd.path_mtu;
	attr->path_mig_state = cmd.path_mig_state;
	attr->qkey = cmd.qkey;
	attr->rq_psn = cmd.rq_psn;
	attr->sq_psn = cmd.sq_psn;
	attr->dest_qp_num = cmd.dest_qp_num;
	attr->qp_access_flags = cmd.qp_access_flags;
	attr->pkey_index = cmd.pkey_index;
	attr->alt_pkey_index = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer = cmd.min_rnr_timer;
	attr->port_num = cmd.port_num;
	attr->timeout = cmd.timeout;
	attr->retry_cnt = cmd.retry_cnt;
	attr->rnr_retry = cmd.rnr_retry;
	attr->alt_port_num = cmd.alt_port_num;
	attr->alt_timeout = cmd.alt_timeout;

	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
	attr->ah_attr.dlid = cmd.dest.dlid;
	attr->ah_attr.sl = cmd.dest.sl;
	attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
	attr->ah_attr.static_rate = cmd.dest.static_rate;
	attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num = cmd.dest.port_num;

	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;

	if (qp->real_qp == qp) {
		ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
		if (ret)
			goto release_qp;
		ret = qp->device->modify_qp(qp, attr,
			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
	} else {
		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
	}

	if (ret)
		goto release_qp;

	ret = in_len;

release_qp:
	put_qp_read(qp);

out:
	kfree(attr);

	return ret;
}
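/*
 * In ib_uverbs_modify_qp() above, qp->real_qp != qp for QPs that were
 * attached with ib_uverbs_open_qp(); those are modified through
 * ib_modify_qp() on the shared real QP and carry no driver-private
 * udata.
 */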
	if (qp->real_qp == qp) {
		ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
		if (ret)
			goto release_qp;
		ret = qp->device->modify_qp(qp, attr,
			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
	} else {
		ret = ib_modify_qp(qp, attr,
				   modify_qp_mask(qp->qp_type, cmd.attr_mask));
	}

	if (ret)
		goto release_qp;

	ret = in_len;

release_qp:
	put_qp_read(qp);

out:
	kfree(attr);

	return ret;
}

ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject *uobj;
	struct ib_qp *qp;
	struct ib_uqp_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	qp = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

	/* The QP must be detached from all multicast groups first. */
	if (!list_empty(&obj->mcast_list)) {
		put_uobj_write(uobj);
		return -EBUSY;
	}

	ret = ib_destroy_qp(qp);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (obj->uxrcd)
		atomic_dec(&obj->uxrcd->refcnt);

	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, &obj->uevent);

	resp.events_reported = obj->uevent.events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr *user_wr;
	struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp *qp;
	int i, sg_ind;
	int is_ud;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto out_put;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;
		next->opcode = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

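		/*
		 * UD work requests are restricted to plain sends: anything
		 * that references remote memory (RDMA, atomics) requires a
		 * connected QP.  UD sends also carry an address handle,
		 * which must be looked up and pinned before posting.
		 */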
		if (is_ud) {
			/*
			 * Clear the AH pointer first so the unwind loop at
			 * out_put below never sees uninitialized memory if
			 * we bail out before taking the AH reference.
			 */
			next->wr.ud.ah = NULL;

			if (next->opcode != IB_WR_SEND &&
			    next->opcode != IB_WR_SEND_WITH_IMM) {
				ret = -EINVAL;
				goto out_put;
			}

			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
						     file->ucontext);
			if (!next->wr.ud.ah) {
				ret = -EINVAL;
				goto out_put;
			}
			next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn;
			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
			if (next->opcode == IB_WR_SEND_WITH_IMM)
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
		} else {
			switch (next->opcode) {
			case IB_WR_RDMA_WRITE_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				/* fall through: also needs the RDMA fields */
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
				next->wr.rdma.remote_addr =
					user_wr->wr.rdma.remote_addr;
				next->wr.rdma.rkey =
					user_wr->wr.rdma.rkey;
				break;
			case IB_WR_SEND_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				next->ex.invalidate_rkey =
					user_wr->ex.invalidate_rkey;
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				next->wr.atomic.remote_addr =
					user_wr->wr.atomic.remote_addr;
				next->wr.atomic.compare_add =
					user_wr->wr.atomic.compare_add;
				next->wr.atomic.swap = user_wr->wr.atomic.swap;
				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
				/* fall through: nothing else to copy */
			case IB_WR_SEND:
				break;
			default:
				ret = -EINVAL;
				goto out_put;
			}
		}

		if (next->num_sge) {
			/* The SGEs were allocated right after the WR above. */
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	/*
	 * On failure, report how far the driver got: bad_wr ends up as
	 * the 1-based position of the WR that was rejected.
	 */
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	put_qp_read(qp);

	while (wr) {
		if (is_ud && wr->wr.ud.ah)
			put_ah_read(wr->wr.ud.ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}
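
/*
 * A post_recv/post_srq_recv command is laid out in user memory as:
 *
 *	struct ib_uverbs_post_recv	cmd;  (already consumed by caller)
 *	struct ib_uverbs_recv_wr	wr[wr_count];  each wqe_size bytes
 *	struct ib_uverbs_sge		sg_list[sge_count];
 *
 * Unmarshall that buffer into a kernel-side ib_recv_wr chain.  Each
 * ib_recv_wr is allocated together with its scatter list, so a single
 * kfree() releases both.  Returns the chain head or an ERR_PTR().
 */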
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr *wr = NULL, *last, *next;
	int sg_ind;
	int i;
	int ret;

	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_qp *qp;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	put_qp_read(qp);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
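
/*
 * Same as ib_uverbs_post_recv(), except that the work requests are
 * posted to a shared receive queue instead of a QP's own receive queue.
 */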
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_srq *srq;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	put_srq_read(srq);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah cmd;
	struct ib_uverbs_create_ah_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_ah *ah;
	struct ib_ah_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

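	/*
	 * Translate the user-visible AH attributes into an ib_ah_attr.
	 * The VLAN and destination MAC are deliberately zeroed: userspace
	 * does not supply them, and (on RoCE ports) they are expected to
	 * be filled in by lower layers during L2 address resolution.
	 */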
	attr.dlid = cmd.attr.dlid;
	attr.sl = cmd.attr.sl;
	attr.src_path_bits = cmd.attr.src_path_bits;
	attr.static_rate = cmd.attr.static_rate;
	attr.ah_flags = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num = cmd.attr.port_num;
	attr.grh.flow_label = cmd.attr.grh.flow_label;
	attr.grh.sgid_index = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	attr.vlan_id = 0;
	memset(&attr.dmac, 0, sizeof(attr.dmac));
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject = uobj;
	uobj->object = ah;

	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
	ib_destroy_ah(ah);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_ah *ah;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	ah = uobj->object;

	ret = ib_destroy_ah(ah);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp *qp;
	struct ib_uqp_object *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* Attaching the same group twice is a harmless no-op. */
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}
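
/*
 * The mcast_list bookkeeping mirrors the device state: an entry is
 * added only after ib_attach_mcast() succeeds and removed only after
 * ib_detach_mcast() succeeds, which lets ib_uverbs_destroy_qp() refuse
 * with -EBUSY while any attachment is still outstanding.
 */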
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object *obj;
	struct ib_qp *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
	if (ret)
		goto out_put;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			break;
		}

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}

static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec)
{
	if (kern_spec->reserved)
		return -EINVAL;

	ib_spec->type = kern_spec->type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
		if (ib_spec->eth.size != kern_spec->eth.size)
			return -EINVAL;
		memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
		       sizeof(struct ib_flow_eth_filter));
		memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
		       sizeof(struct ib_flow_eth_filter));
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
		if (ib_spec->ipv4.size != kern_spec->ipv4.size)
			return -EINVAL;
		memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
		       sizeof(struct ib_flow_ipv4_filter));
		memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
		       sizeof(struct ib_flow_ipv4_filter));
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
		if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
			return -EINVAL;
		memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
		       sizeof(struct ib_flow_tcp_udp_filter));
		memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
		       sizeof(struct ib_flow_tcp_udp_filter));
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject *uobj;
	struct ib_flow *flow_id;
	struct ib_uverbs_flow_attr *kern_flow_attr;
	struct ib_flow_attr *flow_attr;
	struct ib_qp *qp;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	ucore->inbuf += sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (cmd.comp_mask)
		return -EINVAL;

	if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
	     !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

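	/*
	 * flow_attr.size is the total number of spec bytes that follow
	 * the attr header.  It must fit in what userspace actually sent
	 * and cannot exceed num_of_specs entries of the largest defined
	 * spec type, or the spec walk below could run past the buffer.
	 */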
	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj) {
		err = -ENOMEM;
		goto err_free_attr;
	}
	init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
	down_write(&uobj->mutex);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->qp = qp;
	flow_id->uobject = uobj;
	uobj->object = flow_id;

	err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
	if (err)
		goto destroy_flow;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	put_qp_read(qp);
	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->rule_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;
err_copy:
	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
destroy_flow:
	ib_destroy_flow(flow_id);
err_free:
	kfree(flow_attr);
err_put:
	put_qp_read(qp);
err_uobj:
	put_uobj_write(uobj);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}
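
/*
 * Extended (ex_) commands receive two ib_udata buffers: ucore for the
 * core ABI structures and uhw for any hardware-specific trailer, and
 * they return 0 on success rather than the number of bytes consumed.
 */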
int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_destroy_flow cmd;
	struct ib_flow *flow_id;
	struct ib_uobject *uobj;
	int ret;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
			      file->ucontext);
	if (!uobj)
		return -EINVAL;
	flow_id = uobj->object;

	ret = ib_destroy_flow(flow_id);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return ret;
}

static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object *obj;
	struct ib_pd *pd;
	struct ib_srq *srq;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr attr;
	int ret;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd->srq_type == IB_SRQT_XRC) {
		attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);

		attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
		if (!attr.ext.xrc.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = idr_read_pd(cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler = ib_uverbs_srq_event_handler;
	attr.srq_context = file;
	attr.srq_type = cmd->srq_type;
	attr.attr.max_wr = cmd->max_wr;
	attr.attr.max_sge = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device = pd->device;
	srq->pd = pd;
	srq->srq_type = cmd->srq_type;
	srq->uobject = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context = attr.srq_context;

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.cq = attr.ext.xrc.cq;
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.cq->usecnt);
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr = attr.attr.max_wr;
	resp.max_sge = attr.attr.max_sge;
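	/*
	 * An XRC SRQ is addressed by remote senders through its SRQ
	 * number, so that number must be handed back to userspace; a
	 * basic SRQ has no equivalent on-the-wire identifier.
	 */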
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (cmd->srq_type == IB_SRQT_XRC) {
		put_uobj_read(xrcd_uobj);
		put_cq_read(attr.ext.xrc.cq);
	}
	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_srq(srq);

err_put:
	put_pd_read(pd);

err_put_cq:
	if (cmd->srq_type == IB_SRQT_XRC)
		put_cq_read(attr.ext.xrc.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		put_uobj_read(xrcd_uobj);
	}

err:
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq cmd;
	struct ib_uverbs_create_xsrq xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	xcmd.response = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type = IB_SRQT_BASIC;
	xcmd.pd_handle = cmd.pd_handle;
	xcmd.max_wr = cmd.max_wr;
	xcmd.max_sge = cmd.max_sge;
	xcmd.srq_limit = cmd.srq_limit;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_xsrq cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata udata;
	struct ib_srq *srq;
	struct ib_srq_attr attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	put_srq_read(srq);

	return ret ? ret : in_len;
}
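
/*
 * Querying only needs a read reference on the SRQ: nothing about the
 * object's state is changed, so concurrent lookups remain allowed.
 */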
ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr attr;
	struct ib_srq *srq;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	put_srq_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr = attr.max_wr;
	resp.max_sge = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject *uobj;
	struct ib_srq *srq;
	struct ib_uevent_object *obj;
	int ret = -EINVAL;
	struct ib_usrq_object *us;
	enum ib_srq_type srq_type;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);
	srq_type = srq->srq_type;

	ret = ib_destroy_srq(srq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (srq_type == IB_SRQT_XRC) {
		us = container_of(obj, struct ib_usrq_object, uevent);
		atomic_dec(&us->uxrcd->refcnt);
	}

	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, obj);

	memset(&resp, 0, sizeof resp);
	resp.events_reported = obj->events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

	return ret ? ret : in_len;
}
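
/*
 * Extended query_device builds a variable-length response:
 * resp.response_length grows as each optional block (ODP caps,
 * timestamp mask, HCA core clock) is appended, and only that many
 * bytes are copied back.  An older userspace with a smaller out
 * buffer therefore simply receives the prefix it knows about.
 */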
int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_query_device_resp resp;
	struct ib_uverbs_ex_query_device cmd;
	struct ib_device_attr attr;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	resp.response_length = offsetof(typeof(resp), odp_caps);

	if (ucore->outlen < resp.response_length)
		return -ENOSPC;

	memset(&attr, 0, sizeof(attr));

	err = ib_dev->query_device(ib_dev, &attr, uhw);
	if (err)
		return err;

	copy_query_dev_fields(file, ib_dev, &resp.base, &attr);
	resp.comp_mask = 0;

	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
		goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
	resp.odp_caps.reserved = 0;
#else
	memset(&resp.odp_caps, 0, sizeof(resp.odp_caps));
#endif
	resp.response_length += sizeof(resp.odp_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
		goto end;

	resp.timestamp_mask = attr.timestamp_mask;
	resp.response_length += sizeof(resp.timestamp_mask);

	if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
		goto end;

	resp.hca_core_clock = attr.hca_core_clock;
	resp.response_length += sizeof(resp.hca_core_clock);

end:
	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
	if (err)
		return err;

	return 0;
}