/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <asm/uaccess.h>

#include "uverbs.h"
#include "core_priv.h"

struct uverbs_lock_class {
	struct lock_class_key key;
	char name[16];
};

static struct uverbs_lock_class pd_lock_class   = { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class   = { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class   = { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class   = { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class   = { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class   = { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class  = { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };

/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr write operations.  When an object
 *   is looked up, a reference must be taken on the object's kref
 *   before dropping this lock.  Read operations instead look the
 *   object up under rcu_read_lock(), but similarly the kref reference
 *   must be grabbed before rcu_read_unlock() is called.
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to the
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */

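/*
 * Illustrative sketch of the caller pattern this scheme implies (it
 * mirrors what the command handlers below actually do; "pd_handle" is
 * a hypothetical user-supplied handle):
 *
 *	pd = idr_read_pd(pd_handle, file->ucontext);
 *	if (!pd)
 *		return -EINVAL;
 *	... use pd with its uobject rwsem held for reading ...
 *	put_pd_read(pd);
 */
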
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
	uobj->user_handle = user_handle;
	uobj->context = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
	uobj->live = 0;
}

static void release_uobj(struct kref *kref)
{
	kfree_rcu(container_of(kref, struct ib_uobject, ref), rcu);
}

static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}

static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}

static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}

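/*
 * idr_add_uobj() uses the idr_preload()/GFP_NOWAIT idiom: the idr's
 * internal nodes are preallocated with GFP_KERNEL while sleeping is
 * still allowed, so the idr_alloc() itself can run under the
 * ib_uverbs_idr_lock spinlock without sleeping.
 */
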
static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&ib_uverbs_idr_lock);

	ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		uobj->id = ret;

	spin_unlock(&ib_uverbs_idr_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}

void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}

static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	rcu_read_lock();
	uobj = idr_find(idr, id);
	if (uobj) {
		if (uobj->context == context)
			kref_get(&uobj->ref);
		else
			uobj = NULL;
	}
	rcu_read_unlock();

	return uobj;
}

static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}

static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}

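/*
 * The typed helpers below wrap idr_read_uobj()/idr_write_uobj(): each
 * successful lookup returns with the uobject's rwsem held (read or
 * write) and must be paired with the matching put_*_read() or
 * put_*_write() call.
 */
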
static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
			  int nested)
{
	struct ib_uobject *uobj;

	uobj = idr_read_uobj(idr, id, context, nested);
	return uobj ? uobj->object : NULL;
}

static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}

static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
	return uobj ? uobj->object : NULL;
}

static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}

static void put_qp_write(struct ib_qp *qp)
{
	put_uobj_write(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}

static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
				     struct ib_uobject **uobj)
{
	*uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
	return *uobj ? (*uobj)->object : NULL;
}

static void put_xrcd_read(struct ib_uobject *uobj)
{
	put_uobj_read(uobj);
}

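/*
 * The command handlers below share one calling convention: "buf"
 * points just past the struct ib_uverbs_cmd_hdr of the write() that
 * issued the command, in_len/out_len give the remaining request and
 * response sizes, and cmd.response carries the user address the
 * response is copied back to.  A handler returns in_len on success so
 * the issuing write() appears to consume its whole buffer.
 */
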
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
	struct ib_ucontext *ucontext;
	struct file *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ib_dev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);
	INIT_LIST_HEAD(&ucontext->xrcd_list);
	INIT_LIST_HEAD(&ucontext->rule_list);
	rcu_read_lock();
	ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;

#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_event_file(file, ib_dev, 1);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}

static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver = attr->fw_ver;
	resp->node_guid = ib_dev->node_guid;
	resp->sys_image_guid = attr->sys_image_guid;
	resp->max_mr_size = attr->max_mr_size;
	resp->page_size_cap = attr->page_size_cap;
	resp->vendor_id = attr->vendor_id;
	resp->vendor_part_id = attr->vendor_part_id;
	resp->hw_ver = attr->hw_ver;
	resp->max_qp = attr->max_qp;
	resp->max_qp_wr = attr->max_qp_wr;
	resp->device_cap_flags = attr->device_cap_flags;
	resp->max_sge = attr->max_sge;
	resp->max_sge_rd = attr->max_sge_rd;
	resp->max_cq = attr->max_cq;
	resp->max_cqe = attr->max_cqe;
	resp->max_mr = attr->max_mr;
	resp->max_pd = attr->max_pd;
	resp->max_qp_rd_atom = attr->max_qp_rd_atom;
	resp->max_ee_rd_atom = attr->max_ee_rd_atom;
	resp->max_res_rd_atom = attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
	resp->atomic_cap = attr->atomic_cap;
	resp->max_ee = attr->max_ee;
	resp->max_rdd = attr->max_rdd;
	resp->max_mw = attr->max_mw;
	resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
	resp->max_mcast_grp = attr->max_mcast_grp;
	resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
	resp->max_ah = attr->max_ah;
	resp->max_fmr = attr->max_fmr;
	resp->max_map_per_fmr = attr->max_map_per_fmr;
	resp->max_srq = attr->max_srq;
	resp->max_srq_wr = attr->max_srq_wr;
	resp->max_srq_sge = attr->max_srq_sge;
	resp->max_pkeys = attr->max_pkeys;
	resp->local_ca_ack_delay = attr->local_ca_ack_delay;
	resp->phys_port_cnt = ib_dev->phys_port_cnt;
}

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;
	resp.lid = attr.lid;
	resp.sm_lid = attr.sm_lid;
	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;
	resp.link_layer = rdma_port_get_link_layer(ib_dev,
						   cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

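/*
 * Object-creating handlers follow a common sequence, visible first in
 * ib_uverbs_alloc_pd() below: allocate and init_uobj() the uobject,
 * take its rwsem for writing, create the hardware object, insert it
 * into the idr, copy the response to user space, link the uobject
 * into the per-context list, and only then mark it live and release
 * the rwsem.  The error paths unwind those steps in reverse order.
 */
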
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
	down_write(&uobj->mutex);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device = ib_dev;
	pd->uobject = uobj;
	pd->local_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

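/*
 * Destroy handlers mirror the creation sequence: look the uobject up
 * with the rwsem held for writing, destroy the hardware object, clear
 * the live flag, drop the rwsem, then remove the idr entry and list
 * linkage and drop the final reference.  Clearing "live" before the
 * rwsem is released is what makes concurrent readers back off.
 */
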
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	pd = uobj->object;

	if (atomic_read(&pd->usecnt)) {
		ret = -EBUSY;
		goto err_put;
	}

	ret = pd->device->dealloc_pd(uobj->object);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
	if (ret)
		goto err_put;

	uobj->live = 0;
	put_uobj_write(uobj);

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;

err_put:
	put_uobj_write(uobj);
	return ret;
}

struct xrcd_table_entry {
	struct rb_node node;
	struct ib_xrcd *xrcd;
	struct inode *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}

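/*
 * XRC domains may be shared between processes via a file the user
 * passes in: the rb-tree above maps that file's inode to the xrcd, so
 * a second open of the same file finds the existing domain.  The
 * handler below is therefore an "open" with O_CREAT/O_EXCL semantics
 * rather than a plain "create".
 */
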
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata udata;
	struct ib_uxrcd_object *obj;
	struct ib_xrcd *xrcd = NULL;
	struct fd f = {NULL, 0};
	struct inode *inode = NULL;
	int ret = 0;
	int new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj) {
		ret = -ENOMEM;
		goto err_tree_mutex_unlock;
	}

	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

	down_write(&obj->uobject.mutex);

	if (!xrcd) {
		xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode = inode;
		xrcd->device = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_insert_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;
	up_write(&obj->uobject.mutex);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_insert_xrcd:
	idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
	ib_dealloc_xrcd(xrcd);

err:
	put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}

ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject *uobj;
	struct ib_xrcd *xrcd = NULL;
	struct inode *inode = NULL;
	struct ib_uxrcd_object *obj;
	int live;
	int ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->device->xrcd_tree_mutex);
	uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
	if (!uobj) {
		ret = -EINVAL;
		goto out;
	}

	xrcd = uobj->object;
	inode = xrcd->inode;
	obj = container_of(uobj, struct ib_uxrcd_object, uobject);
	if (atomic_read(&obj->refcnt)) {
		put_uobj_write(uobj);
		ret = -EBUSY;
		goto out;
	}

	if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
		ret = ib_dealloc_xrcd(uobj->object);
		if (!ret)
			uobj->live = 0;
	}

	live = uobj->live;
	if (inode && ret)
		atomic_inc(&xrcd->usecnt);

	put_uobj_write(uobj);

	if (ret)
		goto out;

	if (inode && !live)
		xrcd_table_delete(file->device, inode);

	idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);
	ret = in_len;

out:
	mutex_unlock(&file->device->xrcd_tree_mutex);
	return ret;
}

void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			    struct ib_xrcd *xrcd)
{
	struct inode *inode;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return;

	ib_dealloc_xrcd(xrcd);

	if (inode)
		xrcd_table_delete(dev, inode);
}

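/*
 * For memory registration the user supplies both a virtual address
 * range (start, length) and the I/O virtual address (hca_va) it
 * should map to; ib_uverbs_reg_mr() below requires the two addresses
 * to share the same offset within a page, since the HCA maps whole
 * pages.
 */
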
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device = pd->device;
	mr->pd = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata udata;
	struct ib_pd *pd = NULL;
	struct ib_mr *mr;
	struct ib_pd *old_pd;
	int ret;
	struct ib_uobject *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || cmd.length <= 0 ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle,
			      file->ucontext);

	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (ret)
		goto put_uobj_pd;

	if (cmd.flags & IB_MR_REREG_PD) {
		atomic_inc(&pd->usecnt);
		mr->pd = pd;
		atomic_dec(&old_pd->usecnt);
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		put_pd_read(pd);

put_uobjs:
	put_uobj_write(mr->uobject);

	return ret;
}

ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr *mr;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mw *mw;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mw = pd->device->alloc_mw(pd, cmd.mw_type);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device = pd->device;
	mw->pd = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;
	ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
	if (ret)
		goto err_unalloc;

	memset(&resp, 0, sizeof(resp));
	resp.rkey = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mw_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
	uverbs_dealloc_mw(mw);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_mw *mw;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mw = uobj->object;

	ret = uverbs_dealloc_mw(mw);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

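/*
 * The completion-channel handler below allocates the fd number and
 * the event file separately and calls fd_install() only after the
 * response has been copied to user space: once fd_install() runs, the
 * fd is visible to the process and can no longer be unwound on error.
 */
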
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct file *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		return ret;
	resp.fd = ret;

	filp = ib_uverbs_alloc_event_file(file, ib_dev, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(resp.fd);
		return PTR_ERR(filp);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	fd_install(resp.fd, filp);
	return in_len;
}

static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object *obj;
	struct ib_uverbs_event_file *ev_file = NULL;
	struct ib_cq *cq;
	int ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_uobj(&obj->uobject, cmd->user_handle, file->ucontext, &cq_lock_class);
	down_write(&obj->uobject.mutex);

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file = file;
	obj->comp_events_reported = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr,
			       file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device = ib_dev;
	cq->uobject = &obj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return obj;

err_cb:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);

	return ERR_PTR(ret);
}

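/*
 * create_cq() above is shared by the legacy and the extended create
 * commands.  cmd_sz tells it how much of struct ib_uverbs_ex_create_cq
 * the caller actually provided, so optional fields such as "flags" are
 * only read when offsetof(field) + sizeof(field) fits inside cmd_sz;
 * the callbacks below then copy back either the base response or the
 * full extended response.
 */
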
static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_ex_create_cq cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata ucore;
	struct ib_udata uhw;
	struct ib_ucq_object *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd), sizeof(resp));

	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}

static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq cmd;
	struct ib_ucq_object *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return 0;
}

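/*
 * Note that ib_uverbs_resize_cq() below copies only resp.cqe back to
 * user space (sizeof resp.cqe, not sizeof resp), so only the new CQE
 * count is returned; presumably the driver-specific tail of the
 * response, if any, is left to the driver's own udata handling.
 */
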
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq cmd;
	struct ib_uverbs_resize_cq_resp resp;
	struct ib_udata udata;
	struct ib_cq *cq;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}

static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id = wc->wr_id;
	tmp.status = wc->status;
	tmp.opcode = wc->opcode;
	tmp.vendor_err = wc->vendor_err;
	tmp.byte_len = wc->byte_len;
	tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
	tmp.qp_num = wc->qp->qp_num;
	tmp.src_qp = wc->src_qp;
	tmp.wc_flags = wc->wc_flags;
	tmp.pkey_index = wc->pkey_index;
	tmp.slid = wc->slid;
	tmp.sl = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num = wc->port_num;
	tmp.reserved = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

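/*
 * ib_uverbs_poll_cq() below writes a struct ib_uverbs_poll_cq_resp
 * header followed by resp.count packed struct ib_uverbs_wc entries
 * directly into the user response buffer, polling one completion at a
 * time and converting each with copy_wc_to_user().
 */
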
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user *header_ptr;
	u8 __user *data_ptr;
	struct ib_cq *cq;
	struct ib_wc wc;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	put_cq_read(cq);
	return ret;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	put_cq_read(cq);

	return in_len;
}

ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject *uobj;
	struct ib_cq *cq;
	struct ib_ucq_object *obj;
	struct ib_uverbs_event_file *ev_file;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq = uobj->object;
	ev_file = cq->cq_context;
	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, obj);

	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

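/*
 * create_qp() handles all QP types: XRC_TGT QPs take an XRC domain
 * (passed in cmd->pd_handle) in place of a PD and are created through
 * ib_create_qp(), XRC_INI QPs carry no receive queue, and RAW_PACKET
 * QPs require CAP_NET_RAW.  As with create_cq(), cmd_sz bounds which
 * optional fields are read, and any trailing command bytes beyond the
 * known structure must be zero (the buf[0] == 0 && !memcmp(buf,
 * buf + 1, ...) test checks that the whole tail is zeroed).
 */
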
static int create_qp(struct ib_uverbs_file *file,
		     struct ib_udata *ucore,
		     struct ib_udata *uhw,
		     struct ib_uverbs_ex_create_qp *cmd,
		     size_t cmd_sz,
		     int (*cb)(struct ib_uverbs_file *file,
			       struct ib_uverbs_ex_create_qp_resp *resp,
			       struct ib_udata *udata),
		     void *context)
{
	struct ib_uqp_object *obj;
	struct ib_device *device;
	struct ib_pd *pd = NULL;
	struct ib_xrcd *xrcd = NULL;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_cq *scq = NULL, *rcq = NULL;
	struct ib_srq *srq = NULL;
	struct ib_qp *qp;
	char *buf;
	struct ib_qp_init_attr attr;
	struct ib_uverbs_ex_create_qp_resp resp;
	int ret;

	if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	obj = kzalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext,
		  &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		xrcd = idr_read_xrcd(cmd->pd_handle, file->ucontext,
				     &xrcd_uobj);
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = idr_read_srq(cmd->srq_handle,
						   file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (cmd->recv_cq_handle != cmd->send_cq_handle) {
				rcq = idr_read_cq(cmd->recv_cq_handle,
						  file->ucontext, 0);
				if (!rcq) {
					ret = -EINVAL;
					goto err_put;
				}
			}
		}

		scq = idr_read_cq(cmd->send_cq_handle, file->ucontext, !!rcq);
		rcq = rcq ?: scq;
		pd = idr_read_pd(cmd->pd_handle, file->ucontext);
		if (!pd || !scq) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.xrcd = xrcd;
	attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					     IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd->qp_type;
	attr.create_flags = 0;

	attr.cap.max_send_wr = cmd->max_send_wr;
	attr.cap.max_recv_wr = cmd->max_recv_wr;
	attr.cap.max_send_sge = cmd->max_send_sge;
	attr.cap.max_recv_sge = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				  IB_QP_CREATE_CROSS_CHANNEL |
				  IB_QP_CREATE_MANAGED_SEND |
				  IB_QP_CREATE_MANAGED_RECV)) {
		ret = -EINVAL;
		goto err_put;
	}

	buf = (void *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {
			ret = -EINVAL;
			goto err_put;
		}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, uhw);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		qp->real_qp = qp;
		qp->device = device;
		qp->pd = pd;
		qp->send_cq = attr.send_cq;
		qp->recv_cq = attr.recv_cq;
		qp->srq = attr.srq;
		qp->event_handler = attr.event_handler;
		qp->qp_context = attr.qp_context;
		qp->qp_type = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn = qp->qp_num;
	resp.base.qp_handle = obj->uevent.uobject.id;
	resp.base.max_recv_sge = attr.cap.max_recv_sge;
	resp.base.max_send_sge = attr.cap.max_send_sge;
	resp.base.max_recv_wr = attr.cap.max_recv_wr;
	resp.base.max_send_wr = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, &resp, ucore);
	if (ret)
		goto err_cb;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		put_xrcd_read(xrcd_uobj);
	}

	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;

err_cb:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	if (xrcd)
		put_xrcd_read(xrcd_uobj);
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
				  struct ib_uverbs_ex_create_qp_resp *resp,
				  struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_ex_create_qp cmd_ex;
	struct ib_udata ucore;
	struct ib_udata uhw;
	ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
	int err;

	if (out_len < resp_size)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd),
		   resp_size);
	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + resp_size,
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - resp_size);

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.pd_handle = cmd.pd_handle;
	cmd_ex.send_cq_handle = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
	cmd_ex.srq_handle = cmd.srq_handle;
	cmd_ex.max_send_wr = cmd.max_send_wr;
	cmd_ex.max_recv_wr = cmd.max_recv_wr;
	cmd_ex.max_send_sge = cmd.max_send_sge;
	cmd_ex.max_recv_sge = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all = cmd.sq_sig_all;
	cmd_ex.qp_type = cmd.qp_type;
	cmd_ex.is_srq = cmd.is_srq;

	err = create_qp(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), is_srq) +
			sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
			NULL);

	if (err)
		return err;

	return in_len;
}

static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
				     struct ib_uverbs_ex_create_qp_resp *resp,
				     struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_uverbs_ex_create_qp cmd = {0};
	int err;

	if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
			    sizeof(cmd.comp_mask)))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	err = create_qp(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_qp_cb, NULL);

	if (err)
		return err;

	return 0;
}

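/*
 * ib_uverbs_open_qp() below attaches to an existing shareable XRC
 * target QP through its XRC domain rather than creating a new one;
 * the uxrcd refcount keeps the domain alive while the QP is open.
 */
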
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_xrcd *xrcd;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_qp *qp;
	struct ib_qp_open_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
	if (!xrcd) {
		ret = -EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.qp_num = cmd.qpn;
	attr.qp_type = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_remove;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	put_xrcd_read(xrcd_uobj);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_remove:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	put_xrcd_read(xrcd_uobj);
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

2189 resp.qp_access_flags = attr->qp_access_flags; 2190 resp.pkey_index = attr->pkey_index; 2191 resp.alt_pkey_index = attr->alt_pkey_index; 2192 resp.sq_draining = attr->sq_draining; 2193 resp.max_rd_atomic = attr->max_rd_atomic; 2194 resp.max_dest_rd_atomic = attr->max_dest_rd_atomic; 2195 resp.min_rnr_timer = attr->min_rnr_timer; 2196 resp.port_num = attr->port_num; 2197 resp.timeout = attr->timeout; 2198 resp.retry_cnt = attr->retry_cnt; 2199 resp.rnr_retry = attr->rnr_retry; 2200 resp.alt_port_num = attr->alt_port_num; 2201 resp.alt_timeout = attr->alt_timeout; 2202 2203 memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16); 2204 resp.dest.flow_label = attr->ah_attr.grh.flow_label; 2205 resp.dest.sgid_index = attr->ah_attr.grh.sgid_index; 2206 resp.dest.hop_limit = attr->ah_attr.grh.hop_limit; 2207 resp.dest.traffic_class = attr->ah_attr.grh.traffic_class; 2208 resp.dest.dlid = attr->ah_attr.dlid; 2209 resp.dest.sl = attr->ah_attr.sl; 2210 resp.dest.src_path_bits = attr->ah_attr.src_path_bits; 2211 resp.dest.static_rate = attr->ah_attr.static_rate; 2212 resp.dest.is_global = !!(attr->ah_attr.ah_flags & IB_AH_GRH); 2213 resp.dest.port_num = attr->ah_attr.port_num; 2214 2215 memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16); 2216 resp.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label; 2217 resp.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index; 2218 resp.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit; 2219 resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class; 2220 resp.alt_dest.dlid = attr->alt_ah_attr.dlid; 2221 resp.alt_dest.sl = attr->alt_ah_attr.sl; 2222 resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits; 2223 resp.alt_dest.static_rate = attr->alt_ah_attr.static_rate; 2224 resp.alt_dest.is_global = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH); 2225 resp.alt_dest.port_num = attr->alt_ah_attr.port_num; 2226 2227 resp.max_send_wr = init_attr->cap.max_send_wr; 2228 resp.max_recv_wr = init_attr->cap.max_recv_wr; 2229 resp.max_send_sge = init_attr->cap.max_send_sge; 2230 resp.max_recv_sge = init_attr->cap.max_recv_sge; 2231 resp.max_inline_data = init_attr->cap.max_inline_data; 2232 resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR; 2233 2234 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2235 &resp, sizeof resp)) 2236 ret = -EFAULT; 2237 2238 out: 2239 kfree(attr); 2240 kfree(init_attr); 2241 2242 return ret ? 
ret : in_len; 2243 } 2244 2245 /* Remove ignored fields set in the attribute mask */ 2246 static int modify_qp_mask(enum ib_qp_type qp_type, int mask) 2247 { 2248 switch (qp_type) { 2249 case IB_QPT_XRC_INI: 2250 return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER); 2251 case IB_QPT_XRC_TGT: 2252 return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT | 2253 IB_QP_RNR_RETRY); 2254 default: 2255 return mask; 2256 } 2257 } 2258 2259 ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, 2260 struct ib_device *ib_dev, 2261 const char __user *buf, int in_len, 2262 int out_len) 2263 { 2264 struct ib_uverbs_modify_qp cmd; 2265 struct ib_udata udata; 2266 struct ib_qp *qp; 2267 struct ib_qp_attr *attr; 2268 int ret; 2269 2270 if (copy_from_user(&cmd, buf, sizeof cmd)) 2271 return -EFAULT; 2272 2273 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, 2274 out_len); 2275 2276 attr = kmalloc(sizeof *attr, GFP_KERNEL); 2277 if (!attr) 2278 return -ENOMEM; 2279 2280 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 2281 if (!qp) { 2282 ret = -EINVAL; 2283 goto out; 2284 } 2285 2286 attr->qp_state = cmd.qp_state; 2287 attr->cur_qp_state = cmd.cur_qp_state; 2288 attr->path_mtu = cmd.path_mtu; 2289 attr->path_mig_state = cmd.path_mig_state; 2290 attr->qkey = cmd.qkey; 2291 attr->rq_psn = cmd.rq_psn; 2292 attr->sq_psn = cmd.sq_psn; 2293 attr->dest_qp_num = cmd.dest_qp_num; 2294 attr->qp_access_flags = cmd.qp_access_flags; 2295 attr->pkey_index = cmd.pkey_index; 2296 attr->alt_pkey_index = cmd.alt_pkey_index; 2297 attr->en_sqd_async_notify = cmd.en_sqd_async_notify; 2298 attr->max_rd_atomic = cmd.max_rd_atomic; 2299 attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic; 2300 attr->min_rnr_timer = cmd.min_rnr_timer; 2301 attr->port_num = cmd.port_num; 2302 attr->timeout = cmd.timeout; 2303 attr->retry_cnt = cmd.retry_cnt; 2304 attr->rnr_retry = cmd.rnr_retry; 2305 attr->alt_port_num = cmd.alt_port_num; 2306 attr->alt_timeout = cmd.alt_timeout; 2307 2308 memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16); 2309 attr->ah_attr.grh.flow_label = cmd.dest.flow_label; 2310 attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index; 2311 attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit; 2312 attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class; 2313 attr->ah_attr.dlid = cmd.dest.dlid; 2314 attr->ah_attr.sl = cmd.dest.sl; 2315 attr->ah_attr.src_path_bits = cmd.dest.src_path_bits; 2316 attr->ah_attr.static_rate = cmd.dest.static_rate; 2317 attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0; 2318 attr->ah_attr.port_num = cmd.dest.port_num; 2319 2320 memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16); 2321 attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label; 2322 attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index; 2323 attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit; 2324 attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class; 2325 attr->alt_ah_attr.dlid = cmd.alt_dest.dlid; 2326 attr->alt_ah_attr.sl = cmd.alt_dest.sl; 2327 attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits; 2328 attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate; 2329 attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? 
IB_AH_GRH : 0; 2330 attr->alt_ah_attr.port_num = cmd.alt_dest.port_num; 2331 2332 if (qp->real_qp == qp) { 2333 ret = ib_resolve_eth_dmac(qp, attr, &cmd.attr_mask); 2334 if (ret) 2335 goto release_qp; 2336 ret = qp->device->modify_qp(qp, attr, 2337 modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata); 2338 } else { 2339 ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask)); 2340 } 2341 2342 if (ret) 2343 goto release_qp; 2344 2345 ret = in_len; 2346 2347 release_qp: 2348 put_qp_read(qp); 2349 2350 out: 2351 kfree(attr); 2352 2353 return ret; 2354 } 2355 2356 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file, 2357 struct ib_device *ib_dev, 2358 const char __user *buf, int in_len, 2359 int out_len) 2360 { 2361 struct ib_uverbs_destroy_qp cmd; 2362 struct ib_uverbs_destroy_qp_resp resp; 2363 struct ib_uobject *uobj; 2364 struct ib_qp *qp; 2365 struct ib_uqp_object *obj; 2366 int ret = -EINVAL; 2367 2368 if (copy_from_user(&cmd, buf, sizeof cmd)) 2369 return -EFAULT; 2370 2371 memset(&resp, 0, sizeof resp); 2372 2373 uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext); 2374 if (!uobj) 2375 return -EINVAL; 2376 qp = uobj->object; 2377 obj = container_of(uobj, struct ib_uqp_object, uevent.uobject); 2378 2379 if (!list_empty(&obj->mcast_list)) { 2380 put_uobj_write(uobj); 2381 return -EBUSY; 2382 } 2383 2384 ret = ib_destroy_qp(qp); 2385 if (!ret) 2386 uobj->live = 0; 2387 2388 put_uobj_write(uobj); 2389 2390 if (ret) 2391 return ret; 2392 2393 if (obj->uxrcd) 2394 atomic_dec(&obj->uxrcd->refcnt); 2395 2396 idr_remove_uobj(&ib_uverbs_qp_idr, uobj); 2397 2398 mutex_lock(&file->mutex); 2399 list_del(&uobj->list); 2400 mutex_unlock(&file->mutex); 2401 2402 ib_uverbs_release_uevent(file, &obj->uevent); 2403 2404 resp.events_reported = obj->uevent.events_reported; 2405 2406 put_uobj(uobj); 2407 2408 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2409 &resp, sizeof resp)) 2410 return -EFAULT; 2411 2412 return in_len; 2413 } 2414 2415 static void *alloc_wr(size_t wr_size, __u32 num_sge) 2416 { 2417 return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) + 2418 num_sge * sizeof (struct ib_sge), GFP_KERNEL); 2419 }; 2420 2421 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, 2422 struct ib_device *ib_dev, 2423 const char __user *buf, int in_len, 2424 int out_len) 2425 { 2426 struct ib_uverbs_post_send cmd; 2427 struct ib_uverbs_post_send_resp resp; 2428 struct ib_uverbs_send_wr *user_wr; 2429 struct ib_send_wr *wr = NULL, *last, *next, *bad_wr; 2430 struct ib_qp *qp; 2431 int i, sg_ind; 2432 int is_ud; 2433 ssize_t ret = -EINVAL; 2434 size_t next_size; 2435 2436 if (copy_from_user(&cmd, buf, sizeof cmd)) 2437 return -EFAULT; 2438 2439 if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count + 2440 cmd.sge_count * sizeof (struct ib_uverbs_sge)) 2441 return -EINVAL; 2442 2443 if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr)) 2444 return -EINVAL; 2445 2446 user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL); 2447 if (!user_wr) 2448 return -ENOMEM; 2449 2450 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 2451 if (!qp) 2452 goto out; 2453 2454 is_ud = qp->qp_type == IB_QPT_UD; 2455 sg_ind = 0; 2456 last = NULL; 2457 for (i = 0; i < cmd.wr_count; ++i) { 2458 if (copy_from_user(user_wr, 2459 buf + sizeof cmd + i * cmd.wqe_size, 2460 cmd.wqe_size)) { 2461 ret = -EFAULT; 2462 goto out_put; 2463 } 2464 2465 if (user_wr->num_sge + sg_ind > cmd.sge_count) { 2466 ret = -EINVAL; 2467 goto out_put; 2468 } 2469 2470 if (is_ud) { 2471 struct ib_ud_wr *ud; 2472 
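			/*
			 * A UD QP accepts only plain sends, optionally with
			 * immediate data, and every UD work request must
			 * reference a valid address handle.
			 */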
2473 if (user_wr->opcode != IB_WR_SEND && 2474 user_wr->opcode != IB_WR_SEND_WITH_IMM) { 2475 ret = -EINVAL; 2476 goto out_put; 2477 } 2478 2479 next_size = sizeof(*ud); 2480 ud = alloc_wr(next_size, user_wr->num_sge); 2481 if (!ud) { 2482 ret = -ENOMEM; 2483 goto out_put; 2484 } 2485 2486 ud->ah = idr_read_ah(user_wr->wr.ud.ah, file->ucontext); 2487 if (!ud->ah) { 2488 kfree(ud); 2489 ret = -EINVAL; 2490 goto out_put; 2491 } 2492 ud->remote_qpn = user_wr->wr.ud.remote_qpn; 2493 ud->remote_qkey = user_wr->wr.ud.remote_qkey; 2494 2495 next = &ud->wr; 2496 } else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM || 2497 user_wr->opcode == IB_WR_RDMA_WRITE || 2498 user_wr->opcode == IB_WR_RDMA_READ) { 2499 struct ib_rdma_wr *rdma; 2500 2501 next_size = sizeof(*rdma); 2502 rdma = alloc_wr(next_size, user_wr->num_sge); 2503 if (!rdma) { 2504 ret = -ENOMEM; 2505 goto out_put; 2506 } 2507 2508 rdma->remote_addr = user_wr->wr.rdma.remote_addr; 2509 rdma->rkey = user_wr->wr.rdma.rkey; 2510 2511 next = &rdma->wr; 2512 } else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || 2513 user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { 2514 struct ib_atomic_wr *atomic; 2515 2516 next_size = sizeof(*atomic); 2517 atomic = alloc_wr(next_size, user_wr->num_sge); 2518 if (!atomic) { 2519 ret = -ENOMEM; 2520 goto out_put; 2521 } 2522 2523 atomic->remote_addr = user_wr->wr.atomic.remote_addr; 2524 atomic->compare_add = user_wr->wr.atomic.compare_add; 2525 atomic->swap = user_wr->wr.atomic.swap; 2526 atomic->rkey = user_wr->wr.atomic.rkey; 2527 2528 next = &atomic->wr; 2529 } else if (user_wr->opcode == IB_WR_SEND || 2530 user_wr->opcode == IB_WR_SEND_WITH_IMM || 2531 user_wr->opcode == IB_WR_SEND_WITH_INV) { 2532 next_size = sizeof(*next); 2533 next = alloc_wr(next_size, user_wr->num_sge); 2534 if (!next) { 2535 ret = -ENOMEM; 2536 goto out_put; 2537 } 2538 } else { 2539 ret = -EINVAL; 2540 goto out_put; 2541 } 2542 2543 if (user_wr->opcode == IB_WR_SEND_WITH_IMM || 2544 user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) { 2545 next->ex.imm_data = 2546 (__be32 __force) user_wr->ex.imm_data; 2547 } else if (user_wr->opcode == IB_WR_SEND_WITH_INV) { 2548 next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey; 2549 } 2550 2551 if (!last) 2552 wr = next; 2553 else 2554 last->next = next; 2555 last = next; 2556 2557 next->next = NULL; 2558 next->wr_id = user_wr->wr_id; 2559 next->num_sge = user_wr->num_sge; 2560 next->opcode = user_wr->opcode; 2561 next->send_flags = user_wr->send_flags; 2562 2563 if (next->num_sge) { 2564 next->sg_list = (void *) next + 2565 ALIGN(next_size, sizeof(struct ib_sge)); 2566 if (copy_from_user(next->sg_list, 2567 buf + sizeof cmd + 2568 cmd.wr_count * cmd.wqe_size + 2569 sg_ind * sizeof (struct ib_sge), 2570 next->num_sge * sizeof (struct ib_sge))) { 2571 ret = -EFAULT; 2572 goto out_put; 2573 } 2574 sg_ind += next->num_sge; 2575 } else 2576 next->sg_list = NULL; 2577 } 2578 2579 resp.bad_wr = 0; 2580 ret = qp->device->post_send(qp->real_qp, wr, &bad_wr); 2581 if (ret) 2582 for (next = wr; next; next = next->next) { 2583 ++resp.bad_wr; 2584 if (next == bad_wr) 2585 break; 2586 } 2587 2588 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2589 &resp, sizeof resp)) 2590 ret = -EFAULT; 2591 2592 out_put: 2593 put_qp_read(qp); 2594 2595 while (wr) { 2596 if (is_ud && ud_wr(wr)->ah) 2597 put_ah_read(ud_wr(wr)->ah); 2598 next = wr->next; 2599 kfree(wr); 2600 wr = next; 2601 } 2602 2603 out: 2604 kfree(user_wr); 2605 2606 return ret ? 
ret : in_len; 2607 } 2608 2609 static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf, 2610 int in_len, 2611 u32 wr_count, 2612 u32 sge_count, 2613 u32 wqe_size) 2614 { 2615 struct ib_uverbs_recv_wr *user_wr; 2616 struct ib_recv_wr *wr = NULL, *last, *next; 2617 int sg_ind; 2618 int i; 2619 int ret; 2620 2621 if (in_len < wqe_size * wr_count + 2622 sge_count * sizeof (struct ib_uverbs_sge)) 2623 return ERR_PTR(-EINVAL); 2624 2625 if (wqe_size < sizeof (struct ib_uverbs_recv_wr)) 2626 return ERR_PTR(-EINVAL); 2627 2628 user_wr = kmalloc(wqe_size, GFP_KERNEL); 2629 if (!user_wr) 2630 return ERR_PTR(-ENOMEM); 2631 2632 sg_ind = 0; 2633 last = NULL; 2634 for (i = 0; i < wr_count; ++i) { 2635 if (copy_from_user(user_wr, buf + i * wqe_size, 2636 wqe_size)) { 2637 ret = -EFAULT; 2638 goto err; 2639 } 2640 2641 if (user_wr->num_sge + sg_ind > sge_count) { 2642 ret = -EINVAL; 2643 goto err; 2644 } 2645 2646 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) + 2647 user_wr->num_sge * sizeof (struct ib_sge), 2648 GFP_KERNEL); 2649 if (!next) { 2650 ret = -ENOMEM; 2651 goto err; 2652 } 2653 2654 if (!last) 2655 wr = next; 2656 else 2657 last->next = next; 2658 last = next; 2659 2660 next->next = NULL; 2661 next->wr_id = user_wr->wr_id; 2662 next->num_sge = user_wr->num_sge; 2663 2664 if (next->num_sge) { 2665 next->sg_list = (void *) next + 2666 ALIGN(sizeof *next, sizeof (struct ib_sge)); 2667 if (copy_from_user(next->sg_list, 2668 buf + wr_count * wqe_size + 2669 sg_ind * sizeof (struct ib_sge), 2670 next->num_sge * sizeof (struct ib_sge))) { 2671 ret = -EFAULT; 2672 goto err; 2673 } 2674 sg_ind += next->num_sge; 2675 } else 2676 next->sg_list = NULL; 2677 } 2678 2679 kfree(user_wr); 2680 return wr; 2681 2682 err: 2683 kfree(user_wr); 2684 2685 while (wr) { 2686 next = wr->next; 2687 kfree(wr); 2688 wr = next; 2689 } 2690 2691 return ERR_PTR(ret); 2692 } 2693 2694 ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file, 2695 struct ib_device *ib_dev, 2696 const char __user *buf, int in_len, 2697 int out_len) 2698 { 2699 struct ib_uverbs_post_recv cmd; 2700 struct ib_uverbs_post_recv_resp resp; 2701 struct ib_recv_wr *wr, *next, *bad_wr; 2702 struct ib_qp *qp; 2703 ssize_t ret = -EINVAL; 2704 2705 if (copy_from_user(&cmd, buf, sizeof cmd)) 2706 return -EFAULT; 2707 2708 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd, 2709 in_len - sizeof cmd, cmd.wr_count, 2710 cmd.sge_count, cmd.wqe_size); 2711 if (IS_ERR(wr)) 2712 return PTR_ERR(wr); 2713 2714 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 2715 if (!qp) 2716 goto out; 2717 2718 resp.bad_wr = 0; 2719 ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr); 2720 2721 put_qp_read(qp); 2722 2723 if (ret) 2724 for (next = wr; next; next = next->next) { 2725 ++resp.bad_wr; 2726 if (next == bad_wr) 2727 break; 2728 } 2729 2730 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2731 &resp, sizeof resp)) 2732 ret = -EFAULT; 2733 2734 out: 2735 while (wr) { 2736 next = wr->next; 2737 kfree(wr); 2738 wr = next; 2739 } 2740 2741 return ret ? 
ret : in_len; 2742 } 2743 2744 ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file, 2745 struct ib_device *ib_dev, 2746 const char __user *buf, int in_len, 2747 int out_len) 2748 { 2749 struct ib_uverbs_post_srq_recv cmd; 2750 struct ib_uverbs_post_srq_recv_resp resp; 2751 struct ib_recv_wr *wr, *next, *bad_wr; 2752 struct ib_srq *srq; 2753 ssize_t ret = -EINVAL; 2754 2755 if (copy_from_user(&cmd, buf, sizeof cmd)) 2756 return -EFAULT; 2757 2758 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd, 2759 in_len - sizeof cmd, cmd.wr_count, 2760 cmd.sge_count, cmd.wqe_size); 2761 if (IS_ERR(wr)) 2762 return PTR_ERR(wr); 2763 2764 srq = idr_read_srq(cmd.srq_handle, file->ucontext); 2765 if (!srq) 2766 goto out; 2767 2768 resp.bad_wr = 0; 2769 ret = srq->device->post_srq_recv(srq, wr, &bad_wr); 2770 2771 put_srq_read(srq); 2772 2773 if (ret) 2774 for (next = wr; next; next = next->next) { 2775 ++resp.bad_wr; 2776 if (next == bad_wr) 2777 break; 2778 } 2779 2780 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2781 &resp, sizeof resp)) 2782 ret = -EFAULT; 2783 2784 out: 2785 while (wr) { 2786 next = wr->next; 2787 kfree(wr); 2788 wr = next; 2789 } 2790 2791 return ret ? ret : in_len; 2792 } 2793 2794 ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, 2795 struct ib_device *ib_dev, 2796 const char __user *buf, int in_len, 2797 int out_len) 2798 { 2799 struct ib_uverbs_create_ah cmd; 2800 struct ib_uverbs_create_ah_resp resp; 2801 struct ib_uobject *uobj; 2802 struct ib_pd *pd; 2803 struct ib_ah *ah; 2804 struct ib_ah_attr attr; 2805 int ret; 2806 2807 if (out_len < sizeof resp) 2808 return -ENOSPC; 2809 2810 if (copy_from_user(&cmd, buf, sizeof cmd)) 2811 return -EFAULT; 2812 2813 uobj = kmalloc(sizeof *uobj, GFP_KERNEL); 2814 if (!uobj) 2815 return -ENOMEM; 2816 2817 init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class); 2818 down_write(&uobj->mutex); 2819 2820 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 2821 if (!pd) { 2822 ret = -EINVAL; 2823 goto err; 2824 } 2825 2826 attr.dlid = cmd.attr.dlid; 2827 attr.sl = cmd.attr.sl; 2828 attr.src_path_bits = cmd.attr.src_path_bits; 2829 attr.static_rate = cmd.attr.static_rate; 2830 attr.ah_flags = cmd.attr.is_global ? 
IB_AH_GRH : 0; 2831 attr.port_num = cmd.attr.port_num; 2832 attr.grh.flow_label = cmd.attr.grh.flow_label; 2833 attr.grh.sgid_index = cmd.attr.grh.sgid_index; 2834 attr.grh.hop_limit = cmd.attr.grh.hop_limit; 2835 attr.grh.traffic_class = cmd.attr.grh.traffic_class; 2836 memset(&attr.dmac, 0, sizeof(attr.dmac)); 2837 memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16); 2838 2839 ah = ib_create_ah(pd, &attr); 2840 if (IS_ERR(ah)) { 2841 ret = PTR_ERR(ah); 2842 goto err_put; 2843 } 2844 2845 ah->uobject = uobj; 2846 uobj->object = ah; 2847 2848 ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj); 2849 if (ret) 2850 goto err_destroy; 2851 2852 resp.ah_handle = uobj->id; 2853 2854 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2855 &resp, sizeof resp)) { 2856 ret = -EFAULT; 2857 goto err_copy; 2858 } 2859 2860 put_pd_read(pd); 2861 2862 mutex_lock(&file->mutex); 2863 list_add_tail(&uobj->list, &file->ucontext->ah_list); 2864 mutex_unlock(&file->mutex); 2865 2866 uobj->live = 1; 2867 2868 up_write(&uobj->mutex); 2869 2870 return in_len; 2871 2872 err_copy: 2873 idr_remove_uobj(&ib_uverbs_ah_idr, uobj); 2874 2875 err_destroy: 2876 ib_destroy_ah(ah); 2877 2878 err_put: 2879 put_pd_read(pd); 2880 2881 err: 2882 put_uobj_write(uobj); 2883 return ret; 2884 } 2885 2886 ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file, 2887 struct ib_device *ib_dev, 2888 const char __user *buf, int in_len, int out_len) 2889 { 2890 struct ib_uverbs_destroy_ah cmd; 2891 struct ib_ah *ah; 2892 struct ib_uobject *uobj; 2893 int ret; 2894 2895 if (copy_from_user(&cmd, buf, sizeof cmd)) 2896 return -EFAULT; 2897 2898 uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext); 2899 if (!uobj) 2900 return -EINVAL; 2901 ah = uobj->object; 2902 2903 ret = ib_destroy_ah(ah); 2904 if (!ret) 2905 uobj->live = 0; 2906 2907 put_uobj_write(uobj); 2908 2909 if (ret) 2910 return ret; 2911 2912 idr_remove_uobj(&ib_uverbs_ah_idr, uobj); 2913 2914 mutex_lock(&file->mutex); 2915 list_del(&uobj->list); 2916 mutex_unlock(&file->mutex); 2917 2918 put_uobj(uobj); 2919 2920 return in_len; 2921 } 2922 2923 ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file, 2924 struct ib_device *ib_dev, 2925 const char __user *buf, int in_len, 2926 int out_len) 2927 { 2928 struct ib_uverbs_attach_mcast cmd; 2929 struct ib_qp *qp; 2930 struct ib_uqp_object *obj; 2931 struct ib_uverbs_mcast_entry *mcast; 2932 int ret; 2933 2934 if (copy_from_user(&cmd, buf, sizeof cmd)) 2935 return -EFAULT; 2936 2937 qp = idr_write_qp(cmd.qp_handle, file->ucontext); 2938 if (!qp) 2939 return -EINVAL; 2940 2941 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject); 2942 2943 list_for_each_entry(mcast, &obj->mcast_list, list) 2944 if (cmd.mlid == mcast->lid && 2945 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) { 2946 ret = 0; 2947 goto out_put; 2948 } 2949 2950 mcast = kmalloc(sizeof *mcast, GFP_KERNEL); 2951 if (!mcast) { 2952 ret = -ENOMEM; 2953 goto out_put; 2954 } 2955 2956 mcast->lid = cmd.mlid; 2957 memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw); 2958 2959 ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid); 2960 if (!ret) 2961 list_add_tail(&mcast->list, &obj->mcast_list); 2962 else 2963 kfree(mcast); 2964 2965 out_put: 2966 put_qp_write(qp); 2967 2968 return ret ? 
ret : in_len; 2969 } 2970 2971 ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file, 2972 struct ib_device *ib_dev, 2973 const char __user *buf, int in_len, 2974 int out_len) 2975 { 2976 struct ib_uverbs_detach_mcast cmd; 2977 struct ib_uqp_object *obj; 2978 struct ib_qp *qp; 2979 struct ib_uverbs_mcast_entry *mcast; 2980 int ret = -EINVAL; 2981 2982 if (copy_from_user(&cmd, buf, sizeof cmd)) 2983 return -EFAULT; 2984 2985 qp = idr_write_qp(cmd.qp_handle, file->ucontext); 2986 if (!qp) 2987 return -EINVAL; 2988 2989 ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid); 2990 if (ret) 2991 goto out_put; 2992 2993 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject); 2994 2995 list_for_each_entry(mcast, &obj->mcast_list, list) 2996 if (cmd.mlid == mcast->lid && 2997 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) { 2998 list_del(&mcast->list); 2999 kfree(mcast); 3000 break; 3001 } 3002 3003 out_put: 3004 put_qp_write(qp); 3005 3006 return ret ? ret : in_len; 3007 } 3008 3009 static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec, 3010 union ib_flow_spec *ib_spec) 3011 { 3012 if (kern_spec->reserved) 3013 return -EINVAL; 3014 3015 ib_spec->type = kern_spec->type; 3016 3017 switch (ib_spec->type) { 3018 case IB_FLOW_SPEC_ETH: 3019 ib_spec->eth.size = sizeof(struct ib_flow_spec_eth); 3020 if (ib_spec->eth.size != kern_spec->eth.size) 3021 return -EINVAL; 3022 memcpy(&ib_spec->eth.val, &kern_spec->eth.val, 3023 sizeof(struct ib_flow_eth_filter)); 3024 memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask, 3025 sizeof(struct ib_flow_eth_filter)); 3026 break; 3027 case IB_FLOW_SPEC_IPV4: 3028 ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4); 3029 if (ib_spec->ipv4.size != kern_spec->ipv4.size) 3030 return -EINVAL; 3031 memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val, 3032 sizeof(struct ib_flow_ipv4_filter)); 3033 memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask, 3034 sizeof(struct ib_flow_ipv4_filter)); 3035 break; 3036 case IB_FLOW_SPEC_TCP: 3037 case IB_FLOW_SPEC_UDP: 3038 ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp); 3039 if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size) 3040 return -EINVAL; 3041 memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val, 3042 sizeof(struct ib_flow_tcp_udp_filter)); 3043 memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask, 3044 sizeof(struct ib_flow_tcp_udp_filter)); 3045 break; 3046 default: 3047 return -EINVAL; 3048 } 3049 return 0; 3050 } 3051 3052 int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, 3053 struct ib_device *ib_dev, 3054 struct ib_udata *ucore, 3055 struct ib_udata *uhw) 3056 { 3057 struct ib_uverbs_create_flow cmd; 3058 struct ib_uverbs_create_flow_resp resp; 3059 struct ib_uobject *uobj; 3060 struct ib_flow *flow_id; 3061 struct ib_uverbs_flow_attr *kern_flow_attr; 3062 struct ib_flow_attr *flow_attr; 3063 struct ib_qp *qp; 3064 int err = 0; 3065 void *kern_spec; 3066 void *ib_spec; 3067 int i; 3068 3069 if (ucore->inlen < sizeof(cmd)) 3070 return -EINVAL; 3071 3072 if (ucore->outlen < sizeof(resp)) 3073 return -ENOSPC; 3074 3075 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); 3076 if (err) 3077 return err; 3078 3079 ucore->inbuf += sizeof(cmd); 3080 ucore->inlen -= sizeof(cmd); 3081 3082 if (cmd.comp_mask) 3083 return -EINVAL; 3084 3085 if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER && 3086 !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW)) 3087 return -EPERM; 3088 3089 if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS) 3090 return -EINVAL; 3091 
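	/*
	 * flow_attr.size counts only the variable-length spec array that
	 * follows the fixed header: it must fit inside what the caller
	 * actually passed in and may not claim more than num_of_specs
	 * maximally-sized specs.
	 */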
3092 if (cmd.flow_attr.size > ucore->inlen || 3093 cmd.flow_attr.size > 3094 (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec))) 3095 return -EINVAL; 3096 3097 if (cmd.flow_attr.reserved[0] || 3098 cmd.flow_attr.reserved[1]) 3099 return -EINVAL; 3100 3101 if (cmd.flow_attr.num_of_specs) { 3102 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size, 3103 GFP_KERNEL); 3104 if (!kern_flow_attr) 3105 return -ENOMEM; 3106 3107 memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr)); 3108 err = ib_copy_from_udata(kern_flow_attr + 1, ucore, 3109 cmd.flow_attr.size); 3110 if (err) 3111 goto err_free_attr; 3112 } else { 3113 kern_flow_attr = &cmd.flow_attr; 3114 } 3115 3116 uobj = kmalloc(sizeof(*uobj), GFP_KERNEL); 3117 if (!uobj) { 3118 err = -ENOMEM; 3119 goto err_free_attr; 3120 } 3121 init_uobj(uobj, 0, file->ucontext, &rule_lock_class); 3122 down_write(&uobj->mutex); 3123 3124 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 3125 if (!qp) { 3126 err = -EINVAL; 3127 goto err_uobj; 3128 } 3129 3130 flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL); 3131 if (!flow_attr) { 3132 err = -ENOMEM; 3133 goto err_put; 3134 } 3135 3136 flow_attr->type = kern_flow_attr->type; 3137 flow_attr->priority = kern_flow_attr->priority; 3138 flow_attr->num_of_specs = kern_flow_attr->num_of_specs; 3139 flow_attr->port = kern_flow_attr->port; 3140 flow_attr->flags = kern_flow_attr->flags; 3141 flow_attr->size = sizeof(*flow_attr); 3142 3143 kern_spec = kern_flow_attr + 1; 3144 ib_spec = flow_attr + 1; 3145 for (i = 0; i < flow_attr->num_of_specs && 3146 cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) && 3147 cmd.flow_attr.size >= 3148 ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) { 3149 err = kern_spec_to_ib_spec(kern_spec, ib_spec); 3150 if (err) 3151 goto err_free; 3152 flow_attr->size += 3153 ((union ib_flow_spec *) ib_spec)->size; 3154 cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size; 3155 kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size; 3156 ib_spec += ((union ib_flow_spec *) ib_spec)->size; 3157 } 3158 if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) { 3159 pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n", 3160 i, cmd.flow_attr.size); 3161 err = -EINVAL; 3162 goto err_free; 3163 } 3164 flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER); 3165 if (IS_ERR(flow_id)) { 3166 err = PTR_ERR(flow_id); 3167 goto err_free; 3168 } 3169 flow_id->qp = qp; 3170 flow_id->uobject = uobj; 3171 uobj->object = flow_id; 3172 3173 err = idr_add_uobj(&ib_uverbs_rule_idr, uobj); 3174 if (err) 3175 goto destroy_flow; 3176 3177 memset(&resp, 0, sizeof(resp)); 3178 resp.flow_handle = uobj->id; 3179 3180 err = ib_copy_to_udata(ucore, 3181 &resp, sizeof(resp)); 3182 if (err) 3183 goto err_copy; 3184 3185 put_qp_read(qp); 3186 mutex_lock(&file->mutex); 3187 list_add_tail(&uobj->list, &file->ucontext->rule_list); 3188 mutex_unlock(&file->mutex); 3189 3190 uobj->live = 1; 3191 3192 up_write(&uobj->mutex); 3193 kfree(flow_attr); 3194 if (cmd.flow_attr.num_of_specs) 3195 kfree(kern_flow_attr); 3196 return 0; 3197 err_copy: 3198 idr_remove_uobj(&ib_uverbs_rule_idr, uobj); 3199 destroy_flow: 3200 ib_destroy_flow(flow_id); 3201 err_free: 3202 kfree(flow_attr); 3203 err_put: 3204 put_qp_read(qp); 3205 err_uobj: 3206 put_uobj_write(uobj); 3207 err_free_attr: 3208 if (cmd.flow_attr.num_of_specs) 3209 kfree(kern_flow_attr); 3210 return err; 3211 } 3212 3213 int 
ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file, 3214 struct ib_device *ib_dev, 3215 struct ib_udata *ucore, 3216 struct ib_udata *uhw) 3217 { 3218 struct ib_uverbs_destroy_flow cmd; 3219 struct ib_flow *flow_id; 3220 struct ib_uobject *uobj; 3221 int ret; 3222 3223 if (ucore->inlen < sizeof(cmd)) 3224 return -EINVAL; 3225 3226 ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); 3227 if (ret) 3228 return ret; 3229 3230 if (cmd.comp_mask) 3231 return -EINVAL; 3232 3233 uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle, 3234 file->ucontext); 3235 if (!uobj) 3236 return -EINVAL; 3237 flow_id = uobj->object; 3238 3239 ret = ib_destroy_flow(flow_id); 3240 if (!ret) 3241 uobj->live = 0; 3242 3243 put_uobj_write(uobj); 3244 3245 idr_remove_uobj(&ib_uverbs_rule_idr, uobj); 3246 3247 mutex_lock(&file->mutex); 3248 list_del(&uobj->list); 3249 mutex_unlock(&file->mutex); 3250 3251 put_uobj(uobj); 3252 3253 return ret; 3254 } 3255 3256 static int __uverbs_create_xsrq(struct ib_uverbs_file *file, 3257 struct ib_device *ib_dev, 3258 struct ib_uverbs_create_xsrq *cmd, 3259 struct ib_udata *udata) 3260 { 3261 struct ib_uverbs_create_srq_resp resp; 3262 struct ib_usrq_object *obj; 3263 struct ib_pd *pd; 3264 struct ib_srq *srq; 3265 struct ib_uobject *uninitialized_var(xrcd_uobj); 3266 struct ib_srq_init_attr attr; 3267 int ret; 3268 3269 obj = kmalloc(sizeof *obj, GFP_KERNEL); 3270 if (!obj) 3271 return -ENOMEM; 3272 3273 init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class); 3274 down_write(&obj->uevent.uobject.mutex); 3275 3276 if (cmd->srq_type == IB_SRQT_XRC) { 3277 attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj); 3278 if (!attr.ext.xrc.xrcd) { 3279 ret = -EINVAL; 3280 goto err; 3281 } 3282 3283 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject); 3284 atomic_inc(&obj->uxrcd->refcnt); 3285 3286 attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0); 3287 if (!attr.ext.xrc.cq) { 3288 ret = -EINVAL; 3289 goto err_put_xrcd; 3290 } 3291 } 3292 3293 pd = idr_read_pd(cmd->pd_handle, file->ucontext); 3294 if (!pd) { 3295 ret = -EINVAL; 3296 goto err_put_cq; 3297 } 3298 3299 attr.event_handler = ib_uverbs_srq_event_handler; 3300 attr.srq_context = file; 3301 attr.srq_type = cmd->srq_type; 3302 attr.attr.max_wr = cmd->max_wr; 3303 attr.attr.max_sge = cmd->max_sge; 3304 attr.attr.srq_limit = cmd->srq_limit; 3305 3306 obj->uevent.events_reported = 0; 3307 INIT_LIST_HEAD(&obj->uevent.event_list); 3308 3309 srq = pd->device->create_srq(pd, &attr, udata); 3310 if (IS_ERR(srq)) { 3311 ret = PTR_ERR(srq); 3312 goto err_put; 3313 } 3314 3315 srq->device = pd->device; 3316 srq->pd = pd; 3317 srq->srq_type = cmd->srq_type; 3318 srq->uobject = &obj->uevent.uobject; 3319 srq->event_handler = attr.event_handler; 3320 srq->srq_context = attr.srq_context; 3321 3322 if (cmd->srq_type == IB_SRQT_XRC) { 3323 srq->ext.xrc.cq = attr.ext.xrc.cq; 3324 srq->ext.xrc.xrcd = attr.ext.xrc.xrcd; 3325 atomic_inc(&attr.ext.xrc.cq->usecnt); 3326 atomic_inc(&attr.ext.xrc.xrcd->usecnt); 3327 } 3328 3329 atomic_inc(&pd->usecnt); 3330 atomic_set(&srq->usecnt, 0); 3331 3332 obj->uevent.uobject.object = srq; 3333 ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject); 3334 if (ret) 3335 goto err_destroy; 3336 3337 memset(&resp, 0, sizeof resp); 3338 resp.srq_handle = obj->uevent.uobject.id; 3339 resp.max_wr = attr.attr.max_wr; 3340 resp.max_sge = attr.attr.max_sge; 3341 if (cmd->srq_type == IB_SRQT_XRC) 3342 resp.srqn = 
srq->ext.xrc.srq_num; 3343 3344 if (copy_to_user((void __user *) (unsigned long) cmd->response, 3345 &resp, sizeof resp)) { 3346 ret = -EFAULT; 3347 goto err_copy; 3348 } 3349 3350 if (cmd->srq_type == IB_SRQT_XRC) { 3351 put_uobj_read(xrcd_uobj); 3352 put_cq_read(attr.ext.xrc.cq); 3353 } 3354 put_pd_read(pd); 3355 3356 mutex_lock(&file->mutex); 3357 list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list); 3358 mutex_unlock(&file->mutex); 3359 3360 obj->uevent.uobject.live = 1; 3361 3362 up_write(&obj->uevent.uobject.mutex); 3363 3364 return 0; 3365 3366 err_copy: 3367 idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject); 3368 3369 err_destroy: 3370 ib_destroy_srq(srq); 3371 3372 err_put: 3373 put_pd_read(pd); 3374 3375 err_put_cq: 3376 if (cmd->srq_type == IB_SRQT_XRC) 3377 put_cq_read(attr.ext.xrc.cq); 3378 3379 err_put_xrcd: 3380 if (cmd->srq_type == IB_SRQT_XRC) { 3381 atomic_dec(&obj->uxrcd->refcnt); 3382 put_uobj_read(xrcd_uobj); 3383 } 3384 3385 err: 3386 put_uobj_write(&obj->uevent.uobject); 3387 return ret; 3388 } 3389 3390 ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file, 3391 struct ib_device *ib_dev, 3392 const char __user *buf, int in_len, 3393 int out_len) 3394 { 3395 struct ib_uverbs_create_srq cmd; 3396 struct ib_uverbs_create_xsrq xcmd; 3397 struct ib_uverbs_create_srq_resp resp; 3398 struct ib_udata udata; 3399 int ret; 3400 3401 if (out_len < sizeof resp) 3402 return -ENOSPC; 3403 3404 if (copy_from_user(&cmd, buf, sizeof cmd)) 3405 return -EFAULT; 3406 3407 xcmd.response = cmd.response; 3408 xcmd.user_handle = cmd.user_handle; 3409 xcmd.srq_type = IB_SRQT_BASIC; 3410 xcmd.pd_handle = cmd.pd_handle; 3411 xcmd.max_wr = cmd.max_wr; 3412 xcmd.max_sge = cmd.max_sge; 3413 xcmd.srq_limit = cmd.srq_limit; 3414 3415 INIT_UDATA(&udata, buf + sizeof cmd, 3416 (unsigned long) cmd.response + sizeof resp, 3417 in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr), 3418 out_len - sizeof resp); 3419 3420 ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata); 3421 if (ret) 3422 return ret; 3423 3424 return in_len; 3425 } 3426 3427 ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file, 3428 struct ib_device *ib_dev, 3429 const char __user *buf, int in_len, int out_len) 3430 { 3431 struct ib_uverbs_create_xsrq cmd; 3432 struct ib_uverbs_create_srq_resp resp; 3433 struct ib_udata udata; 3434 int ret; 3435 3436 if (out_len < sizeof resp) 3437 return -ENOSPC; 3438 3439 if (copy_from_user(&cmd, buf, sizeof cmd)) 3440 return -EFAULT; 3441 3442 INIT_UDATA(&udata, buf + sizeof cmd, 3443 (unsigned long) cmd.response + sizeof resp, 3444 in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr), 3445 out_len - sizeof resp); 3446 3447 ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata); 3448 if (ret) 3449 return ret; 3450 3451 return in_len; 3452 } 3453 3454 ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, 3455 struct ib_device *ib_dev, 3456 const char __user *buf, int in_len, 3457 int out_len) 3458 { 3459 struct ib_uverbs_modify_srq cmd; 3460 struct ib_udata udata; 3461 struct ib_srq *srq; 3462 struct ib_srq_attr attr; 3463 int ret; 3464 3465 if (copy_from_user(&cmd, buf, sizeof cmd)) 3466 return -EFAULT; 3467 3468 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, 3469 out_len); 3470 3471 srq = idr_read_srq(cmd.srq_handle, file->ucontext); 3472 if (!srq) 3473 return -EINVAL; 3474 3475 attr.max_wr = cmd.max_wr; 3476 attr.srq_limit = cmd.srq_limit; 3477 3478 ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata); 3479 3480 
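	/*
	 * Drop the reference and read lock taken by idr_read_srq()
	 * whether or not the driver's modify_srq() call succeeded.
	 */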
put_srq_read(srq); 3481 3482 return ret ? ret : in_len; 3483 } 3484 3485 ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file, 3486 struct ib_device *ib_dev, 3487 const char __user *buf, 3488 int in_len, int out_len) 3489 { 3490 struct ib_uverbs_query_srq cmd; 3491 struct ib_uverbs_query_srq_resp resp; 3492 struct ib_srq_attr attr; 3493 struct ib_srq *srq; 3494 int ret; 3495 3496 if (out_len < sizeof resp) 3497 return -ENOSPC; 3498 3499 if (copy_from_user(&cmd, buf, sizeof cmd)) 3500 return -EFAULT; 3501 3502 srq = idr_read_srq(cmd.srq_handle, file->ucontext); 3503 if (!srq) 3504 return -EINVAL; 3505 3506 ret = ib_query_srq(srq, &attr); 3507 3508 put_srq_read(srq); 3509 3510 if (ret) 3511 return ret; 3512 3513 memset(&resp, 0, sizeof resp); 3514 3515 resp.max_wr = attr.max_wr; 3516 resp.max_sge = attr.max_sge; 3517 resp.srq_limit = attr.srq_limit; 3518 3519 if (copy_to_user((void __user *) (unsigned long) cmd.response, 3520 &resp, sizeof resp)) 3521 return -EFAULT; 3522 3523 return in_len; 3524 } 3525 3526 ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file, 3527 struct ib_device *ib_dev, 3528 const char __user *buf, int in_len, 3529 int out_len) 3530 { 3531 struct ib_uverbs_destroy_srq cmd; 3532 struct ib_uverbs_destroy_srq_resp resp; 3533 struct ib_uobject *uobj; 3534 struct ib_srq *srq; 3535 struct ib_uevent_object *obj; 3536 int ret = -EINVAL; 3537 struct ib_usrq_object *us; 3538 enum ib_srq_type srq_type; 3539 3540 if (copy_from_user(&cmd, buf, sizeof cmd)) 3541 return -EFAULT; 3542 3543 uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext); 3544 if (!uobj) 3545 return -EINVAL; 3546 srq = uobj->object; 3547 obj = container_of(uobj, struct ib_uevent_object, uobject); 3548 srq_type = srq->srq_type; 3549 3550 ret = ib_destroy_srq(srq); 3551 if (!ret) 3552 uobj->live = 0; 3553 3554 put_uobj_write(uobj); 3555 3556 if (ret) 3557 return ret; 3558 3559 if (srq_type == IB_SRQT_XRC) { 3560 us = container_of(obj, struct ib_usrq_object, uevent); 3561 atomic_dec(&us->uxrcd->refcnt); 3562 } 3563 3564 idr_remove_uobj(&ib_uverbs_srq_idr, uobj); 3565 3566 mutex_lock(&file->mutex); 3567 list_del(&uobj->list); 3568 mutex_unlock(&file->mutex); 3569 3570 ib_uverbs_release_uevent(file, obj); 3571 3572 memset(&resp, 0, sizeof resp); 3573 resp.events_reported = obj->events_reported; 3574 3575 put_uobj(uobj); 3576 3577 if (copy_to_user((void __user *) (unsigned long) cmd.response, 3578 &resp, sizeof resp)) 3579 ret = -EFAULT; 3580 3581 return ret ? 
ret : in_len; 3582 } 3583 3584 int ib_uverbs_ex_query_device(struct ib_uverbs_file *file, 3585 struct ib_device *ib_dev, 3586 struct ib_udata *ucore, 3587 struct ib_udata *uhw) 3588 { 3589 struct ib_uverbs_ex_query_device_resp resp; 3590 struct ib_uverbs_ex_query_device cmd; 3591 struct ib_device_attr attr; 3592 int err; 3593 3594 if (ucore->inlen < sizeof(cmd)) 3595 return -EINVAL; 3596 3597 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); 3598 if (err) 3599 return err; 3600 3601 if (cmd.comp_mask) 3602 return -EINVAL; 3603 3604 if (cmd.reserved) 3605 return -EINVAL; 3606 3607 resp.response_length = offsetof(typeof(resp), odp_caps); 3608 3609 if (ucore->outlen < resp.response_length) 3610 return -ENOSPC; 3611 3612 memset(&attr, 0, sizeof(attr)); 3613 3614 err = ib_dev->query_device(ib_dev, &attr, uhw); 3615 if (err) 3616 return err; 3617 3618 copy_query_dev_fields(file, ib_dev, &resp.base, &attr); 3619 resp.comp_mask = 0; 3620 3621 if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps)) 3622 goto end; 3623 3624 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 3625 resp.odp_caps.general_caps = attr.odp_caps.general_caps; 3626 resp.odp_caps.per_transport_caps.rc_odp_caps = 3627 attr.odp_caps.per_transport_caps.rc_odp_caps; 3628 resp.odp_caps.per_transport_caps.uc_odp_caps = 3629 attr.odp_caps.per_transport_caps.uc_odp_caps; 3630 resp.odp_caps.per_transport_caps.ud_odp_caps = 3631 attr.odp_caps.per_transport_caps.ud_odp_caps; 3632 resp.odp_caps.reserved = 0; 3633 #else 3634 memset(&resp.odp_caps, 0, sizeof(resp.odp_caps)); 3635 #endif 3636 resp.response_length += sizeof(resp.odp_caps); 3637 3638 if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask)) 3639 goto end; 3640 3641 resp.timestamp_mask = attr.timestamp_mask; 3642 resp.response_length += sizeof(resp.timestamp_mask); 3643 3644 if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock)) 3645 goto end; 3646 3647 resp.hca_core_clock = attr.hca_core_clock; 3648 resp.response_length += sizeof(resp.hca_core_clock); 3649 3650 end: 3651 err = ib_copy_to_udata(ucore, &resp, resp.response_length); 3652 if (err) 3653 return err; 3654 3655 return 0; 3656 } 3657
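/*
 * For orientation, a minimal userspace sketch (not part of this file) of
 * how the classic handlers above are reached.  Each command is a single
 * write() on the uverbs char device: an ib_uverbs_cmd_hdr whose in_words
 * and out_words count the request and response in 4-byte words, followed
 * immediately by the command struct.  The names uverbs_fd and ah_handle
 * below are illustrative assumptions:
 *
 *	struct {
 *		struct ib_uverbs_cmd_hdr	hdr;
 *		struct ib_uverbs_destroy_ah	cmd;
 *	} req;
 *
 *	req.hdr.command   = IB_USER_VERBS_CMD_DESTROY_AH;
 *	req.hdr.in_words  = sizeof(req) / 4;
 *	req.hdr.out_words = 0;
 *	req.cmd.ah_handle = ah_handle;		(handle from create_ah)
 *
 *	if (write(uverbs_fd, &req, sizeof(req)) != sizeof(req))
 *		goto fail;			(handler returned an error)
 *
 * On success ib_uverbs_destroy_ah() returns in_len, so write() reports
 * the full request size back to the caller.
 */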