/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <asm/uaccess.h>

#include "uverbs.h"
#include "core_priv.h"

struct uverbs_lock_class {
	struct lock_class_key key;
	char name[16];
};

static struct uverbs_lock_class pd_lock_class = { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class = { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class = { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class = { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
static struct uverbs_lock_class wq_lock_class = { .name = "WQ-uobj" };
static struct uverbs_lock_class rwq_ind_table_lock_class = { .name = "IND_TBL-uobj" };

/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr write operations.  When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.  For read operations, rcu_read_lock() is used
 *   instead, but similarly the kref reference is grabbed before the
 *   rcu_read_unlock().
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to the
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */
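/*
 * Illustrative sketch only (editorial, not part of the original file),
 * assuming a hypothetical PD handle "pd_handle" arriving from
 * userspace: a typical read-side user of the scheme above takes the
 * kref and the rwsem via the idr_read_*() helpers defined below,
 * works with the object, then releases both:
 *
 *	struct ib_pd *pd = idr_read_pd(pd_handle, file->ucontext);
 *	if (!pd)
 *		return -EINVAL;
 *	... use pd while its uobject rwsem is held for reading ...
 *	put_pd_read(pd);
 */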
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
	uobj->user_handle = user_handle;
	uobj->context = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
	uobj->live = 0;
}

static void release_uobj(struct kref *kref)
{
	kfree_rcu(container_of(kref, struct ib_uobject, ref), rcu);
}

static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}

static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}

static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}

static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&ib_uverbs_idr_lock);

	ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		uobj->id = ret;

	spin_unlock(&ib_uverbs_idr_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}

void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}

static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	rcu_read_lock();
	uobj = idr_find(idr, id);
	if (uobj) {
		if (uobj->context == context)
			kref_get(&uobj->ref);
		else
			uobj = NULL;
	}
	rcu_read_unlock();

	return uobj;
}

static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}

static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}
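/*
 * Editorial note on the !uobj->live checks above: they close the race
 * where a destroyer wins the rwsem first.  Roughly:
 *
 *	reader				destroyer
 *	------				---------
 *	__idr_get_uobj() takes kref
 *					down_write(); live = 0; up_write();
 *					idr_remove(); put_uobj()
 *	down_read(); sees !live
 *	put_uobj_read()
 *
 * The kfree_rcu() in release_uobj() frees the storage only after the
 * last kref is dropped and an RCU grace period has passed.
 */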
static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
			  int nested)
{
	struct ib_uobject *uobj;

	uobj = idr_read_uobj(idr, id, context, nested);
	return uobj ? uobj->object : NULL;
}

static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}

static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

static struct ib_wq *idr_read_wq(int wq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_wq_idr, wq_handle, context, 0);
}

static void put_wq_read(struct ib_wq *wq)
{
	put_uobj_read(wq->uobject);
}

static struct ib_rwq_ind_table *idr_read_rwq_indirection_table(int ind_table_handle,
							       struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_rwq_ind_tbl_idr, ind_table_handle, context, 0);
}

static void put_rwq_indirection_table_read(struct ib_rwq_ind_table *ind_table)
{
	put_uobj_read(ind_table->uobject);
}

static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
	return uobj ? uobj->object : NULL;
}

static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}

static void put_qp_write(struct ib_qp *qp)
{
	put_uobj_write(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}

static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
				     struct ib_uobject **uobj)
{
	*uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
	return *uobj ? (*uobj)->object : NULL;
}

static void put_xrcd_read(struct ib_uobject *uobj)
{
	put_uobj_read(uobj);
}
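/*
 * Editorial sketch (not upstream text) of the write-style command
 * skeleton that the handlers below share: copy the fixed command
 * struct from "buf", wrap the driver-private tail in a struct
 * ib_udata, and return in_len on success so the dispatcher treats
 * the whole command as consumed:
 *
 *	if (copy_from_user(&cmd, buf, sizeof cmd))
 *		return -EFAULT;
 *	INIT_UDATA(&udata, buf + sizeof cmd,
 *		   (unsigned long) cmd.response + sizeof resp,
 *		   in_len - sizeof cmd, out_len - sizeof resp);
 *	...
 *	return in_len;
 */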
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
	struct ib_ucontext *ucontext;
	struct file *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ib_dev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);
	INIT_LIST_HEAD(&ucontext->wq_list);
	INIT_LIST_HEAD(&ucontext->rwq_ind_tbl_list);
	INIT_LIST_HEAD(&ucontext->xrcd_list);
	INIT_LIST_HEAD(&ucontext->rule_list);
	rcu_read_lock();
	ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;

#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_event_file(file, ib_dev, 1);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}
static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver = attr->fw_ver;
	resp->node_guid = ib_dev->node_guid;
	resp->sys_image_guid = attr->sys_image_guid;
	resp->max_mr_size = attr->max_mr_size;
	resp->page_size_cap = attr->page_size_cap;
	resp->vendor_id = attr->vendor_id;
	resp->vendor_part_id = attr->vendor_part_id;
	resp->hw_ver = attr->hw_ver;
	resp->max_qp = attr->max_qp;
	resp->max_qp_wr = attr->max_qp_wr;
	resp->device_cap_flags = lower_32_bits(attr->device_cap_flags);
	resp->max_sge = attr->max_sge;
	resp->max_sge_rd = attr->max_sge_rd;
	resp->max_cq = attr->max_cq;
	resp->max_cqe = attr->max_cqe;
	resp->max_mr = attr->max_mr;
	resp->max_pd = attr->max_pd;
	resp->max_qp_rd_atom = attr->max_qp_rd_atom;
	resp->max_ee_rd_atom = attr->max_ee_rd_atom;
	resp->max_res_rd_atom = attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
	resp->atomic_cap = attr->atomic_cap;
	resp->max_ee = attr->max_ee;
	resp->max_rdd = attr->max_rdd;
	resp->max_mw = attr->max_mw;
	resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
	resp->max_mcast_grp = attr->max_mcast_grp;
	resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
	resp->max_ah = attr->max_ah;
	resp->max_fmr = attr->max_fmr;
	resp->max_map_per_fmr = attr->max_map_per_fmr;
	resp->max_srq = attr->max_srq;
	resp->max_srq_wr = attr->max_srq_wr;
	resp->max_srq_sge = attr->max_srq_sge;
	resp->max_pkeys = attr->max_pkeys;
	resp->local_ca_ack_delay = attr->local_ca_ack_delay;
	resp->phys_port_cnt = ib_dev->phys_port_cnt;
}

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;
	resp.lid = attr.lid;
	resp.sm_lid = attr.sm_lid;
	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;
	resp.link_layer = rdma_port_get_link_layer(ib_dev,
						   cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
	down_write(&uobj->mutex);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device = ib_dev;
	pd->uobject = uobj;
	pd->local_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	pd = uobj->object;

	if (atomic_read(&pd->usecnt)) {
		ret = -EBUSY;
		goto err_put;
	}

	ret = pd->device->dealloc_pd(uobj->object);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
	if (ret)
		goto err_put;

	uobj->live = 0;
	put_uobj_write(uobj);

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;

err_put:
	put_uobj_write(uobj);
	return ret;
}
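/*
 * Editorial summary of the XRCD bookkeeping below (not upstream
 * text): an XRC domain is shared across processes by handing around
 * a file descriptor.  The rb-tree keyed by the file's inode maps
 * each inode to its ib_xrcd, so every open of the same file resolves
 * to the same XRCD; igrab()/iput() pin the inode while a table entry
 * exists.
 */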
struct xrcd_table_entry {
	struct rb_node node;
	struct ib_xrcd *xrcd;
	struct inode *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata udata;
	struct ib_uxrcd_object *obj;
	struct ib_xrcd *xrcd = NULL;
	struct fd f = {NULL, 0};
	struct inode *inode = NULL;
	int ret = 0;
	int new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj) {
		ret = -ENOMEM;
		goto err_tree_mutex_unlock;
	}

	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

	down_write(&obj->uobject.mutex);

	if (!xrcd) {
		xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode = inode;
		xrcd->device = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_insert_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;
	up_write(&obj->uobject.mutex);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_insert_xrcd:
	idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
	ib_dealloc_xrcd(xrcd);

err:
	put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}

ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject *uobj;
	struct ib_xrcd *xrcd = NULL;
	struct inode *inode = NULL;
	struct ib_uxrcd_object *obj;
	int live;
	int ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->device->xrcd_tree_mutex);
	uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
	if (!uobj) {
		ret = -EINVAL;
		goto out;
	}

	xrcd = uobj->object;
	inode = xrcd->inode;
	obj = container_of(uobj, struct ib_uxrcd_object, uobject);
	if (atomic_read(&obj->refcnt)) {
		put_uobj_write(uobj);
		ret = -EBUSY;
		goto out;
	}

	if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
		ret = ib_dealloc_xrcd(uobj->object);
		if (!ret)
			uobj->live = 0;
	}

	live = uobj->live;
	if (inode && ret)
		atomic_inc(&xrcd->usecnt);

	put_uobj_write(uobj);

	if (ret)
		goto out;

	if (inode && !live)
		xrcd_table_delete(file->device, inode);

	idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);
	ret = in_len;

out:
	mutex_unlock(&file->device->xrcd_tree_mutex);
	return ret;
}
void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			    struct ib_xrcd *xrcd)
{
	struct inode *inode;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return;

	ib_dealloc_xrcd(xrcd);

	if (inode)
		xrcd_table_delete(dev, inode);
}
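/*
 * Editorial example (the numbers are made up) for the alignment
 * check in ib_uverbs_reg_mr() below: cmd.start and cmd.hca_va must
 * share the same offset within a page, because the HCA maps memory
 * page by page.  With 4 KiB pages, start 0x10010 with hca_va
 * 0x7f0010 passes (both offsets are 0x010), while start 0x10010 with
 * hca_va 0x7f0020 is rejected with -EINVAL.
 */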
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device = pd->device;
	mr->pd = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata udata;
	struct ib_pd *pd = NULL;
	struct ib_mr *mr;
	struct ib_pd *old_pd;
	int ret;
	struct ib_uobject *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle,
			      file->ucontext);

	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		put_pd_read(pd);

put_uobjs:
	put_uobj_write(mr->uobject);

	return ret;
}

ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr *mr;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mw *mw;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device = pd->device;
	mw->pd = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;
	ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
	if (ret)
		goto err_unalloc;

	memset(&resp, 0, sizeof(resp));
	resp.rkey = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mw_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
	uverbs_dealloc_mw(mw);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_mw *mw;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mw = uobj->object;

	ret = uverbs_dealloc_mw(mw);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct file *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		return ret;
	resp.fd = ret;

	filp = ib_uverbs_alloc_event_file(file, ib_dev, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(resp.fd);
		return PTR_ERR(filp);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	fd_install(resp.fd, filp);
	return in_len;
}
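/*
 * Editorial note on create_cq() below (and create_qp() further down):
 * one helper backs both the legacy and the extended ("ex") uverbs
 * paths.  The caller passes cmd_sz, the number of command bytes it
 * actually received, and optional trailing fields are read only when
 * cmd_sz is large enough to cover them, e.g.:
 *
 *	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
 *		attr.flags = cmd->flags;
 *
 * In the reverse direction, resp.response_length records how many
 * response bytes this kernel filled in, letting old and new
 * userspace interoperate.
 */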
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object *obj;
	struct ib_uverbs_event_file *ev_file = NULL;
	struct ib_cq *cq;
	int ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_uobj(&obj->uobject, cmd->user_handle, file->ucontext, &cq_lock_class);
	down_write(&obj->uobject.mutex);

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file = file;
	obj->comp_events_reported = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr,
			       file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device = ib_dev;
	cq->uobject = &obj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);

	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return obj;

err_cb:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);

	return ERR_PTR(ret);
}

static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_ex_create_cq cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata ucore;
	struct ib_udata uhw;
	struct ib_ucq_object *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd), sizeof(resp));

	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}
static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq cmd;
	struct ib_ucq_object *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return 0;
}

ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq cmd;
	struct ib_uverbs_resize_cq_resp resp;
	struct ib_udata udata;
	struct ib_cq *cq;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}
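/*
 * Editorial sketch of the poll_cq response layout used below: the
 * user buffer receives a struct ib_uverbs_poll_cq_resp header
 * followed by resp.count packed struct ib_uverbs_wc entries:
 *
 *	+------------------------+-------+-------+----
 *	| ib_uverbs_poll_cq_resp | wc[0] | wc[1] | ...
 *	+------------------------+-------+-------+----
 *
 * copy_wc_to_user() translates one kernel struct ib_wc into the
 * fixed-layout userspace struct at the proper offset.
 */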
static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id = wc->wr_id;
	tmp.status = wc->status;
	tmp.opcode = wc->opcode;
	tmp.vendor_err = wc->vendor_err;
	tmp.byte_len = wc->byte_len;
	tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
	tmp.qp_num = wc->qp->qp_num;
	tmp.src_qp = wc->src_qp;
	tmp.wc_flags = wc->wc_flags;
	tmp.pkey_index = wc->pkey_index;
	tmp.slid = wc->slid;
	tmp.sl = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num = wc->port_num;
	tmp.reserved = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user *header_ptr;
	u8 __user *data_ptr;
	struct ib_cq *cq;
	struct ib_wc wc;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	put_cq_read(cq);
	return ret;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	put_cq_read(cq);

	return in_len;
}
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject *uobj;
	struct ib_cq *cq;
	struct ib_ucq_object *obj;
	struct ib_uverbs_event_file *ev_file;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq = uobj->object;
	ev_file = cq->cq_context;
	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, obj);

	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
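/*
 * Editorial overview of create_qp() below (a summary, not upstream
 * text): the QP type decides which handles must be resolved.
 * IB_QPT_XRC_TGT takes only an XRCD (passed in the pd_handle field);
 * IB_QPT_XRC_INI has no receive side, so its recv attributes are
 * zeroed; a QP built on an RWQ indirection table may omit CQs and
 * SRQ; every other type needs a PD, a send CQ and, directly or
 * shared with the send side, a receive CQ.  IB_QPT_RAW_PACKET also
 * requires CAP_NET_RAW.
 */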
static int create_qp(struct ib_uverbs_file *file,
		     struct ib_udata *ucore,
		     struct ib_udata *uhw,
		     struct ib_uverbs_ex_create_qp *cmd,
		     size_t cmd_sz,
		     int (*cb)(struct ib_uverbs_file *file,
			       struct ib_uverbs_ex_create_qp_resp *resp,
			       struct ib_udata *udata),
		     void *context)
{
	struct ib_uqp_object *obj;
	struct ib_device *device;
	struct ib_pd *pd = NULL;
	struct ib_xrcd *xrcd = NULL;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_cq *scq = NULL, *rcq = NULL;
	struct ib_srq *srq = NULL;
	struct ib_qp *qp;
	char *buf;
	struct ib_qp_init_attr attr = {};
	struct ib_uverbs_ex_create_qp_resp resp;
	int ret;
	struct ib_rwq_ind_table *ind_tbl = NULL;
	bool has_sq = true;

	if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	obj = kzalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext,
		  &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);
	if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
		      sizeof(cmd->rwq_ind_tbl_handle) &&
	    (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
		ind_tbl = idr_read_rwq_indirection_table(cmd->rwq_ind_tbl_handle,
							 file->ucontext);
		if (!ind_tbl) {
			ret = -EINVAL;
			goto err_put;
		}

		attr.rwq_ind_tbl = ind_tbl;
	}

	if ((cmd_sz >= offsetof(typeof(*cmd), reserved1) +
		       sizeof(cmd->reserved1)) && cmd->reserved1) {
		ret = -EOPNOTSUPP;
		goto err_put;
	}

	if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (ind_tbl && !cmd->max_send_wr)
		has_sq = false;

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		xrcd = idr_read_xrcd(cmd->pd_handle, file->ucontext,
				     &xrcd_uobj);
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = idr_read_srq(cmd->srq_handle,
						   file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (!ind_tbl) {
				if (cmd->recv_cq_handle != cmd->send_cq_handle) {
					rcq = idr_read_cq(cmd->recv_cq_handle,
							  file->ucontext, 0);
					if (!rcq) {
						ret = -EINVAL;
						goto err_put;
					}
				}
			}
		}

		if (has_sq)
			scq = idr_read_cq(cmd->send_cq_handle, file->ucontext, !!rcq);
		if (!ind_tbl)
			rcq = rcq ?: scq;
		pd = idr_read_pd(cmd->pd_handle, file->ucontext);
		if (!pd || (!scq && has_sq)) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.xrcd = xrcd;
	attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					     IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd->qp_type;
	attr.create_flags = 0;

	attr.cap.max_send_wr = cmd->max_send_wr;
	attr.cap.max_recv_wr = cmd->max_recv_wr;
	attr.cap.max_send_sge = cmd->max_send_sge;
	attr.cap.max_recv_sge = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				  IB_QP_CREATE_CROSS_CHANNEL |
				  IB_QP_CREATE_MANAGED_SEND |
				  IB_QP_CREATE_MANAGED_RECV |
				  IB_QP_CREATE_SCATTER_FCS)) {
		ret = -EINVAL;
		goto err_put;
	}

	buf = (void *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {
			ret = -EINVAL;
			goto err_put;
		}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, uhw);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		qp->real_qp = qp;
		qp->device = device;
		qp->pd = pd;
		qp->send_cq = attr.send_cq;
		qp->recv_cq = attr.recv_cq;
		qp->srq = attr.srq;
		qp->rwq_ind_tbl = ind_tbl;
		qp->event_handler = attr.event_handler;
		qp->qp_context = attr.qp_context;
		qp->qp_type = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		if (attr.send_cq)
			atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn = qp->qp_num;
	resp.base.qp_handle = obj->uevent.uobject.id;
	resp.base.max_recv_sge = attr.cap.max_recv_sge;
	resp.base.max_send_sge = attr.cap.max_send_sge;
	resp.base.max_recv_wr = attr.cap.max_recv_wr;
	resp.base.max_send_wr = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, &resp, ucore);
	if (ret)
		goto err_cb;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		put_xrcd_read(xrcd_uobj);
	}

	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);
	if (ind_tbl)
		put_rwq_indirection_table_read(ind_tbl);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;
err_cb:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	if (xrcd)
		put_xrcd_read(xrcd_uobj);
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);
	if (ind_tbl)
		put_rwq_indirection_table_read(ind_tbl);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
				  struct ib_uverbs_ex_create_qp_resp *resp,
				  struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_ex_create_qp cmd_ex;
	struct ib_udata ucore;
	struct ib_udata uhw;
	ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
	int err;

	if (out_len < resp_size)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd),
		   resp_size);
	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + resp_size,
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - resp_size);

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.pd_handle = cmd.pd_handle;
	cmd_ex.send_cq_handle = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
	cmd_ex.srq_handle = cmd.srq_handle;
	cmd_ex.max_send_wr = cmd.max_send_wr;
	cmd_ex.max_recv_wr = cmd.max_recv_wr;
	cmd_ex.max_send_sge = cmd.max_send_sge;
	cmd_ex.max_recv_sge = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all = cmd.sq_sig_all;
	cmd_ex.qp_type = cmd.qp_type;
	cmd_ex.is_srq = cmd.is_srq;

	err = create_qp(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), is_srq) +
			sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
			NULL);

	if (err)
		return err;

	return in_len;
}

static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
				     struct ib_uverbs_ex_create_qp_resp *resp,
				     struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}
int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_uverbs_ex_create_qp cmd = {0};
	int err;

	if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
			    sizeof(cmd.comp_mask)))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	err = create_qp(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_qp_cb, NULL);

	if (err)
		return err;

	return 0;
}

ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_xrcd *xrcd;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_qp *qp;
	struct ib_qp_open_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
	if (!xrcd) {
		ret = -EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.qp_num = cmd.qpn;
	attr.qp_type = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_remove;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	put_xrcd_read(xrcd_uobj);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_remove:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	put_xrcd_read(xrcd_uobj);
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
ib_uverbs_query_qp(struct ib_uverbs_file *file, 2217 struct ib_device *ib_dev, 2218 const char __user *buf, int in_len, 2219 int out_len) 2220 { 2221 struct ib_uverbs_query_qp cmd; 2222 struct ib_uverbs_query_qp_resp resp; 2223 struct ib_qp *qp; 2224 struct ib_qp_attr *attr; 2225 struct ib_qp_init_attr *init_attr; 2226 int ret; 2227 2228 if (copy_from_user(&cmd, buf, sizeof cmd)) 2229 return -EFAULT; 2230 2231 attr = kmalloc(sizeof *attr, GFP_KERNEL); 2232 init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL); 2233 if (!attr || !init_attr) { 2234 ret = -ENOMEM; 2235 goto out; 2236 } 2237 2238 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 2239 if (!qp) { 2240 ret = -EINVAL; 2241 goto out; 2242 } 2243 2244 ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr); 2245 2246 put_qp_read(qp); 2247 2248 if (ret) 2249 goto out; 2250 2251 memset(&resp, 0, sizeof resp); 2252 2253 resp.qp_state = attr->qp_state; 2254 resp.cur_qp_state = attr->cur_qp_state; 2255 resp.path_mtu = attr->path_mtu; 2256 resp.path_mig_state = attr->path_mig_state; 2257 resp.qkey = attr->qkey; 2258 resp.rq_psn = attr->rq_psn; 2259 resp.sq_psn = attr->sq_psn; 2260 resp.dest_qp_num = attr->dest_qp_num; 2261 resp.qp_access_flags = attr->qp_access_flags; 2262 resp.pkey_index = attr->pkey_index; 2263 resp.alt_pkey_index = attr->alt_pkey_index; 2264 resp.sq_draining = attr->sq_draining; 2265 resp.max_rd_atomic = attr->max_rd_atomic; 2266 resp.max_dest_rd_atomic = attr->max_dest_rd_atomic; 2267 resp.min_rnr_timer = attr->min_rnr_timer; 2268 resp.port_num = attr->port_num; 2269 resp.timeout = attr->timeout; 2270 resp.retry_cnt = attr->retry_cnt; 2271 resp.rnr_retry = attr->rnr_retry; 2272 resp.alt_port_num = attr->alt_port_num; 2273 resp.alt_timeout = attr->alt_timeout; 2274 2275 memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16); 2276 resp.dest.flow_label = attr->ah_attr.grh.flow_label; 2277 resp.dest.sgid_index = attr->ah_attr.grh.sgid_index; 2278 resp.dest.hop_limit = attr->ah_attr.grh.hop_limit; 2279 resp.dest.traffic_class = attr->ah_attr.grh.traffic_class; 2280 resp.dest.dlid = attr->ah_attr.dlid; 2281 resp.dest.sl = attr->ah_attr.sl; 2282 resp.dest.src_path_bits = attr->ah_attr.src_path_bits; 2283 resp.dest.static_rate = attr->ah_attr.static_rate; 2284 resp.dest.is_global = !!(attr->ah_attr.ah_flags & IB_AH_GRH); 2285 resp.dest.port_num = attr->ah_attr.port_num; 2286 2287 memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16); 2288 resp.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label; 2289 resp.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index; 2290 resp.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit; 2291 resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class; 2292 resp.alt_dest.dlid = attr->alt_ah_attr.dlid; 2293 resp.alt_dest.sl = attr->alt_ah_attr.sl; 2294 resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits; 2295 resp.alt_dest.static_rate = attr->alt_ah_attr.static_rate; 2296 resp.alt_dest.is_global = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH); 2297 resp.alt_dest.port_num = attr->alt_ah_attr.port_num; 2298 2299 resp.max_send_wr = init_attr->cap.max_send_wr; 2300 resp.max_recv_wr = init_attr->cap.max_recv_wr; 2301 resp.max_send_sge = init_attr->cap.max_send_sge; 2302 resp.max_recv_sge = init_attr->cap.max_recv_sge; 2303 resp.max_inline_data = init_attr->cap.max_inline_data; 2304 resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR; 2305 2306 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2307 &resp, sizeof resp)) 2308 ret = -EFAULT; 2309 2310 out: 
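/*
 * Editor's note: attr and init_attr are freed unconditionally at this
 * label.  kfree(NULL) is a no-op, so the allocation block above can
 * test both pointers at once and jump straight here when either
 * kmalloc() fails; there is no need to track which of the two
 * allocations succeeded.  The same pattern in miniature:
 *
 *	a = kmalloc(sizeof(*a), GFP_KERNEL);
 *	b = kmalloc(sizeof(*b), GFP_KERNEL);
 *	if (!a || !b) {
 *		ret = -ENOMEM;
 *		goto out;
 *	}
 *	...
 *   out:
 *	kfree(a);
 *	kfree(b);
 *	return ret;
 */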
2311 kfree(attr); 2312 kfree(init_attr); 2313 2314 return ret ? ret : in_len; 2315 } 2316 2317 /* Remove ignored fields set in the attribute mask */ 2318 static int modify_qp_mask(enum ib_qp_type qp_type, int mask) 2319 { 2320 switch (qp_type) { 2321 case IB_QPT_XRC_INI: 2322 return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER); 2323 case IB_QPT_XRC_TGT: 2324 return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT | 2325 IB_QP_RNR_RETRY); 2326 default: 2327 return mask; 2328 } 2329 } 2330 2331 ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, 2332 struct ib_device *ib_dev, 2333 const char __user *buf, int in_len, 2334 int out_len) 2335 { 2336 struct ib_uverbs_modify_qp cmd; 2337 struct ib_udata udata; 2338 struct ib_qp *qp; 2339 struct ib_qp_attr *attr; 2340 int ret; 2341 2342 if (copy_from_user(&cmd, buf, sizeof cmd)) 2343 return -EFAULT; 2344 2345 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, 2346 out_len); 2347 2348 attr = kmalloc(sizeof *attr, GFP_KERNEL); 2349 if (!attr) 2350 return -ENOMEM; 2351 2352 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 2353 if (!qp) { 2354 ret = -EINVAL; 2355 goto out; 2356 } 2357 2358 attr->qp_state = cmd.qp_state; 2359 attr->cur_qp_state = cmd.cur_qp_state; 2360 attr->path_mtu = cmd.path_mtu; 2361 attr->path_mig_state = cmd.path_mig_state; 2362 attr->qkey = cmd.qkey; 2363 attr->rq_psn = cmd.rq_psn; 2364 attr->sq_psn = cmd.sq_psn; 2365 attr->dest_qp_num = cmd.dest_qp_num; 2366 attr->qp_access_flags = cmd.qp_access_flags; 2367 attr->pkey_index = cmd.pkey_index; 2368 attr->alt_pkey_index = cmd.alt_pkey_index; 2369 attr->en_sqd_async_notify = cmd.en_sqd_async_notify; 2370 attr->max_rd_atomic = cmd.max_rd_atomic; 2371 attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic; 2372 attr->min_rnr_timer = cmd.min_rnr_timer; 2373 attr->port_num = cmd.port_num; 2374 attr->timeout = cmd.timeout; 2375 attr->retry_cnt = cmd.retry_cnt; 2376 attr->rnr_retry = cmd.rnr_retry; 2377 attr->alt_port_num = cmd.alt_port_num; 2378 attr->alt_timeout = cmd.alt_timeout; 2379 2380 memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16); 2381 attr->ah_attr.grh.flow_label = cmd.dest.flow_label; 2382 attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index; 2383 attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit; 2384 attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class; 2385 attr->ah_attr.dlid = cmd.dest.dlid; 2386 attr->ah_attr.sl = cmd.dest.sl; 2387 attr->ah_attr.src_path_bits = cmd.dest.src_path_bits; 2388 attr->ah_attr.static_rate = cmd.dest.static_rate; 2389 attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0; 2390 attr->ah_attr.port_num = cmd.dest.port_num; 2391 2392 memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16); 2393 attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label; 2394 attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index; 2395 attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit; 2396 attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class; 2397 attr->alt_ah_attr.dlid = cmd.alt_dest.dlid; 2398 attr->alt_ah_attr.sl = cmd.alt_dest.sl; 2399 attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits; 2400 attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate; 2401 attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? 
IB_AH_GRH : 0; 2402 attr->alt_ah_attr.port_num = cmd.alt_dest.port_num; 2403 2404 if (qp->real_qp == qp) { 2405 ret = ib_resolve_eth_dmac(qp, attr, &cmd.attr_mask); 2406 if (ret) 2407 goto release_qp; 2408 ret = qp->device->modify_qp(qp, attr, 2409 modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata); 2410 } else { 2411 ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask)); 2412 } 2413 2414 if (ret) 2415 goto release_qp; 2416 2417 ret = in_len; 2418 2419 release_qp: 2420 put_qp_read(qp); 2421 2422 out: 2423 kfree(attr); 2424 2425 return ret; 2426 } 2427 2428 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file, 2429 struct ib_device *ib_dev, 2430 const char __user *buf, int in_len, 2431 int out_len) 2432 { 2433 struct ib_uverbs_destroy_qp cmd; 2434 struct ib_uverbs_destroy_qp_resp resp; 2435 struct ib_uobject *uobj; 2436 struct ib_qp *qp; 2437 struct ib_uqp_object *obj; 2438 int ret = -EINVAL; 2439 2440 if (copy_from_user(&cmd, buf, sizeof cmd)) 2441 return -EFAULT; 2442 2443 memset(&resp, 0, sizeof resp); 2444 2445 uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext); 2446 if (!uobj) 2447 return -EINVAL; 2448 qp = uobj->object; 2449 obj = container_of(uobj, struct ib_uqp_object, uevent.uobject); 2450 2451 if (!list_empty(&obj->mcast_list)) { 2452 put_uobj_write(uobj); 2453 return -EBUSY; 2454 } 2455 2456 ret = ib_destroy_qp(qp); 2457 if (!ret) 2458 uobj->live = 0; 2459 2460 put_uobj_write(uobj); 2461 2462 if (ret) 2463 return ret; 2464 2465 if (obj->uxrcd) 2466 atomic_dec(&obj->uxrcd->refcnt); 2467 2468 idr_remove_uobj(&ib_uverbs_qp_idr, uobj); 2469 2470 mutex_lock(&file->mutex); 2471 list_del(&uobj->list); 2472 mutex_unlock(&file->mutex); 2473 2474 ib_uverbs_release_uevent(file, &obj->uevent); 2475 2476 resp.events_reported = obj->uevent.events_reported; 2477 2478 put_uobj(uobj); 2479 2480 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2481 &resp, sizeof resp)) 2482 return -EFAULT; 2483 2484 return in_len; 2485 } 2486 2487 static void *alloc_wr(size_t wr_size, __u32 num_sge) 2488 { 2489 return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) + 2490 num_sge * sizeof (struct ib_sge), GFP_KERNEL); 2491 }; 2492 2493 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, 2494 struct ib_device *ib_dev, 2495 const char __user *buf, int in_len, 2496 int out_len) 2497 { 2498 struct ib_uverbs_post_send cmd; 2499 struct ib_uverbs_post_send_resp resp; 2500 struct ib_uverbs_send_wr *user_wr; 2501 struct ib_send_wr *wr = NULL, *last, *next, *bad_wr; 2502 struct ib_qp *qp; 2503 int i, sg_ind; 2504 int is_ud; 2505 ssize_t ret = -EINVAL; 2506 size_t next_size; 2507 2508 if (copy_from_user(&cmd, buf, sizeof cmd)) 2509 return -EFAULT; 2510 2511 if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count + 2512 cmd.sge_count * sizeof (struct ib_uverbs_sge)) 2513 return -EINVAL; 2514 2515 if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr)) 2516 return -EINVAL; 2517 2518 user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL); 2519 if (!user_wr) 2520 return -ENOMEM; 2521 2522 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 2523 if (!qp) 2524 goto out; 2525 2526 is_ud = qp->qp_type == IB_QPT_UD; 2527 sg_ind = 0; 2528 last = NULL; 2529 for (i = 0; i < cmd.wr_count; ++i) { 2530 if (copy_from_user(user_wr, 2531 buf + sizeof cmd + i * cmd.wqe_size, 2532 cmd.wqe_size)) { 2533 ret = -EFAULT; 2534 goto out_put; 2535 } 2536 2537 if (user_wr->num_sge + sg_ind > cmd.sge_count) { 2538 ret = -EINVAL; 2539 goto out_put; 2540 } 2541 2542 if (is_ud) { 2543 struct ib_ud_wr *ud; 2544 
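/*
 * Editor's note: each kernel WR built below comes from alloc_wr(),
 * which makes a single allocation holding the type-specific WR struct,
 * padded to the alignment of struct ib_sge, immediately followed by
 * the inline scatter/gather array that next->sg_list is later pointed
 * at:
 *
 *	[ ib_ud_wr | pad | ib_sge[num_sge] ]
 *
 * num_sge is a user-supplied __u32, so the size computation inside
 * alloc_wr() can in principle wrap.  A hardened sketch (an editorial
 * suggestion, not what this version of the code does) would reject
 * such counts before calling kmalloc():
 *
 *	if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof(struct ib_sge))) /
 *		       sizeof(struct ib_sge))
 *		return NULL;
 *
 * i.e. refuse any num_sge whose sg_list would overflow the computed
 * allocation size.
 */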
2545 if (user_wr->opcode != IB_WR_SEND && 2546 user_wr->opcode != IB_WR_SEND_WITH_IMM) { 2547 ret = -EINVAL; 2548 goto out_put; 2549 } 2550 2551 next_size = sizeof(*ud); 2552 ud = alloc_wr(next_size, user_wr->num_sge); 2553 if (!ud) { 2554 ret = -ENOMEM; 2555 goto out_put; 2556 } 2557 2558 ud->ah = idr_read_ah(user_wr->wr.ud.ah, file->ucontext); 2559 if (!ud->ah) { 2560 kfree(ud); 2561 ret = -EINVAL; 2562 goto out_put; 2563 } 2564 ud->remote_qpn = user_wr->wr.ud.remote_qpn; 2565 ud->remote_qkey = user_wr->wr.ud.remote_qkey; 2566 2567 next = &ud->wr; 2568 } else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM || 2569 user_wr->opcode == IB_WR_RDMA_WRITE || 2570 user_wr->opcode == IB_WR_RDMA_READ) { 2571 struct ib_rdma_wr *rdma; 2572 2573 next_size = sizeof(*rdma); 2574 rdma = alloc_wr(next_size, user_wr->num_sge); 2575 if (!rdma) { 2576 ret = -ENOMEM; 2577 goto out_put; 2578 } 2579 2580 rdma->remote_addr = user_wr->wr.rdma.remote_addr; 2581 rdma->rkey = user_wr->wr.rdma.rkey; 2582 2583 next = &rdma->wr; 2584 } else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || 2585 user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { 2586 struct ib_atomic_wr *atomic; 2587 2588 next_size = sizeof(*atomic); 2589 atomic = alloc_wr(next_size, user_wr->num_sge); 2590 if (!atomic) { 2591 ret = -ENOMEM; 2592 goto out_put; 2593 } 2594 2595 atomic->remote_addr = user_wr->wr.atomic.remote_addr; 2596 atomic->compare_add = user_wr->wr.atomic.compare_add; 2597 atomic->swap = user_wr->wr.atomic.swap; 2598 atomic->rkey = user_wr->wr.atomic.rkey; 2599 2600 next = &atomic->wr; 2601 } else if (user_wr->opcode == IB_WR_SEND || 2602 user_wr->opcode == IB_WR_SEND_WITH_IMM || 2603 user_wr->opcode == IB_WR_SEND_WITH_INV) { 2604 next_size = sizeof(*next); 2605 next = alloc_wr(next_size, user_wr->num_sge); 2606 if (!next) { 2607 ret = -ENOMEM; 2608 goto out_put; 2609 } 2610 } else { 2611 ret = -EINVAL; 2612 goto out_put; 2613 } 2614 2615 if (user_wr->opcode == IB_WR_SEND_WITH_IMM || 2616 user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) { 2617 next->ex.imm_data = 2618 (__be32 __force) user_wr->ex.imm_data; 2619 } else if (user_wr->opcode == IB_WR_SEND_WITH_INV) { 2620 next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey; 2621 } 2622 2623 if (!last) 2624 wr = next; 2625 else 2626 last->next = next; 2627 last = next; 2628 2629 next->next = NULL; 2630 next->wr_id = user_wr->wr_id; 2631 next->num_sge = user_wr->num_sge; 2632 next->opcode = user_wr->opcode; 2633 next->send_flags = user_wr->send_flags; 2634 2635 if (next->num_sge) { 2636 next->sg_list = (void *) next + 2637 ALIGN(next_size, sizeof(struct ib_sge)); 2638 if (copy_from_user(next->sg_list, 2639 buf + sizeof cmd + 2640 cmd.wr_count * cmd.wqe_size + 2641 sg_ind * sizeof (struct ib_sge), 2642 next->num_sge * sizeof (struct ib_sge))) { 2643 ret = -EFAULT; 2644 goto out_put; 2645 } 2646 sg_ind += next->num_sge; 2647 } else 2648 next->sg_list = NULL; 2649 } 2650 2651 resp.bad_wr = 0; 2652 ret = qp->device->post_send(qp->real_qp, wr, &bad_wr); 2653 if (ret) 2654 for (next = wr; next; next = next->next) { 2655 ++resp.bad_wr; 2656 if (next == bad_wr) 2657 break; 2658 } 2659 2660 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2661 &resp, sizeof resp)) 2662 ret = -EFAULT; 2663 2664 out_put: 2665 put_qp_read(qp); 2666 2667 while (wr) { 2668 if (is_ud && ud_wr(wr)->ah) 2669 put_ah_read(ud_wr(wr)->ah); 2670 next = wr->next; 2671 kfree(wr); 2672 wr = next; 2673 } 2674 2675 out: 2676 kfree(user_wr); 2677 2678 return ret ? 
ret : in_len; 2679 } 2680 2681 static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf, 2682 int in_len, 2683 u32 wr_count, 2684 u32 sge_count, 2685 u32 wqe_size) 2686 { 2687 struct ib_uverbs_recv_wr *user_wr; 2688 struct ib_recv_wr *wr = NULL, *last, *next; 2689 int sg_ind; 2690 int i; 2691 int ret; 2692 2693 if (in_len < wqe_size * wr_count + 2694 sge_count * sizeof (struct ib_uverbs_sge)) 2695 return ERR_PTR(-EINVAL); 2696 2697 if (wqe_size < sizeof (struct ib_uverbs_recv_wr)) 2698 return ERR_PTR(-EINVAL); 2699 2700 user_wr = kmalloc(wqe_size, GFP_KERNEL); 2701 if (!user_wr) 2702 return ERR_PTR(-ENOMEM); 2703 2704 sg_ind = 0; 2705 last = NULL; 2706 for (i = 0; i < wr_count; ++i) { 2707 if (copy_from_user(user_wr, buf + i * wqe_size, 2708 wqe_size)) { 2709 ret = -EFAULT; 2710 goto err; 2711 } 2712 2713 if (user_wr->num_sge + sg_ind > sge_count) { 2714 ret = -EINVAL; 2715 goto err; 2716 } 2717 2718 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) + 2719 user_wr->num_sge * sizeof (struct ib_sge), 2720 GFP_KERNEL); 2721 if (!next) { 2722 ret = -ENOMEM; 2723 goto err; 2724 } 2725 2726 if (!last) 2727 wr = next; 2728 else 2729 last->next = next; 2730 last = next; 2731 2732 next->next = NULL; 2733 next->wr_id = user_wr->wr_id; 2734 next->num_sge = user_wr->num_sge; 2735 2736 if (next->num_sge) { 2737 next->sg_list = (void *) next + 2738 ALIGN(sizeof *next, sizeof (struct ib_sge)); 2739 if (copy_from_user(next->sg_list, 2740 buf + wr_count * wqe_size + 2741 sg_ind * sizeof (struct ib_sge), 2742 next->num_sge * sizeof (struct ib_sge))) { 2743 ret = -EFAULT; 2744 goto err; 2745 } 2746 sg_ind += next->num_sge; 2747 } else 2748 next->sg_list = NULL; 2749 } 2750 2751 kfree(user_wr); 2752 return wr; 2753 2754 err: 2755 kfree(user_wr); 2756 2757 while (wr) { 2758 next = wr->next; 2759 kfree(wr); 2760 wr = next; 2761 } 2762 2763 return ERR_PTR(ret); 2764 } 2765 2766 ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file, 2767 struct ib_device *ib_dev, 2768 const char __user *buf, int in_len, 2769 int out_len) 2770 { 2771 struct ib_uverbs_post_recv cmd; 2772 struct ib_uverbs_post_recv_resp resp; 2773 struct ib_recv_wr *wr, *next, *bad_wr; 2774 struct ib_qp *qp; 2775 ssize_t ret = -EINVAL; 2776 2777 if (copy_from_user(&cmd, buf, sizeof cmd)) 2778 return -EFAULT; 2779 2780 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd, 2781 in_len - sizeof cmd, cmd.wr_count, 2782 cmd.sge_count, cmd.wqe_size); 2783 if (IS_ERR(wr)) 2784 return PTR_ERR(wr); 2785 2786 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 2787 if (!qp) 2788 goto out; 2789 2790 resp.bad_wr = 0; 2791 ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr); 2792 2793 put_qp_read(qp); 2794 2795 if (ret) 2796 for (next = wr; next; next = next->next) { 2797 ++resp.bad_wr; 2798 if (next == bad_wr) 2799 break; 2800 } 2801 2802 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2803 &resp, sizeof resp)) 2804 ret = -EFAULT; 2805 2806 out: 2807 while (wr) { 2808 next = wr->next; 2809 kfree(wr); 2810 wr = next; 2811 } 2812 2813 return ret ? 
ret : in_len; 2814 } 2815 2816 ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file, 2817 struct ib_device *ib_dev, 2818 const char __user *buf, int in_len, 2819 int out_len) 2820 { 2821 struct ib_uverbs_post_srq_recv cmd; 2822 struct ib_uverbs_post_srq_recv_resp resp; 2823 struct ib_recv_wr *wr, *next, *bad_wr; 2824 struct ib_srq *srq; 2825 ssize_t ret = -EINVAL; 2826 2827 if (copy_from_user(&cmd, buf, sizeof cmd)) 2828 return -EFAULT; 2829 2830 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd, 2831 in_len - sizeof cmd, cmd.wr_count, 2832 cmd.sge_count, cmd.wqe_size); 2833 if (IS_ERR(wr)) 2834 return PTR_ERR(wr); 2835 2836 srq = idr_read_srq(cmd.srq_handle, file->ucontext); 2837 if (!srq) 2838 goto out; 2839 2840 resp.bad_wr = 0; 2841 ret = srq->device->post_srq_recv(srq, wr, &bad_wr); 2842 2843 put_srq_read(srq); 2844 2845 if (ret) 2846 for (next = wr; next; next = next->next) { 2847 ++resp.bad_wr; 2848 if (next == bad_wr) 2849 break; 2850 } 2851 2852 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2853 &resp, sizeof resp)) 2854 ret = -EFAULT; 2855 2856 out: 2857 while (wr) { 2858 next = wr->next; 2859 kfree(wr); 2860 wr = next; 2861 } 2862 2863 return ret ? ret : in_len; 2864 } 2865 2866 ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, 2867 struct ib_device *ib_dev, 2868 const char __user *buf, int in_len, 2869 int out_len) 2870 { 2871 struct ib_uverbs_create_ah cmd; 2872 struct ib_uverbs_create_ah_resp resp; 2873 struct ib_uobject *uobj; 2874 struct ib_pd *pd; 2875 struct ib_ah *ah; 2876 struct ib_ah_attr attr; 2877 int ret; 2878 2879 if (out_len < sizeof resp) 2880 return -ENOSPC; 2881 2882 if (copy_from_user(&cmd, buf, sizeof cmd)) 2883 return -EFAULT; 2884 2885 uobj = kmalloc(sizeof *uobj, GFP_KERNEL); 2886 if (!uobj) 2887 return -ENOMEM; 2888 2889 init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class); 2890 down_write(&uobj->mutex); 2891 2892 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 2893 if (!pd) { 2894 ret = -EINVAL; 2895 goto err; 2896 } 2897 2898 attr.dlid = cmd.attr.dlid; 2899 attr.sl = cmd.attr.sl; 2900 attr.src_path_bits = cmd.attr.src_path_bits; 2901 attr.static_rate = cmd.attr.static_rate; 2902 attr.ah_flags = cmd.attr.is_global ? 
IB_AH_GRH : 0; 2903 attr.port_num = cmd.attr.port_num; 2904 attr.grh.flow_label = cmd.attr.grh.flow_label; 2905 attr.grh.sgid_index = cmd.attr.grh.sgid_index; 2906 attr.grh.hop_limit = cmd.attr.grh.hop_limit; 2907 attr.grh.traffic_class = cmd.attr.grh.traffic_class; 2908 memset(&attr.dmac, 0, sizeof(attr.dmac)); 2909 memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16); 2910 2911 ah = ib_create_ah(pd, &attr); 2912 if (IS_ERR(ah)) { 2913 ret = PTR_ERR(ah); 2914 goto err_put; 2915 } 2916 2917 ah->uobject = uobj; 2918 uobj->object = ah; 2919 2920 ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj); 2921 if (ret) 2922 goto err_destroy; 2923 2924 resp.ah_handle = uobj->id; 2925 2926 if (copy_to_user((void __user *) (unsigned long) cmd.response, 2927 &resp, sizeof resp)) { 2928 ret = -EFAULT; 2929 goto err_copy; 2930 } 2931 2932 put_pd_read(pd); 2933 2934 mutex_lock(&file->mutex); 2935 list_add_tail(&uobj->list, &file->ucontext->ah_list); 2936 mutex_unlock(&file->mutex); 2937 2938 uobj->live = 1; 2939 2940 up_write(&uobj->mutex); 2941 2942 return in_len; 2943 2944 err_copy: 2945 idr_remove_uobj(&ib_uverbs_ah_idr, uobj); 2946 2947 err_destroy: 2948 ib_destroy_ah(ah); 2949 2950 err_put: 2951 put_pd_read(pd); 2952 2953 err: 2954 put_uobj_write(uobj); 2955 return ret; 2956 } 2957 2958 ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file, 2959 struct ib_device *ib_dev, 2960 const char __user *buf, int in_len, int out_len) 2961 { 2962 struct ib_uverbs_destroy_ah cmd; 2963 struct ib_ah *ah; 2964 struct ib_uobject *uobj; 2965 int ret; 2966 2967 if (copy_from_user(&cmd, buf, sizeof cmd)) 2968 return -EFAULT; 2969 2970 uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext); 2971 if (!uobj) 2972 return -EINVAL; 2973 ah = uobj->object; 2974 2975 ret = ib_destroy_ah(ah); 2976 if (!ret) 2977 uobj->live = 0; 2978 2979 put_uobj_write(uobj); 2980 2981 if (ret) 2982 return ret; 2983 2984 idr_remove_uobj(&ib_uverbs_ah_idr, uobj); 2985 2986 mutex_lock(&file->mutex); 2987 list_del(&uobj->list); 2988 mutex_unlock(&file->mutex); 2989 2990 put_uobj(uobj); 2991 2992 return in_len; 2993 } 2994 2995 ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file, 2996 struct ib_device *ib_dev, 2997 const char __user *buf, int in_len, 2998 int out_len) 2999 { 3000 struct ib_uverbs_attach_mcast cmd; 3001 struct ib_qp *qp; 3002 struct ib_uqp_object *obj; 3003 struct ib_uverbs_mcast_entry *mcast; 3004 int ret; 3005 3006 if (copy_from_user(&cmd, buf, sizeof cmd)) 3007 return -EFAULT; 3008 3009 qp = idr_write_qp(cmd.qp_handle, file->ucontext); 3010 if (!qp) 3011 return -EINVAL; 3012 3013 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject); 3014 3015 list_for_each_entry(mcast, &obj->mcast_list, list) 3016 if (cmd.mlid == mcast->lid && 3017 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) { 3018 ret = 0; 3019 goto out_put; 3020 } 3021 3022 mcast = kmalloc(sizeof *mcast, GFP_KERNEL); 3023 if (!mcast) { 3024 ret = -ENOMEM; 3025 goto out_put; 3026 } 3027 3028 mcast->lid = cmd.mlid; 3029 memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw); 3030 3031 ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid); 3032 if (!ret) 3033 list_add_tail(&mcast->list, &obj->mcast_list); 3034 else 3035 kfree(mcast); 3036 3037 out_put: 3038 put_qp_write(qp); 3039 3040 return ret ? 
ret : in_len; 3041 } 3042 3043 ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file, 3044 struct ib_device *ib_dev, 3045 const char __user *buf, int in_len, 3046 int out_len) 3047 { 3048 struct ib_uverbs_detach_mcast cmd; 3049 struct ib_uqp_object *obj; 3050 struct ib_qp *qp; 3051 struct ib_uverbs_mcast_entry *mcast; 3052 int ret = -EINVAL; 3053 3054 if (copy_from_user(&cmd, buf, sizeof cmd)) 3055 return -EFAULT; 3056 3057 qp = idr_write_qp(cmd.qp_handle, file->ucontext); 3058 if (!qp) 3059 return -EINVAL; 3060 3061 ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid); 3062 if (ret) 3063 goto out_put; 3064 3065 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject); 3066 3067 list_for_each_entry(mcast, &obj->mcast_list, list) 3068 if (cmd.mlid == mcast->lid && 3069 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) { 3070 list_del(&mcast->list); 3071 kfree(mcast); 3072 break; 3073 } 3074 3075 out_put: 3076 put_qp_write(qp); 3077 3078 return ret ? ret : in_len; 3079 } 3080 3081 static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec, 3082 union ib_flow_spec *ib_spec) 3083 { 3084 if (kern_spec->reserved) 3085 return -EINVAL; 3086 3087 ib_spec->type = kern_spec->type; 3088 3089 switch (ib_spec->type) { 3090 case IB_FLOW_SPEC_ETH: 3091 ib_spec->eth.size = sizeof(struct ib_flow_spec_eth); 3092 if (ib_spec->eth.size != kern_spec->eth.size) 3093 return -EINVAL; 3094 memcpy(&ib_spec->eth.val, &kern_spec->eth.val, 3095 sizeof(struct ib_flow_eth_filter)); 3096 memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask, 3097 sizeof(struct ib_flow_eth_filter)); 3098 break; 3099 case IB_FLOW_SPEC_IPV4: 3100 ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4); 3101 if (ib_spec->ipv4.size != kern_spec->ipv4.size) 3102 return -EINVAL; 3103 memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val, 3104 sizeof(struct ib_flow_ipv4_filter)); 3105 memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask, 3106 sizeof(struct ib_flow_ipv4_filter)); 3107 break; 3108 case IB_FLOW_SPEC_IPV6: 3109 ib_spec->ipv6.size = sizeof(struct ib_flow_spec_ipv6); 3110 if (ib_spec->ipv6.size != kern_spec->ipv6.size) 3111 return -EINVAL; 3112 memcpy(&ib_spec->ipv6.val, &kern_spec->ipv6.val, 3113 sizeof(struct ib_flow_ipv6_filter)); 3114 memcpy(&ib_spec->ipv6.mask, &kern_spec->ipv6.mask, 3115 sizeof(struct ib_flow_ipv6_filter)); 3116 break; 3117 case IB_FLOW_SPEC_TCP: 3118 case IB_FLOW_SPEC_UDP: 3119 ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp); 3120 if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size) 3121 return -EINVAL; 3122 memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val, 3123 sizeof(struct ib_flow_tcp_udp_filter)); 3124 memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask, 3125 sizeof(struct ib_flow_tcp_udp_filter)); 3126 break; 3127 default: 3128 return -EINVAL; 3129 } 3130 return 0; 3131 } 3132 3133 int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file, 3134 struct ib_device *ib_dev, 3135 struct ib_udata *ucore, 3136 struct ib_udata *uhw) 3137 { 3138 struct ib_uverbs_ex_create_wq cmd = {}; 3139 struct ib_uverbs_ex_create_wq_resp resp = {}; 3140 struct ib_uwq_object *obj; 3141 int err = 0; 3142 struct ib_cq *cq; 3143 struct ib_pd *pd; 3144 struct ib_wq *wq; 3145 struct ib_wq_init_attr wq_init_attr = {}; 3146 size_t required_cmd_sz; 3147 size_t required_resp_len; 3148 3149 required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge); 3150 required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn); 3151 3152 if (ucore->inlen < required_cmd_sz) 3153 return 
-EINVAL; 3154 3155 if (ucore->outlen < required_resp_len) 3156 return -ENOSPC; 3157 3158 if (ucore->inlen > sizeof(cmd) && 3159 !ib_is_udata_cleared(ucore, sizeof(cmd), 3160 ucore->inlen - sizeof(cmd))) 3161 return -EOPNOTSUPP; 3162 3163 err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); 3164 if (err) 3165 return err; 3166 3167 if (cmd.comp_mask) 3168 return -EOPNOTSUPP; 3169 3170 obj = kmalloc(sizeof(*obj), GFP_KERNEL); 3171 if (!obj) 3172 return -ENOMEM; 3173 3174 init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, 3175 &wq_lock_class); 3176 down_write(&obj->uevent.uobject.mutex); 3177 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 3178 if (!pd) { 3179 err = -EINVAL; 3180 goto err_uobj; 3181 } 3182 3183 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0); 3184 if (!cq) { 3185 err = -EINVAL; 3186 goto err_put_pd; 3187 } 3188 3189 wq_init_attr.cq = cq; 3190 wq_init_attr.max_sge = cmd.max_sge; 3191 wq_init_attr.max_wr = cmd.max_wr; 3192 wq_init_attr.wq_context = file; 3193 wq_init_attr.wq_type = cmd.wq_type; 3194 wq_init_attr.event_handler = ib_uverbs_wq_event_handler; 3195 obj->uevent.events_reported = 0; 3196 INIT_LIST_HEAD(&obj->uevent.event_list); 3197 wq = pd->device->create_wq(pd, &wq_init_attr, uhw); 3198 if (IS_ERR(wq)) { 3199 err = PTR_ERR(wq); 3200 goto err_put_cq; 3201 } 3202 3203 wq->uobject = &obj->uevent.uobject; 3204 obj->uevent.uobject.object = wq; 3205 wq->wq_type = wq_init_attr.wq_type; 3206 wq->cq = cq; 3207 wq->pd = pd; 3208 wq->device = pd->device; 3209 wq->wq_context = wq_init_attr.wq_context; 3210 atomic_set(&wq->usecnt, 0); 3211 atomic_inc(&pd->usecnt); 3212 atomic_inc(&cq->usecnt); 3215 err = idr_add_uobj(&ib_uverbs_wq_idr, &obj->uevent.uobject); 3216 if (err) 3217 goto destroy_wq; 3218 3219 memset(&resp, 0, sizeof(resp)); 3220 resp.wq_handle = obj->uevent.uobject.id; 3221 resp.max_sge = wq_init_attr.max_sge; 3222 resp.max_wr = wq_init_attr.max_wr; 3223 resp.wqn = wq->wq_num; 3224 resp.response_length = required_resp_len; 3225 err = ib_copy_to_udata(ucore, 3226 &resp, resp.response_length); 3227 if (err) 3228 goto err_copy; 3229 3230 put_pd_read(pd); 3231 put_cq_read(cq); 3232 3233 mutex_lock(&file->mutex); 3234 list_add_tail(&obj->uevent.uobject.list, &file->ucontext->wq_list); 3235 mutex_unlock(&file->mutex); 3236 3237 obj->uevent.uobject.live = 1; 3238 up_write(&obj->uevent.uobject.mutex); 3239 return 0; 3240 3241 err_copy: 3242 idr_remove_uobj(&ib_uverbs_wq_idr, &obj->uevent.uobject); 3243 destroy_wq: 3244 ib_destroy_wq(wq); 3245 err_put_cq: 3246 put_cq_read(cq); 3247 err_put_pd: 3248 put_pd_read(pd); 3249 err_uobj: 3250 put_uobj_write(&obj->uevent.uobject); 3251 3252 return err; 3253 } 3254 3255 int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file, 3256 struct ib_device *ib_dev, 3257 struct ib_udata *ucore, 3258 struct ib_udata *uhw) 3259 { 3260 struct ib_uverbs_ex_destroy_wq cmd = {}; 3261 struct ib_uverbs_ex_destroy_wq_resp resp = {}; 3262 struct ib_wq *wq; 3263 struct ib_uobject *uobj; 3264 struct ib_uwq_object *obj; 3265 size_t required_cmd_sz; 3266 size_t required_resp_len; 3267 int ret; 3268 3269 required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle); 3270 required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved); 3271 3272 if (ucore->inlen < required_cmd_sz) 3273 return -EINVAL; 3274 3275 if (ucore->outlen < required_resp_len) 3276 return -ENOSPC; 3277 3278 if (ucore->inlen > sizeof(cmd) && 3279
!ib_is_udata_cleared(ucore, sizeof(cmd), 3280 ucore->inlen - sizeof(cmd))) 3281 return -EOPNOTSUPP; 3282 3283 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); 3284 if (ret) 3285 return ret; 3286 3287 if (cmd.comp_mask) 3288 return -EOPNOTSUPP; 3289 3290 resp.response_length = required_resp_len; 3291 uobj = idr_write_uobj(&ib_uverbs_wq_idr, cmd.wq_handle, 3292 file->ucontext); 3293 if (!uobj) 3294 return -EINVAL; 3295 3296 wq = uobj->object; 3297 obj = container_of(uobj, struct ib_uwq_object, uevent.uobject); 3298 ret = ib_destroy_wq(wq); 3299 if (!ret) 3300 uobj->live = 0; 3301 3302 put_uobj_write(uobj); 3303 if (ret) 3304 return ret; 3305 3306 idr_remove_uobj(&ib_uverbs_wq_idr, uobj); 3307 3308 mutex_lock(&file->mutex); 3309 list_del(&uobj->list); 3310 mutex_unlock(&file->mutex); 3311 3312 ib_uverbs_release_uevent(file, &obj->uevent); 3313 resp.events_reported = obj->uevent.events_reported; 3314 put_uobj(uobj); 3315 3316 ret = ib_copy_to_udata(ucore, &resp, resp.response_length); 3317 if (ret) 3318 return ret; 3319 3320 return 0; 3321 } 3322 3323 int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file, 3324 struct ib_device *ib_dev, 3325 struct ib_udata *ucore, 3326 struct ib_udata *uhw) 3327 { 3328 struct ib_uverbs_ex_modify_wq cmd = {}; 3329 struct ib_wq *wq; 3330 struct ib_wq_attr wq_attr = {}; 3331 size_t required_cmd_sz; 3332 int ret; 3333 3334 required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state); 3335 if (ucore->inlen < required_cmd_sz) 3336 return -EINVAL; 3337 3338 if (ucore->inlen > sizeof(cmd) && 3339 !ib_is_udata_cleared(ucore, sizeof(cmd), 3340 ucore->inlen - sizeof(cmd))) 3341 return -EOPNOTSUPP; 3342 3343 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); 3344 if (ret) 3345 return ret; 3346 3347 if (!cmd.attr_mask) 3348 return -EINVAL; 3349 3350 if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE)) 3351 return -EINVAL; 3352 3353 wq = idr_read_wq(cmd.wq_handle, file->ucontext); 3354 if (!wq) 3355 return -EINVAL; 3356 3357 wq_attr.curr_wq_state = cmd.curr_wq_state; 3358 wq_attr.wq_state = cmd.wq_state; 3359 ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw); 3360 put_wq_read(wq); 3361 return ret; 3362 } 3363 3364 int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file, 3365 struct ib_device *ib_dev, 3366 struct ib_udata *ucore, 3367 struct ib_udata *uhw) 3368 { 3369 struct ib_uverbs_ex_create_rwq_ind_table cmd = {}; 3370 struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {}; 3371 struct ib_uobject *uobj; 3372 int err = 0; 3373 struct ib_rwq_ind_table_init_attr init_attr = {}; 3374 struct ib_rwq_ind_table *rwq_ind_tbl; 3375 struct ib_wq **wqs = NULL; 3376 u32 *wqs_handles = NULL; 3377 struct ib_wq *wq = NULL; 3378 int i, j, num_read_wqs; 3379 u32 num_wq_handles; 3380 u32 expected_in_size; 3381 size_t required_cmd_sz_header; 3382 size_t required_resp_len; 3383 3384 required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size); 3385 required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num); 3386 3387 if (ucore->inlen < required_cmd_sz_header) 3388 return -EINVAL; 3389 3390 if (ucore->outlen < required_resp_len) 3391 return -ENOSPC; 3392 3393 err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header); 3394 if (err) 3395 return err; 3396 3397 ucore->inbuf += required_cmd_sz_header; 3398 ucore->inlen -= required_cmd_sz_header; 3399 3400 if (cmd.comp_mask) 3401 return -EOPNOTSUPP; 3402 3403 if (cmd.log_ind_tbl_size > 
IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE) 3404 return -EINVAL; 3405 3406 num_wq_handles = 1 << cmd.log_ind_tbl_size; 3407 expected_in_size = num_wq_handles * sizeof(__u32); 3408 if (num_wq_handles == 1) 3409 /* input size for wq handles is u64 aligned */ 3410 expected_in_size += sizeof(__u32); 3411 3412 if (ucore->inlen < expected_in_size) 3413 return -EINVAL; 3414 3415 if (ucore->inlen > expected_in_size && 3416 !ib_is_udata_cleared(ucore, expected_in_size, 3417 ucore->inlen - expected_in_size)) 3418 return -EOPNOTSUPP; 3419 3420 wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles), 3421 GFP_KERNEL); 3422 if (!wqs_handles) 3423 return -ENOMEM; 3424 3425 err = ib_copy_from_udata(wqs_handles, ucore, 3426 num_wq_handles * sizeof(__u32)); 3427 if (err) 3428 goto err_free; 3429 3430 wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL); 3431 if (!wqs) { 3432 err = -ENOMEM; 3433 goto err_free; 3434 } 3435 3436 for (num_read_wqs = 0; num_read_wqs < num_wq_handles; 3437 num_read_wqs++) { 3438 wq = idr_read_wq(wqs_handles[num_read_wqs], file->ucontext); 3439 if (!wq) { 3440 err = -EINVAL; 3441 goto put_wqs; 3442 } 3443 3444 wqs[num_read_wqs] = wq; 3445 } 3446 3447 uobj = kmalloc(sizeof(*uobj), GFP_KERNEL); 3448 if (!uobj) { 3449 err = -ENOMEM; 3450 goto put_wqs; 3451 } 3452 3453 init_uobj(uobj, 0, file->ucontext, &rwq_ind_table_lock_class); 3454 down_write(&uobj->mutex); 3455 init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size; 3456 init_attr.ind_tbl = wqs; 3457 rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw); 3458 3459 if (IS_ERR(rwq_ind_tbl)) { 3460 err = PTR_ERR(rwq_ind_tbl); 3461 goto err_uobj; 3462 } 3463 3464 rwq_ind_tbl->ind_tbl = wqs; 3465 rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size; 3466 rwq_ind_tbl->uobject = uobj; 3467 uobj->object = rwq_ind_tbl; 3468 rwq_ind_tbl->device = ib_dev; 3469 atomic_set(&rwq_ind_tbl->usecnt, 0); 3470 3471 for (i = 0; i < num_wq_handles; i++) 3472 atomic_inc(&wqs[i]->usecnt); 3473 3474 err = idr_add_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj); 3475 if (err) 3476 goto destroy_ind_tbl; 3477 3478 resp.ind_tbl_handle = uobj->id; 3479 resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num; 3480 resp.response_length = required_resp_len; 3481 3482 err = ib_copy_to_udata(ucore, 3483 &resp, resp.response_length); 3484 if (err) 3485 goto err_copy; 3486 3487 kfree(wqs_handles); 3488 3489 for (j = 0; j < num_read_wqs; j++) 3490 put_wq_read(wqs[j]); 3491 3492 mutex_lock(&file->mutex); 3493 list_add_tail(&uobj->list, &file->ucontext->rwq_ind_tbl_list); 3494 mutex_unlock(&file->mutex); 3495 3496 uobj->live = 1; 3497 3498 up_write(&uobj->mutex); 3499 return 0; 3500 3501 err_copy: 3502 idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj); 3503 destroy_ind_tbl: 3504 ib_destroy_rwq_ind_table(rwq_ind_tbl); 3505 err_uobj: 3506 put_uobj_write(uobj); 3507 put_wqs: 3508 for (j = 0; j < num_read_wqs; j++) 3509 put_wq_read(wqs[j]); 3510 err_free: 3511 kfree(wqs_handles); 3512 kfree(wqs); 3513 return err; 3514 } 3515 3516 int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file, 3517 struct ib_device *ib_dev, 3518 struct ib_udata *ucore, 3519 struct ib_udata *uhw) 3520 { 3521 struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {}; 3522 struct ib_rwq_ind_table *rwq_ind_tbl; 3523 struct ib_uobject *uobj; 3524 int ret; 3525 struct ib_wq **ind_tbl; 3526 size_t required_cmd_sz; 3527 3528 required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle); 3529 3530 if (ucore->inlen < required_cmd_sz) 3531 return -EINVAL; 3532 3533 if (ucore->inlen > 
sizeof(cmd) && 3534 !ib_is_udata_cleared(ucore, sizeof(cmd), 3535 ucore->inlen - sizeof(cmd))) 3536 return -EOPNOTSUPP; 3537 3538 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); 3539 if (ret) 3540 return ret; 3541 3542 if (cmd.comp_mask) 3543 return -EOPNOTSUPP; 3544 3545 uobj = idr_write_uobj(&ib_uverbs_rwq_ind_tbl_idr, cmd.ind_tbl_handle, 3546 file->ucontext); 3547 if (!uobj) 3548 return -EINVAL; 3549 rwq_ind_tbl = uobj->object; 3550 ind_tbl = rwq_ind_tbl->ind_tbl; 3551 3552 ret = ib_destroy_rwq_ind_table(rwq_ind_tbl); 3553 if (!ret) 3554 uobj->live = 0; 3555 3556 put_uobj_write(uobj); 3557 3558 if (ret) 3559 return ret; 3560 3561 idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj); 3562 3563 mutex_lock(&file->mutex); 3564 list_del(&uobj->list); 3565 mutex_unlock(&file->mutex); 3566 3567 put_uobj(uobj); 3568 kfree(ind_tbl); 3569 return ret; 3570 } 3571 3572 int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, 3573 struct ib_device *ib_dev, 3574 struct ib_udata *ucore, 3575 struct ib_udata *uhw) 3576 { 3577 struct ib_uverbs_create_flow cmd; 3578 struct ib_uverbs_create_flow_resp resp; 3579 struct ib_uobject *uobj; 3580 struct ib_flow *flow_id; 3581 struct ib_uverbs_flow_attr *kern_flow_attr; 3582 struct ib_flow_attr *flow_attr; 3583 struct ib_qp *qp; 3584 int err = 0; 3585 void *kern_spec; 3586 void *ib_spec; 3587 int i; 3588 3589 if (ucore->inlen < sizeof(cmd)) 3590 return -EINVAL; 3591 3592 if (ucore->outlen < sizeof(resp)) 3593 return -ENOSPC; 3594 3595 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); 3596 if (err) 3597 return err; 3598 3599 ucore->inbuf += sizeof(cmd); 3600 ucore->inlen -= sizeof(cmd); 3601 3602 if (cmd.comp_mask) 3603 return -EINVAL; 3604 3605 if (!capable(CAP_NET_RAW)) 3606 return -EPERM; 3607 3608 if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED) 3609 return -EINVAL; 3610 3611 if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) && 3612 ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) || 3613 (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT))) 3614 return -EINVAL; 3615 3616 if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS) 3617 return -EINVAL; 3618 3619 if (cmd.flow_attr.size > ucore->inlen || 3620 cmd.flow_attr.size > 3621 (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec))) 3622 return -EINVAL; 3623 3624 if (cmd.flow_attr.reserved[0] || 3625 cmd.flow_attr.reserved[1]) 3626 return -EINVAL; 3627 3628 if (cmd.flow_attr.num_of_specs) { 3629 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size, 3630 GFP_KERNEL); 3631 if (!kern_flow_attr) 3632 return -ENOMEM; 3633 3634 memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr)); 3635 err = ib_copy_from_udata(kern_flow_attr + 1, ucore, 3636 cmd.flow_attr.size); 3637 if (err) 3638 goto err_free_attr; 3639 } else { 3640 kern_flow_attr = &cmd.flow_attr; 3641 } 3642 3643 uobj = kmalloc(sizeof(*uobj), GFP_KERNEL); 3644 if (!uobj) { 3645 err = -ENOMEM; 3646 goto err_free_attr; 3647 } 3648 init_uobj(uobj, 0, file->ucontext, &rule_lock_class); 3649 down_write(&uobj->mutex); 3650 3651 qp = idr_read_qp(cmd.qp_handle, file->ucontext); 3652 if (!qp) { 3653 err = -EINVAL; 3654 goto err_uobj; 3655 } 3656 3657 flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL); 3658 if (!flow_attr) { 3659 err = -ENOMEM; 3660 goto err_put; 3661 } 3662 3663 flow_attr->type = kern_flow_attr->type; 3664 flow_attr->priority = kern_flow_attr->priority; 3665 flow_attr->num_of_specs = kern_flow_attr->num_of_specs; 3666 flow_attr->port = 
kern_flow_attr->port; 3667 flow_attr->flags = kern_flow_attr->flags; 3668 flow_attr->size = sizeof(*flow_attr); 3669 3670 kern_spec = kern_flow_attr + 1; 3671 ib_spec = flow_attr + 1; 3672 for (i = 0; i < flow_attr->num_of_specs && 3673 cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) && 3674 cmd.flow_attr.size >= 3675 ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) { 3676 err = kern_spec_to_ib_spec(kern_spec, ib_spec); 3677 if (err) 3678 goto err_free; 3679 flow_attr->size += 3680 ((union ib_flow_spec *) ib_spec)->size; 3681 cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size; 3682 kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size; 3683 ib_spec += ((union ib_flow_spec *) ib_spec)->size; 3684 } 3685 if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) { 3686 pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n", 3687 i, cmd.flow_attr.size); 3688 err = -EINVAL; 3689 goto err_free; 3690 } 3691 flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER); 3692 if (IS_ERR(flow_id)) { 3693 err = PTR_ERR(flow_id); 3694 goto err_free; 3695 } 3696 flow_id->qp = qp; 3697 flow_id->uobject = uobj; 3698 uobj->object = flow_id; 3699 3700 err = idr_add_uobj(&ib_uverbs_rule_idr, uobj); 3701 if (err) 3702 goto destroy_flow; 3703 3704 memset(&resp, 0, sizeof(resp)); 3705 resp.flow_handle = uobj->id; 3706 3707 err = ib_copy_to_udata(ucore, 3708 &resp, sizeof(resp)); 3709 if (err) 3710 goto err_copy; 3711 3712 put_qp_read(qp); 3713 mutex_lock(&file->mutex); 3714 list_add_tail(&uobj->list, &file->ucontext->rule_list); 3715 mutex_unlock(&file->mutex); 3716 3717 uobj->live = 1; 3718 3719 up_write(&uobj->mutex); 3720 kfree(flow_attr); 3721 if (cmd.flow_attr.num_of_specs) 3722 kfree(kern_flow_attr); 3723 return 0; 3724 err_copy: 3725 idr_remove_uobj(&ib_uverbs_rule_idr, uobj); 3726 destroy_flow: 3727 ib_destroy_flow(flow_id); 3728 err_free: 3729 kfree(flow_attr); 3730 err_put: 3731 put_qp_read(qp); 3732 err_uobj: 3733 put_uobj_write(uobj); 3734 err_free_attr: 3735 if (cmd.flow_attr.num_of_specs) 3736 kfree(kern_flow_attr); 3737 return err; 3738 } 3739 3740 int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file, 3741 struct ib_device *ib_dev, 3742 struct ib_udata *ucore, 3743 struct ib_udata *uhw) 3744 { 3745 struct ib_uverbs_destroy_flow cmd; 3746 struct ib_flow *flow_id; 3747 struct ib_uobject *uobj; 3748 int ret; 3749 3750 if (ucore->inlen < sizeof(cmd)) 3751 return -EINVAL; 3752 3753 ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); 3754 if (ret) 3755 return ret; 3756 3757 if (cmd.comp_mask) 3758 return -EINVAL; 3759 3760 uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle, 3761 file->ucontext); 3762 if (!uobj) 3763 return -EINVAL; 3764 flow_id = uobj->object; 3765 3766 ret = ib_destroy_flow(flow_id); 3767 if (!ret) 3768 uobj->live = 0; 3769 3770 put_uobj_write(uobj); 3771 3772 idr_remove_uobj(&ib_uverbs_rule_idr, uobj); 3773 3774 mutex_lock(&file->mutex); 3775 list_del(&uobj->list); 3776 mutex_unlock(&file->mutex); 3777 3778 put_uobj(uobj); 3779 3780 return ret; 3781 } 3782 3783 static int __uverbs_create_xsrq(struct ib_uverbs_file *file, 3784 struct ib_device *ib_dev, 3785 struct ib_uverbs_create_xsrq *cmd, 3786 struct ib_udata *udata) 3787 { 3788 struct ib_uverbs_create_srq_resp resp; 3789 struct ib_usrq_object *obj; 3790 struct ib_pd *pd; 3791 struct ib_srq *srq; 3792 struct ib_uobject *uninitialized_var(xrcd_uobj); 3793 struct ib_srq_init_attr attr; 3794 int ret; 3795 3796 obj = kmalloc(sizeof *obj, GFP_KERNEL); 3797 
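/*
 * Editor's note: the body below follows the create-handler shape used
 * throughout this file: init_uobj() plus down_write() on the new
 * uobject, read locks on every referenced object (XRCD, CQ, PD),
 * hardware object creation, idr registration, response copy, and only
 * then live = 1 followed by up_write().  In outline:
 *
 *	init_uobj(&obj->uevent.uobject, ..., &srq_lock_class);
 *	down_write(&obj->uevent.uobject.mutex);
 *	... take references, create, idr_add_uobj(), copy response ...
 *	obj->uevent.uobject.live = 1;
 *	up_write(&obj->uevent.uobject.mutex);
 *
 * The error labels at the bottom undo those steps in reverse order,
 * each goto target releasing exactly the state acquired before the
 * failing step.
 */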
if (!obj) 3798 return -ENOMEM; 3799 3800 init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class); 3801 down_write(&obj->uevent.uobject.mutex); 3802 3803 if (cmd->srq_type == IB_SRQT_XRC) { 3804 attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj); 3805 if (!attr.ext.xrc.xrcd) { 3806 ret = -EINVAL; 3807 goto err; 3808 } 3809 3810 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject); 3811 atomic_inc(&obj->uxrcd->refcnt); 3812 3813 attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0); 3814 if (!attr.ext.xrc.cq) { 3815 ret = -EINVAL; 3816 goto err_put_xrcd; 3817 } 3818 } 3819 3820 pd = idr_read_pd(cmd->pd_handle, file->ucontext); 3821 if (!pd) { 3822 ret = -EINVAL; 3823 goto err_put_cq; 3824 } 3825 3826 attr.event_handler = ib_uverbs_srq_event_handler; 3827 attr.srq_context = file; 3828 attr.srq_type = cmd->srq_type; 3829 attr.attr.max_wr = cmd->max_wr; 3830 attr.attr.max_sge = cmd->max_sge; 3831 attr.attr.srq_limit = cmd->srq_limit; 3832 3833 obj->uevent.events_reported = 0; 3834 INIT_LIST_HEAD(&obj->uevent.event_list); 3835 3836 srq = pd->device->create_srq(pd, &attr, udata); 3837 if (IS_ERR(srq)) { 3838 ret = PTR_ERR(srq); 3839 goto err_put; 3840 } 3841 3842 srq->device = pd->device; 3843 srq->pd = pd; 3844 srq->srq_type = cmd->srq_type; 3845 srq->uobject = &obj->uevent.uobject; 3846 srq->event_handler = attr.event_handler; 3847 srq->srq_context = attr.srq_context; 3848 3849 if (cmd->srq_type == IB_SRQT_XRC) { 3850 srq->ext.xrc.cq = attr.ext.xrc.cq; 3851 srq->ext.xrc.xrcd = attr.ext.xrc.xrcd; 3852 atomic_inc(&attr.ext.xrc.cq->usecnt); 3853 atomic_inc(&attr.ext.xrc.xrcd->usecnt); 3854 } 3855 3856 atomic_inc(&pd->usecnt); 3857 atomic_set(&srq->usecnt, 0); 3858 3859 obj->uevent.uobject.object = srq; 3860 ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject); 3861 if (ret) 3862 goto err_destroy; 3863 3864 memset(&resp, 0, sizeof resp); 3865 resp.srq_handle = obj->uevent.uobject.id; 3866 resp.max_wr = attr.attr.max_wr; 3867 resp.max_sge = attr.attr.max_sge; 3868 if (cmd->srq_type == IB_SRQT_XRC) 3869 resp.srqn = srq->ext.xrc.srq_num; 3870 3871 if (copy_to_user((void __user *) (unsigned long) cmd->response, 3872 &resp, sizeof resp)) { 3873 ret = -EFAULT; 3874 goto err_copy; 3875 } 3876 3877 if (cmd->srq_type == IB_SRQT_XRC) { 3878 put_uobj_read(xrcd_uobj); 3879 put_cq_read(attr.ext.xrc.cq); 3880 } 3881 put_pd_read(pd); 3882 3883 mutex_lock(&file->mutex); 3884 list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list); 3885 mutex_unlock(&file->mutex); 3886 3887 obj->uevent.uobject.live = 1; 3888 3889 up_write(&obj->uevent.uobject.mutex); 3890 3891 return 0; 3892 3893 err_copy: 3894 idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject); 3895 3896 err_destroy: 3897 ib_destroy_srq(srq); 3898 3899 err_put: 3900 put_pd_read(pd); 3901 3902 err_put_cq: 3903 if (cmd->srq_type == IB_SRQT_XRC) 3904 put_cq_read(attr.ext.xrc.cq); 3905 3906 err_put_xrcd: 3907 if (cmd->srq_type == IB_SRQT_XRC) { 3908 atomic_dec(&obj->uxrcd->refcnt); 3909 put_uobj_read(xrcd_uobj); 3910 } 3911 3912 err: 3913 put_uobj_write(&obj->uevent.uobject); 3914 return ret; 3915 } 3916 3917 ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file, 3918 struct ib_device *ib_dev, 3919 const char __user *buf, int in_len, 3920 int out_len) 3921 { 3922 struct ib_uverbs_create_srq cmd; 3923 struct ib_uverbs_create_xsrq xcmd; 3924 struct ib_uverbs_create_srq_resp resp; 3925 struct ib_udata udata; 3926 int ret; 3927 3928 if (out_len < sizeof resp) 
3929 return -ENOSPC; 3930 3931 if (copy_from_user(&cmd, buf, sizeof cmd)) 3932 return -EFAULT; 3933 3934 xcmd.response = cmd.response; 3935 xcmd.user_handle = cmd.user_handle; 3936 xcmd.srq_type = IB_SRQT_BASIC; 3937 xcmd.pd_handle = cmd.pd_handle; 3938 xcmd.max_wr = cmd.max_wr; 3939 xcmd.max_sge = cmd.max_sge; 3940 xcmd.srq_limit = cmd.srq_limit; 3941 3942 INIT_UDATA(&udata, buf + sizeof cmd, 3943 (unsigned long) cmd.response + sizeof resp, 3944 in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr), 3945 out_len - sizeof resp); 3946 3947 ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata); 3948 if (ret) 3949 return ret; 3950 3951 return in_len; 3952 } 3953 3954 ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file, 3955 struct ib_device *ib_dev, 3956 const char __user *buf, int in_len, int out_len) 3957 { 3958 struct ib_uverbs_create_xsrq cmd; 3959 struct ib_uverbs_create_srq_resp resp; 3960 struct ib_udata udata; 3961 int ret; 3962 3963 if (out_len < sizeof resp) 3964 return -ENOSPC; 3965 3966 if (copy_from_user(&cmd, buf, sizeof cmd)) 3967 return -EFAULT; 3968 3969 INIT_UDATA(&udata, buf + sizeof cmd, 3970 (unsigned long) cmd.response + sizeof resp, 3971 in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr), 3972 out_len - sizeof resp); 3973 3974 ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata); 3975 if (ret) 3976 return ret; 3977 3978 return in_len; 3979 } 3980 3981 ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, 3982 struct ib_device *ib_dev, 3983 const char __user *buf, int in_len, 3984 int out_len) 3985 { 3986 struct ib_uverbs_modify_srq cmd; 3987 struct ib_udata udata; 3988 struct ib_srq *srq; 3989 struct ib_srq_attr attr; 3990 int ret; 3991 3992 if (copy_from_user(&cmd, buf, sizeof cmd)) 3993 return -EFAULT; 3994 3995 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, 3996 out_len); 3997 3998 srq = idr_read_srq(cmd.srq_handle, file->ucontext); 3999 if (!srq) 4000 return -EINVAL; 4001 4002 attr.max_wr = cmd.max_wr; 4003 attr.srq_limit = cmd.srq_limit; 4004 4005 ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata); 4006 4007 put_srq_read(srq); 4008 4009 return ret ? 
ret : in_len; 4010 } 4011 4012 ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file, 4013 struct ib_device *ib_dev, 4014 const char __user *buf, 4015 int in_len, int out_len) 4016 { 4017 struct ib_uverbs_query_srq cmd; 4018 struct ib_uverbs_query_srq_resp resp; 4019 struct ib_srq_attr attr; 4020 struct ib_srq *srq; 4021 int ret; 4022 4023 if (out_len < sizeof resp) 4024 return -ENOSPC; 4025 4026 if (copy_from_user(&cmd, buf, sizeof cmd)) 4027 return -EFAULT; 4028 4029 srq = idr_read_srq(cmd.srq_handle, file->ucontext); 4030 if (!srq) 4031 return -EINVAL; 4032 4033 ret = ib_query_srq(srq, &attr); 4034 4035 put_srq_read(srq); 4036 4037 if (ret) 4038 return ret; 4039 4040 memset(&resp, 0, sizeof resp); 4041 4042 resp.max_wr = attr.max_wr; 4043 resp.max_sge = attr.max_sge; 4044 resp.srq_limit = attr.srq_limit; 4045 4046 if (copy_to_user((void __user *) (unsigned long) cmd.response, 4047 &resp, sizeof resp)) 4048 return -EFAULT; 4049 4050 return in_len; 4051 } 4052 4053 ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file, 4054 struct ib_device *ib_dev, 4055 const char __user *buf, int in_len, 4056 int out_len) 4057 { 4058 struct ib_uverbs_destroy_srq cmd; 4059 struct ib_uverbs_destroy_srq_resp resp; 4060 struct ib_uobject *uobj; 4061 struct ib_srq *srq; 4062 struct ib_uevent_object *obj; 4063 int ret = -EINVAL; 4064 struct ib_usrq_object *us; 4065 enum ib_srq_type srq_type; 4066 4067 if (copy_from_user(&cmd, buf, sizeof cmd)) 4068 return -EFAULT; 4069 4070 uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext); 4071 if (!uobj) 4072 return -EINVAL; 4073 srq = uobj->object; 4074 obj = container_of(uobj, struct ib_uevent_object, uobject); 4075 srq_type = srq->srq_type; 4076 4077 ret = ib_destroy_srq(srq); 4078 if (!ret) 4079 uobj->live = 0; 4080 4081 put_uobj_write(uobj); 4082 4083 if (ret) 4084 return ret; 4085 4086 if (srq_type == IB_SRQT_XRC) { 4087 us = container_of(obj, struct ib_usrq_object, uevent); 4088 atomic_dec(&us->uxrcd->refcnt); 4089 } 4090 4091 idr_remove_uobj(&ib_uverbs_srq_idr, uobj); 4092 4093 mutex_lock(&file->mutex); 4094 list_del(&uobj->list); 4095 mutex_unlock(&file->mutex); 4096 4097 ib_uverbs_release_uevent(file, obj); 4098 4099 memset(&resp, 0, sizeof resp); 4100 resp.events_reported = obj->events_reported; 4101 4102 put_uobj(uobj); 4103 4104 if (copy_to_user((void __user *) (unsigned long) cmd.response, 4105 &resp, sizeof resp)) 4106 ret = -EFAULT; 4107 4108 return ret ? 
ret : in_len; 4109 } 4110 4111 int ib_uverbs_ex_query_device(struct ib_uverbs_file *file, 4112 struct ib_device *ib_dev, 4113 struct ib_udata *ucore, 4114 struct ib_udata *uhw) 4115 { 4116 struct ib_uverbs_ex_query_device_resp resp = { {0} }; 4117 struct ib_uverbs_ex_query_device cmd; 4118 struct ib_device_attr attr = {0}; 4119 int err; 4120 4121 if (ucore->inlen < sizeof(cmd)) 4122 return -EINVAL; 4123 4124 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); 4125 if (err) 4126 return err; 4127 4128 if (cmd.comp_mask) 4129 return -EINVAL; 4130 4131 if (cmd.reserved) 4132 return -EINVAL; 4133 4134 resp.response_length = offsetof(typeof(resp), odp_caps); 4135 4136 if (ucore->outlen < resp.response_length) 4137 return -ENOSPC; 4138 4139 err = ib_dev->query_device(ib_dev, &attr, uhw); 4140 if (err) 4141 return err; 4142 4143 copy_query_dev_fields(file, ib_dev, &resp.base, &attr); 4144 4145 if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps)) 4146 goto end; 4147 4148 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 4149 resp.odp_caps.general_caps = attr.odp_caps.general_caps; 4150 resp.odp_caps.per_transport_caps.rc_odp_caps = 4151 attr.odp_caps.per_transport_caps.rc_odp_caps; 4152 resp.odp_caps.per_transport_caps.uc_odp_caps = 4153 attr.odp_caps.per_transport_caps.uc_odp_caps; 4154 resp.odp_caps.per_transport_caps.ud_odp_caps = 4155 attr.odp_caps.per_transport_caps.ud_odp_caps; 4156 #endif 4157 resp.response_length += sizeof(resp.odp_caps); 4158 4159 if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask)) 4160 goto end; 4161 4162 resp.timestamp_mask = attr.timestamp_mask; 4163 resp.response_length += sizeof(resp.timestamp_mask); 4164 4165 if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock)) 4166 goto end; 4167 4168 resp.hca_core_clock = attr.hca_core_clock; 4169 resp.response_length += sizeof(resp.hca_core_clock); 4170 4171 if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex)) 4172 goto end; 4173 4174 resp.device_cap_flags_ex = attr.device_cap_flags; 4175 resp.response_length += sizeof(resp.device_cap_flags_ex); 4176 end: 4177 err = ib_copy_to_udata(ucore, &resp, resp.response_length); 4178 return err; 4179 } 4180
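/*
 * Editor's note on the extensible-response convention used by the
 * _ex_ handlers above (ib_uverbs_ex_query_device being the clearest
 * case): the response grows field by field, and each optional field
 * is emitted only when the caller's output buffer (ucore->outlen) can
 * hold everything written so far plus that field, with
 * resp.response_length recording how many bytes are actually valid.
 * A hedged sketch of extending the response by one more field; the
 * field name "new_cap" is hypothetical, not part of the ABI:
 *
 *	if (ucore->outlen < resp.response_length + sizeof(resp.new_cap))
 *		goto end;
 *	resp.new_cap = attr.new_cap;
 *	resp.response_length += sizeof(resp.new_cap);
 *
 * Old userspace passing a short buffer simply never sees the new
 * field, and newer userspace can detect its absence from the returned
 * response_length, so the command stays both backward and forward
 * compatible.
 */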