/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	int rc = -EINVAL;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_number %d\n", port_num);
		goto out;
	}

	port = &rxe->port;

	/* *attr is zeroed by the caller; avoid zeroing it here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
			      &attr->active_width);
	mutex_unlock(&rxe->usdev_lock);

out:
	return rc;
}
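/*
 * Usage sketch (illustrative, userspace side; not part of this driver):
 * these verbs are reached through the uverbs layer via libibverbs.
 * Assuming a context obtained from ibv_open_device(), the attributes
 * returned by rxe_query_device() and rxe_query_port() can be read with:
 *
 *	struct ibv_device_attr dev_attr;
 *	struct ibv_port_attr port_attr;
 *
 *	if (ibv_query_device(ctx, &dev_attr) ||
 *	    ibv_query_port(ctx, 1, &port_attr))
 *		return -1;
 */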
static int rxe_query_gid(struct ib_device *device,
			 u8 port_num, int index, union ib_gid *gid)
{
	int ret;

	if (index > RXE_PORT_GID_TBL_LEN)
		return -EINVAL;

	ret = ib_get_cached_gid(device, port_num, index, gid, NULL);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}

static int rxe_add_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, const union ib_gid *gid,
		       const struct ib_gid_attr *attr, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static int rxe_del_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);

	if (rxe->ndev) {
		dev_hold(rxe->ndev);
		return rxe->ndev;
	}

	return NULL;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		dev_warn(device->dev.parent, "invalid port_num = %d\n",
			 port_num);
		goto err1;
	}

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dev.parent, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;

err1:
	return -EINVAL;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe_link_layer(rxe, port_num);
}

static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_pd *pd;

	pd = rxe_alloc(&rxe->pd_pool);
	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

static int rxe_init_av(struct rxe_dev *rxe, struct rdma_ah_attr *attr,
		       struct rxe_av *av)
{
	int err;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	err = ib_get_cached_gid(&rxe->ib_dev, rdma_ah_get_port_num(attr),
				rdma_ah_read_grh(attr)->sgid_index, &sgid,
				&sgid_attr);
	if (err) {
		pr_err("Failed to query sgid. err = %d\n", err);
		return err;
	}

	err = rxe_av_from_attr(rxe, rdma_ah_get_port_num(attr), av, attr);
	if (!err)
		err = rxe_av_fill_ip_info(rxe, av, attr, &sgid_attr, &sgid);

	if (sgid_attr.ndev)
		dev_put(sgid_attr.ndev);
	return err;
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
				   struct rdma_ah_attr *attr,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		goto err1;

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_ref(pd);
	ah->pd = pd;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		goto err2;

	return &ah->ibah;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(ah);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		return err;

	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(rxe, &ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}

static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, context, udata);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}
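/*
 * Usage sketch (illustrative, userspace side): rxe_create_qp() is driven
 * by ibv_create_qp().  A minimal RC QP, assuming "pd" and a completion
 * queue "cq" already exist:
 *
 *	struct ibv_qp_init_attr init = {
 *		.send_cq = cq,
 *		.recv_cq = cq,
 *		.qp_type = IBV_QPT_RC,
 *		.cap = {
 *			.max_send_wr  = 16,
 *			.max_recv_wr  = 16,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *	};
 *	struct ibv_qp *qp = ibv_create_qp(pd, &init);
 *
 * The requested capabilities are validated by rxe_qp_chk_init() before the
 * QP object is allocated.
 */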
static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			memcpy(p, (void *)(uintptr_t)sge->addr,
			       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = (mask & WR_ATOMIC_MASK) ?
		    atomic_wr(ibwr)->remote_addr :
		    rdma_wr(ibwr)->remote_addr;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}

static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	int must_sched;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	/*
	 * Must schedule in the case of a GSI QP because ib_send_mad() holds
	 * an irq lock and the requester calls ip_local_out_sk(), which takes
	 * spin_lock_bh().
	 */
	must_sched = (qp_type(qp) == IB_QPT_GSI) ||
			(queue_count(qp->sq.queue) > 1);

	rxe_run_task(&qp->req.task, must_sched);
	if (unlikely(qp->req.state == QP_STATE_ERROR))
		rxe_run_task(&qp->comp.task, 1);

	return err;
}

static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}
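/*
 * Usage sketch (illustrative, userspace side): the fields consumed above by
 * init_send_wr()/init_send_wqe() originate in an ibv_send_wr.  Posting one
 * signaled SEND of "len" bytes from a registered buffer, assuming "qp" and
 * "mr" already exist:
 *
 *	struct ibv_sge sge = {
 *		.addr   = (uintptr_t)buf,
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ibv_send_wr wr = {
 *		.wr_id      = 1,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IBV_WR_SEND,
 *		.send_flags = IBV_SEND_SIGNALED,
 *	};
 *	struct ibv_send_wr *bad_wr;
 *
 *	if (ibv_post_send(qp, &wr, &bad_wr))
 *		return -1;
 */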
static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	if (qp->resp.state == QP_STATE_ERROR)
		rxe_run_task(&qp->resp.task, 1);

err1:
	return err;
}

static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector, udata);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, udata);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_cq_disable(cq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0, udata);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}
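/*
 * Usage sketch (illustrative, userspace side): rxe_poll_cq() and
 * rxe_req_notify_cq() back ibv_poll_cq() and ibv_req_notify_cq().  A simple
 * busy-polling completion loop over an existing "cq" might look like:
 *
 *	struct ibv_wc wc;
 *	int n;
 *
 *	do {
 *		n = ibv_poll_cq(cq, 1, &wc);
 *	} while (n == 0);
 *
 *	if (n < 0 || wc.status != IBV_WC_SUCCESS)
 *		return -1;
 *
 * To be woken through a completion channel instead of polling, the consumer
 * arms the CQ first:
 *
 *	ibv_req_notify_cq(cq, 0);
 */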
static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(rxe, pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(rxe, pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}
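/*
 * Usage sketch (illustrative, userspace side): rxe_reg_user_mr() is reached
 * via ibv_reg_mr().  Registering a buffer of BUF_SIZE bytes (an illustrative
 * constant) for local and remote access, assuming an existing "pd":
 *
 *	void *buf = malloc(BUF_SIZE);
 *	struct ibv_mr *mr;
 *
 *	mr = ibv_reg_mr(pd, buf, BUF_SIZE,
 *			IBV_ACCESS_LOCAL_WRITE |
 *			IBV_ACCESS_REMOTE_READ |
 *			IBV_ACCESS_REMOTE_WRITE);
 *	if (!mr)
 *		return -1;
 *
 * The resulting mr->lkey and mr->rkey are the keys carried in work requests
 * and validated by the driver when memory is accessed.
 */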
static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t parent_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
					   ib_dev.dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct device_attribute *rxe_dev_attributes[] = {
	&dev_attr_parent,
};

int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	int i;
	struct ib_device *dev = &rxe->ib_dev;
	struct crypto_shash *tfm;

	strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);
	dev->dev.dma_ops = &dma_virt_ops;
	dma_coerce_mask_and_coherent(&dev->dev,
				     dma_get_required_mask(dev->dev.parent));

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	dev->query_device = rxe_query_device;
	dev->modify_device = rxe_modify_device;
	dev->query_port = rxe_query_port;
	dev->modify_port = rxe_modify_port;
	dev->get_link_layer = rxe_get_link_layer;
	dev->query_gid = rxe_query_gid;
	dev->get_netdev = rxe_get_netdev;
	dev->add_gid = rxe_add_gid;
	dev->del_gid = rxe_del_gid;
	dev->query_pkey = rxe_query_pkey;
	dev->alloc_ucontext = rxe_alloc_ucontext;
	dev->dealloc_ucontext = rxe_dealloc_ucontext;
	dev->mmap = rxe_mmap;
	dev->get_port_immutable = rxe_port_immutable;
	dev->alloc_pd = rxe_alloc_pd;
	dev->dealloc_pd = rxe_dealloc_pd;
	dev->create_ah = rxe_create_ah;
	dev->modify_ah = rxe_modify_ah;
	dev->query_ah = rxe_query_ah;
	dev->destroy_ah = rxe_destroy_ah;
	dev->create_srq = rxe_create_srq;
	dev->modify_srq = rxe_modify_srq;
	dev->query_srq = rxe_query_srq;
	dev->destroy_srq = rxe_destroy_srq;
	dev->post_srq_recv = rxe_post_srq_recv;
	dev->create_qp = rxe_create_qp;
	dev->modify_qp = rxe_modify_qp;
	dev->query_qp = rxe_query_qp;
	dev->destroy_qp = rxe_destroy_qp;
	dev->post_send = rxe_post_send;
	dev->post_recv = rxe_post_recv;
	dev->create_cq = rxe_create_cq;
	dev->destroy_cq = rxe_destroy_cq;
	dev->resize_cq = rxe_resize_cq;
	dev->poll_cq = rxe_poll_cq;
	dev->peek_cq = rxe_peek_cq;
	dev->req_notify_cq = rxe_req_notify_cq;
	dev->get_dma_mr = rxe_get_dma_mr;
	dev->reg_user_mr = rxe_reg_user_mr;
	dev->dereg_mr = rxe_dereg_mr;
	dev->alloc_mr = rxe_alloc_mr;
	dev->map_mr_sg = rxe_map_mr_sg;
	dev->attach_mcast = rxe_attach_mcast;
	dev->detach_mcast = rxe_detach_mcast;
	dev->get_hw_stats = rxe_ib_get_hw_stats;
	dev->alloc_hw_stats = rxe_ib_alloc_hw_stats;

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	rxe->tfm = tfm;

	err = ib_register_device(dev, NULL);
	if (err) {
		pr_warn("%s failed with error %d\n", __func__, err);
		goto err1;
	}

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
		err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
		if (err) {
			pr_warn("%s failed with error %d for attr number %d\n",
				__func__, err, i);
			goto err2;
		}
	}

	return 0;

err2:
	ib_unregister_device(dev);
err1:
	crypto_free_shash(rxe->tfm);

	return err;
}

int rxe_unregister_device(struct rxe_dev *rxe)
{
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
		device_remove_file(&dev->dev, rxe_dev_attributes[i]);

	ib_unregister_device(dev);

	return 0;
}
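/*
 * Usage sketch (illustrative, userspace side): once rxe_register_device()
 * has run, the device registered under the "rxe%d" name template can be
 * found and opened from userspace with libibverbs:
 *
 *	struct ibv_device **list;
 *	struct ibv_context *ctx = NULL;
 *	int i, num;
 *
 *	list = ibv_get_device_list(&num);
 *	for (i = 0; i < num; i++) {
 *		if (!strncmp(ibv_get_device_name(list[i]), "rxe", 3)) {
 *			ctx = ibv_open_device(list[i]);
 *			break;
 *		}
 *	}
 *	ibv_free_device_list(list);
 */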