/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: IB Verbs interpreter
 */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <net/addrconf.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>

#include "bnxt_ulp.h"

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"

#include "bnxt_re.h"
#include "ib_verbs.h"

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>

#include <rdma/ib_user_ioctl_cmds.h>

#define UVERBS_MODULE_NAME bnxt_re
#include <rdma/uverbs_named_ioctl.h>

#include <rdma/bnxt_re-abi.h>

static int __from_ib_access_flags(int iflags)
{
	int qflags = 0;

	if (iflags & IB_ACCESS_LOCAL_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
	if (iflags & IB_ACCESS_REMOTE_READ)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
	if (iflags & IB_ACCESS_REMOTE_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
	if (iflags & IB_ACCESS_MW_BIND)
		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
	if (iflags & IB_ZERO_BASED)
		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
	if (iflags & IB_ACCESS_ON_DEMAND)
		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
	return qflags;
}

static enum ib_access_flags __to_ib_access_flags(int qflags)
{
	enum ib_access_flags iflags = 0;

	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
		iflags |= IB_ACCESS_LOCAL_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
		iflags |= IB_ACCESS_REMOTE_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
		iflags |= IB_ACCESS_REMOTE_READ;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
		iflags |= IB_ACCESS_REMOTE_ATOMIC;
	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
		iflags |= IB_ACCESS_MW_BIND;
	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
		iflags |= IB_ZERO_BASED;
	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
		iflags |= IB_ACCESS_ON_DEMAND;
	return iflags;
}

static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
			     struct bnxt_qplib_sge *sg_list, int num)
{
	int i, total = 0;

	for (i = 0; i < num; i++) {
		sg_list[i].addr = ib_sg_list[i].addr;
		sg_list[i].lkey = ib_sg_list[i].lkey;
		sg_list[i].size = ib_sg_list[i].length;
		total += sg_list[i].size;
	}
	return total;
}
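
/*
 * The two helpers above map IB verbs access bits to the qplib bitmask and
 * back; every bit handled here has a single counterpart, so the translation
 * round-trips without loss. bnxt_re_build_sgl() transcribes an ib_sge array
 * into the qplib SGE layout and returns the total length of the list in
 * bytes.
 */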

/* Device */
int bnxt_re_query_device(struct ib_device *ibdev,
			 struct ib_device_attr *ib_attr,
			 struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

	memset(ib_attr, 0, sizeof(*ib_attr));
	memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
	       min(sizeof(dev_attr->fw_ver), sizeof(ib_attr->fw_ver)));
	addrconf_addr_eui48((u8 *)&ib_attr->sys_image_guid,
			    rdev->netdev->dev_addr);
	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_SUPPORTED;

	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
	ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
	ib_attr->max_qp = dev_attr->max_qp;
	ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
	ib_attr->device_cap_flags =
				    IB_DEVICE_CURR_QP_STATE_MOD
				    | IB_DEVICE_RC_RNR_NAK_GEN
				    | IB_DEVICE_SHUTDOWN_PORT
				    | IB_DEVICE_SYS_IMAGE_GUID
				    | IB_DEVICE_RESIZE_MAX_WR
				    | IB_DEVICE_PORT_ACTIVE_EVENT
				    | IB_DEVICE_N_NOTIFY_CQ
				    | IB_DEVICE_MEM_WINDOW
				    | IB_DEVICE_MEM_WINDOW_TYPE_2B
				    | IB_DEVICE_MEM_MGT_EXTENSIONS;
	ib_attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
	ib_attr->max_send_sge = dev_attr->max_qp_sges;
	ib_attr->max_recv_sge = dev_attr->max_qp_sges;
	ib_attr->max_sge_rd = dev_attr->max_qp_sges;
	ib_attr->max_cq = dev_attr->max_cq;
	ib_attr->max_cqe = dev_attr->max_cq_wqes;
	ib_attr->max_mr = dev_attr->max_mr;
	ib_attr->max_pd = dev_attr->max_pd;
	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
	ib_attr->atomic_cap = IB_ATOMIC_NONE;
	ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
	if (dev_attr->is_atomic) {
		ib_attr->atomic_cap = IB_ATOMIC_GLOB;
		ib_attr->masked_atomic_cap = IB_ATOMIC_GLOB;
	}

	ib_attr->max_ee_rd_atom = 0;
	ib_attr->max_res_rd_atom = 0;
	ib_attr->max_ee_init_rd_atom = 0;
	ib_attr->max_ee = 0;
	ib_attr->max_rdd = 0;
	ib_attr->max_mw = dev_attr->max_mw;
	ib_attr->max_raw_ipv6_qp = 0;
	ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
	ib_attr->max_mcast_grp = 0;
	ib_attr->max_mcast_qp_attach = 0;
	ib_attr->max_total_mcast_qp_attach = 0;
	ib_attr->max_ah = dev_attr->max_ah;

	ib_attr->max_srq = dev_attr->max_srq;
	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
	ib_attr->max_srq_sge = dev_attr->max_srq_sges;

	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;

	ib_attr->max_pkeys = 1;
	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
	return 0;
}

/* Port */
int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
		       struct ib_port_attr *port_attr)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	int rc;

	memset(port_attr, 0, sizeof(*port_attr));

	if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
		port_attr->state = IB_PORT_ACTIVE;
		port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	} else {
		port_attr->state = IB_PORT_DOWN;
		port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
	}
	port_attr->max_mtu = IB_MTU_4096;
	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
	port_attr->gid_tbl_len = dev_attr->max_sgid;
	port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				    IB_PORT_DEVICE_MGMT_SUP |
				    IB_PORT_VENDOR_CLASS_SUP;
	port_attr->ip_gids = true;

	port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
	port_attr->bad_pkey_cntr = 0;
	port_attr->qkey_viol_cntr = 0;
	port_attr->pkey_tbl_len = dev_attr->max_pkey;
	port_attr->lid = 0;
	port_attr->sm_lid = 0;
	port_attr->lmc = 0;
	port_attr->max_vl_num = 4;
	port_attr->sm_sl = 0;
	port_attr->subnet_timeout = 0;
	port_attr->init_type_reply = 0;
	rc = ib_get_eth_speed(&rdev->ibdev, port_num, &port_attr->active_speed,
			      &port_attr->active_width);

	return rc;
}

int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr port_attr;

	if (bnxt_re_query_port(ibdev, port_num, &port_attr))
		return -EINVAL;

	immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
	immutable->gid_tbl_len = port_attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
	immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}

void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
		 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
		 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
}

int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
		       u16 index, u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = IB_DEFAULT_PKEY_FULL;

	return 0;
}

int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
		      int index, union ib_gid *gid)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	int rc = 0;

	/* Ignore port_num */
	memset(gid, 0, sizeof(*gid));
	rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
				 &rdev->qplib_res.sgid_tbl, index,
				 (struct bnxt_qplib_gid *)gid);
	return rc;
}

int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
{
	int rc = 0;
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid *gid_to_del;
	u16 vlan_id = 0xFFFF;

	/* Delete the entry from the hardware */
	ctx = *context;
	if (!ctx)
		return -EINVAL;

	if (sgid_tbl && sgid_tbl->active) {
		if (ctx->idx >= sgid_tbl->max)
			return -EINVAL;
		gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
		vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
		/* DEL_GID is called in WQ context (netdevice_event_work_handler)
		 * or via the ib_unregister_device path. In the former case QP1
		 * may not be destroyed yet, in which case just return as FW
		 * needs that entry to be present and will fail its deletion.
		 * We could get invoked again after QP1 is destroyed OR get an
		 * ADD_GID call with a different GID value for the same index
		 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
		 */
		if (ctx->idx == 0 &&
		    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
		    ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
			ibdev_dbg(&rdev->ibdev,
				  "Trying to delete GID0 while QP1 is alive\n");
			return -EFAULT;
		}
		ctx->refcnt--;
		if (!ctx->refcnt) {
			rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
						 vlan_id, true);
			if (rc) {
				ibdev_err(&rdev->ibdev,
					  "Failed to remove GID: %#x", rc);
			} else {
				ctx_tbl = sgid_tbl->ctx;
				ctx_tbl[ctx->idx] = NULL;
				kfree(ctx);
			}
		}
	} else {
		return -EINVAL;
	}
	return rc;
}

int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
{
	int rc;
	u32 tbl_idx = 0;
	u16 vlan_id = 0xFFFF;
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;

	rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL);
	if (rc)
		return rc;

	rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
				 rdev->qplib_res.netdev->dev_addr,
				 vlan_id, true, &tbl_idx);
	if (rc == -EALREADY) {
		ctx_tbl = sgid_tbl->ctx;
		ctx_tbl[tbl_idx]->refcnt++;
		*context = ctx_tbl[tbl_idx];
		return 0;
	}

	if (rc < 0) {
		ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc);
		return rc;
	}

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx_tbl = sgid_tbl->ctx;
	ctx->idx = tbl_idx;
	ctx->refcnt = 1;
	ctx_tbl[tbl_idx] = ctx;
	*context = ctx;

	return rc;
}

enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
					    u32 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

#define BNXT_RE_FENCE_PBL_SIZE	DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)

static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
{
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct ib_mr *ib_mr = &fence->mr->ib_mr;
	struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;

	memset(wqe, 0, sizeof(*wqe));
	wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
	wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	wqe->bind.zero_based = false;
	wqe->bind.parent_l_key = ib_mr->lkey;
	wqe->bind.va = (u64)(unsigned long)fence->va;
	wqe->bind.length = fence->size;
	wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
	wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;

	/* Save the initial rkey in the fence structure for now;
	 * wqe->bind.r_key will be set at (re)bind time.
	 */
	fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
}

static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
{
	struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
					     qplib_qp);
	struct ib_pd *ib_pd = qp->ib_qp.pd;
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
	struct bnxt_qplib_swqe wqe;
	int rc;

	memcpy(&wqe, fence_wqe, sizeof(wqe));
	wqe.bind.r_key = fence->bind_rkey;
	fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);

	ibdev_dbg(&qp->rdev->ibdev,
		  "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
		  wqe.bind.r_key, qp->qplib_qp.id, pd);
	rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
	if (rc) {
		ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n");
		return rc;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);

	return rc;
}

static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
{
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_re_dev *rdev = pd->rdev;
	struct device *dev = &rdev->en_dev->pdev->dev;
	struct bnxt_re_mr *mr = fence->mr;

	if (fence->mw) {
		bnxt_re_dealloc_mw(fence->mw);
		fence->mw = NULL;
	}
	if (mr) {
		if (mr->ib_mr.rkey)
			bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
					     true);
		if (mr->ib_mr.lkey)
			bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
		kfree(mr);
		fence->mr = NULL;
	}
	if (fence->dma_addr) {
		dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
				 DMA_BIDIRECTIONAL);
		fence->dma_addr = 0;
	}
}

static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
{
	int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_re_dev *rdev = pd->rdev;
	struct device *dev = &rdev->en_dev->pdev->dev;
	struct bnxt_re_mr *mr = NULL;
	dma_addr_t dma_addr = 0;
	struct ib_mw *mw;
	int rc;

	dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
				  DMA_BIDIRECTIONAL);
	rc = dma_mapping_error(dev, dma_addr);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n");
		rc = -EIO;
		fence->dma_addr = 0;
		goto fail;
	}
	fence->dma_addr = dma_addr;

	/* Allocate a MR */
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rc = -ENOMEM;
		goto fail;
	}
	fence->mr = mr;
	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
		goto fail;
	}

	/* Register MR */
	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->qplib_mr.va = (u64)(unsigned long)fence->va;
	mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
			       BNXT_RE_FENCE_PBL_SIZE, PAGE_SIZE);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
		goto fail;
	}
	mr->ib_mr.rkey = mr->qplib_mr.rkey;

	/* Create a fence MW only for kernel consumers */
	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
	if (IS_ERR(mw)) {
		ibdev_err(&rdev->ibdev,
			  "Failed to create fence-MW for PD: %p\n", pd);
		rc = PTR_ERR(mw);
		goto fail;
	}
	fence->mw = mw;

	bnxt_re_create_fence_wqe(pd);
	return 0;

fail:
	bnxt_re_destroy_fence_mr(pd);
	return rc;
}
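
/*
 * Summary of the fence machinery above: bnxt_re_create_fence_mr() DMA-maps a
 * small per-PD buffer, registers it as a HW MR, allocates a type-1 memory
 * window over it and pre-builds a BIND_MW work request in pd->fence.bind_wqe.
 * bnxt_re_bind_fence_mw() later copies that template, stamps the stored
 * bind_rkey into the copy, advances the stored rkey with ib_inc_rkey() so the
 * next bind uses a fresh key, then posts the WQE and rings the send doorbell.
 * The whole construct exists only for kernel PDs: bnxt_re_alloc_pd() below
 * calls bnxt_re_create_fence_mr() only when no udata is supplied.
 */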

static struct bnxt_re_user_mmap_entry*
bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
			  enum bnxt_re_mmap_flag mmap_flag, u64 *offset)
{
	struct bnxt_re_user_mmap_entry *entry;
	int ret;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->mem_offset = mem_offset;
	entry->mmap_flag = mmap_flag;
	entry->uctx = uctx;

	switch (mmap_flag) {
	case BNXT_RE_MMAP_SH_PAGE:
		ret = rdma_user_mmap_entry_insert_exact(&uctx->ib_uctx,
							&entry->rdma_entry, PAGE_SIZE, 0);
		break;
	case BNXT_RE_MMAP_UC_DB:
	case BNXT_RE_MMAP_WC_DB:
		ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
						  &entry->rdma_entry, PAGE_SIZE);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret) {
		kfree(entry);
		return NULL;
	}
	if (offset)
		*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return entry;
}

/* Protection Domains */
int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;

	if (udata) {
		rdma_user_mmap_entry_remove(pd->pd_db_mmap);
		pd->pd_db_mmap = NULL;
	}

	bnxt_re_destroy_fence_mr(pd);

	if (pd->qplib_pd.id) {
		if (!bnxt_qplib_dealloc_pd(&rdev->qplib_res,
					   &rdev->qplib_res.pd_tbl,
					   &pd->qplib_pd))
			atomic_dec(&rdev->pd_count);
	}
	return 0;
}

int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct ib_device *ibdev = ibpd->device;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
		udata, struct bnxt_re_ucontext, ib_uctx);
	struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_user_mmap_entry *entry = NULL;
	int rc = 0;

	pd->rdev = rdev;
	if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
		ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
		rc = -ENOMEM;
		goto fail;
	}

	if (udata) {
		struct bnxt_re_pd_resp resp = {};

		if (!ucntx->dpi.dbr) {
			/* Allocate DPI in alloc_pd to avoid failing
			 * ibv_devinfo and related applications when DPIs
			 * are depleted.
			 */
			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res,
						 &ucntx->dpi, ucntx, BNXT_QPLIB_DPI_TYPE_UC)) {
				rc = -ENOMEM;
				goto dbfail;
			}
		}

		resp.pdid = pd->qplib_pd.id;
		/* Still allow mapping this DBR to the new user PD. */
		resp.dpi = ucntx->dpi.dpi;

		entry = bnxt_re_mmap_entry_insert(ucntx, (u64)ucntx->dpi.umdbr,
						  BNXT_RE_MMAP_UC_DB, &resp.dbr);

		if (!entry) {
			rc = -ENOMEM;
			goto dbfail;
		}

		pd->pd_db_mmap = &entry->rdma_entry;

		rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
		if (rc) {
			rdma_user_mmap_entry_remove(pd->pd_db_mmap);
			rc = -EFAULT;
			goto dbfail;
		}
	}

	if (!udata)
		if (bnxt_re_create_fence_mr(pd))
			ibdev_warn(&rdev->ibdev,
				   "Failed to create Fence-MR\n");
	atomic_inc(&rdev->pd_count);

	return 0;
dbfail:
	bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
			      &pd->qplib_pd);
fail:
	return rc;
}

/* Address Handles */
int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
{
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
	struct bnxt_re_dev *rdev = ah->rdev;
	bool block = true;
	int rc = 0;

	block = !(flags & RDMA_DESTROY_AH_SLEEPABLE);
	rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block);
	if (BNXT_RE_CHECK_RC(rc)) {
		if (rc == -ETIMEDOUT)
			rc = 0;
		else
			goto fail;
	}
	atomic_dec(&rdev->ah_count);
fail:
	return rc;
}

static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
{
	u8 nw_type;

	switch (ntype) {
	case RDMA_NETWORK_IPV4:
		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
		break;
	case RDMA_NETWORK_IPV6:
		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
		break;
	default:
		nw_type = CMDQ_CREATE_AH_TYPE_V1;
		break;
	}
	return nw_type;
}

int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
		      struct ib_udata *udata)
{
	struct ib_pd *ib_pd = ib_ah->pd;
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	struct bnxt_re_dev *rdev = pd->rdev;
	const struct ib_gid_attr *sgid_attr;
	struct bnxt_re_gid_ctx *ctx;
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
	u8 nw_type;
	int rc;

	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
		ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set");
		return -EINVAL;
	}

	ah->rdev = rdev;
	ah->qplib_ah.pd = &pd->qplib_pd;

	/* Supply the configuration for the HW */
	memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
	       sizeof(union ib_gid));
	sgid_attr = grh->sgid_attr;
	/* Get the HW context of the GID. The reference
	 * of the GID table entry is already taken by the caller.
	 */
	ctx = rdma_read_gid_hw_context(sgid_attr);
	ah->qplib_ah.sgid_index = ctx->idx;
	ah->qplib_ah.host_sgid_index = grh->sgid_index;
	ah->qplib_ah.traffic_class = grh->traffic_class;
	ah->qplib_ah.flow_label = grh->flow_label;
	ah->qplib_ah.hop_limit = grh->hop_limit;
	ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);

	/* Get network header type for this GID */
	nw_type = rdma_gid_attr_network_type(sgid_attr);
	ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);

	memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
				  !(init_attr->flags &
				    RDMA_CREATE_AH_SLEEPABLE));
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
		return rc;
	}

	/* Write AVID to shared page. */
	if (udata) {
		struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
			udata, struct bnxt_re_ucontext, ib_uctx);
		unsigned long flag;
		u32 *wrptr;

		spin_lock_irqsave(&uctx->sh_lock, flag);
		wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
		*wrptr = ah->qplib_ah.id;
		wmb(); /* make sure cache is updated. */
		spin_unlock_irqrestore(&uctx->sh_lock, flag);
	}
	atomic_inc(&rdev->ah_count);

	return 0;
}

int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);

	ah_attr->type = ib_ah->type;
	rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
	memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
	rdma_ah_set_grh(ah_attr, NULL, 0,
			ah->qplib_ah.host_sgid_index,
			0, ah->qplib_ah.traffic_class);
	rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
	rdma_ah_set_port_num(ah_attr, 1);
	rdma_ah_set_static_rate(ah_attr, 0);
	return 0;
}

unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
	__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->scq->cq_lock, flags);
	if (qp->rcq != qp->scq)
		spin_lock(&qp->rcq->cq_lock);
	else
		__acquire(&qp->rcq->cq_lock);

	return flags;
}

void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
			unsigned long flags)
	__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
{
	if (qp->rcq != qp->scq)
		spin_unlock(&qp->rcq->cq_lock);
	else
		__release(&qp->rcq->cq_lock);
	spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
}

static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
{
	struct bnxt_re_qp *gsi_sqp;
	struct bnxt_re_ah *gsi_sah;
	struct bnxt_re_dev *rdev;
	int rc = 0;

	rdev = qp->rdev;
	gsi_sqp = rdev->gsi_ctx.gsi_sqp;
	gsi_sah = rdev->gsi_ctx.gsi_sah;

	ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
	bnxt_qplib_destroy_ah(&rdev->qplib_res,
			      &gsi_sah->qplib_ah,
			      true);
	atomic_dec(&rdev->ah_count);
	bnxt_qplib_clean_qp(&qp->qplib_qp);

	ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
		goto fail;
	}
	bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);

	/* remove from active qp list */
	mutex_lock(&rdev->qp_lock);
	list_del(&gsi_sqp->list);
	mutex_unlock(&rdev->qp_lock);
	atomic_dec(&rdev->qp_count);

	kfree(rdev->gsi_ctx.sqp_tbl);
	kfree(gsi_sah);
	kfree(gsi_sqp);
	rdev->gsi_ctx.gsi_sqp = NULL;
	rdev->gsi_ctx.gsi_sah = NULL;
	rdev->gsi_ctx.sqp_tbl = NULL;

	return 0;
fail:
	return rc;
}

/* Queue Pairs */
int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	unsigned int flags;
	int rc;

	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);

	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
		return rc;
	}

	if (rdma_is_kernel_res(&qp->ib_qp.res)) {
		flags = bnxt_re_lock_cqs(qp);
		bnxt_qplib_clean_qp(&qp->qplib_qp);
		bnxt_re_unlock_cqs(qp, flags);
	}

	bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);

	if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
		rc = bnxt_re_destroy_gsi_sqp(qp);
		if (rc)
			return rc;
	}

	mutex_lock(&rdev->qp_lock);
	list_del(&qp->list);
	mutex_unlock(&rdev->qp_lock);
	atomic_dec(&rdev->qp_count);

	ib_umem_release(qp->rumem);
	ib_umem_release(qp->sumem);

	return 0;
}

static u8 __from_ib_qp_type(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_GSI:
		return CMDQ_CREATE_QP1_TYPE_GSI;
	case IB_QPT_RC:
		return CMDQ_CREATE_QP_TYPE_RC;
	case IB_QPT_UD:
		return CMDQ_CREATE_QP_TYPE_UD;
	default:
		return IB_QPT_MAX;
	}
}

static u16 bnxt_re_setup_rwqe_size(struct bnxt_qplib_qp *qplqp,
				   int rsge, int max)
{
	if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
		rsge = max;
	return bnxt_re_get_rwqe_size(rsge);
}

static u16 bnxt_re_get_wqe_size(int ilsize, int nsge)
{
	u16 wqe_size, calc_ils;

	wqe_size = bnxt_re_get_swqe_size(nsge);
	if (ilsize) {
		calc_ils = sizeof(struct sq_send_hdr) + ilsize;
		wqe_size = max_t(u16, calc_ils, wqe_size);
		wqe_size = ALIGN(wqe_size, sizeof(struct sq_send_hdr));
	}
	return wqe_size;
}

static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
				   struct ib_qp_init_attr *init_attr)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	struct bnxt_qplib_q *sq;
	int align, ilsize;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	sq = &qplqp->sq;
	dev_attr = &rdev->dev_attr;

	align = sizeof(struct sq_send_hdr);
	ilsize = ALIGN(init_attr->cap.max_inline_data, align);

	sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
	if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
		return -EINVAL;
	/* For gen p4 and gen p5 backward compatibility mode
	 * wqe size is fixed to 128 bytes
	 */
	if (sq->wqe_size < bnxt_re_get_swqe_size(dev_attr->max_qp_sges) &&
	    qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
		sq->wqe_size = bnxt_re_get_swqe_size(dev_attr->max_qp_sges);

	if (init_attr->cap.max_inline_data) {
		qplqp->max_inline_data = sq->wqe_size -
					 sizeof(struct sq_send_hdr);
		init_attr->cap.max_inline_data = qplqp->max_inline_data;
		if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
			sq->max_sge = qplqp->max_inline_data /
				      sizeof(struct sq_sge);
	}

	return 0;
}

static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
				struct bnxt_re_qp *qp, struct ib_udata *udata)
{
	struct bnxt_qplib_qp *qplib_qp;
	struct bnxt_re_ucontext *cntx;
	struct bnxt_re_qp_req ureq;
	int bytes = 0, psn_sz;
	struct ib_umem *umem;
	int psn_nume;

	qplib_qp = &qp->qplib_qp;
	cntx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
					 ib_uctx);
	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
		return -EFAULT;

	bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
	/* Consider mapping PSN search memory only for RC QPs. */
	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);
		psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			   qplib_qp->sq.max_wqe :
			   ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
			    sizeof(struct bnxt_qplib_sge));
		bytes += (psn_nume * psn_sz);
	}

	bytes = PAGE_ALIGN(bytes);
	umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes,
			   IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	qp->sumem = umem;
	qplib_qp->sq.sg_info.umem = umem;
	qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
	qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
	qplib_qp->qp_handle = ureq.qp_handle;

	if (!qp->qplib_qp.srq) {
		bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
		bytes = PAGE_ALIGN(bytes);
		umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes,
				   IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(umem))
			goto rqfail;
		qp->rumem = umem;
		qplib_qp->rq.sg_info.umem = umem;
		qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
		qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
	}

	qplib_qp->dpi = &cntx->dpi;
	return 0;
rqfail:
	ib_umem_release(qp->sumem);
	qp->sumem = NULL;
	memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));

	return PTR_ERR(umem);
}
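
/*
 * Worked example for the sizing above (illustrative numbers, not taken from
 * real hardware output): an RC QP in static WQE mode with sq.max_wqe = 256
 * and sq.wqe_size = 128 on a gen-P5 chip needs 256 * 128 bytes of SQ space
 * plus one sq_psn_search_ext entry per WQE; the sum is then rounded up to a
 * page multiple before ib_umem_get() pins the user buffer at ureq.qpsva.
 */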

static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
				(struct bnxt_re_pd *pd,
				 struct bnxt_qplib_res *qp1_res,
				 struct bnxt_qplib_qp *qp1_qp)
{
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_ah *ah;
	union ib_gid sgid;
	int rc;

	ah = kzalloc(sizeof(*ah), GFP_KERNEL);
	if (!ah)
		return NULL;

	ah->rdev = rdev;
	ah->qplib_ah.pd = &pd->qplib_pd;

	rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
	if (rc)
		goto fail;

	/* supply the dgid data same as sgid */
	memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
	       sizeof(union ib_gid));
	ah->qplib_ah.sgid_index = 0;

	ah->qplib_ah.traffic_class = 0;
	ah->qplib_ah.flow_label = 0;
	ah->qplib_ah.hop_limit = 1;
	ah->qplib_ah.sl = 0;
	/* Have DMAC same as SMAC */
	ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);

	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate HW AH for Shadow QP");
		goto fail;
	}
	atomic_inc(&rdev->ah_count);

	return ah;

fail:
	kfree(ah);
	return NULL;
}

static struct bnxt_re_qp *bnxt_re_create_shadow_qp
				(struct bnxt_re_pd *pd,
				 struct bnxt_qplib_res *qp1_res,
				 struct bnxt_qplib_qp *qp1_qp)
{
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_qp *qp;
	int rc;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return NULL;

	qp->rdev = rdev;

	/* Initialize the shadow QP structure from the QP1 values */
	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);

	qp->qplib_qp.pd = &pd->qplib_pd;
	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
	qp->qplib_qp.type = IB_QPT_UD;

	qp->qplib_qp.max_inline_data = 0;
	qp->qplib_qp.sig_type = true;

	/* Shadow QP SQ depth should be same as QP1 RQ depth */
	qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6);
	qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
	qp->qplib_qp.sq.max_sge = 2;
	/* Q full delta can be 1 since it is internal QP */
	qp->qplib_qp.sq.q_full_delta = 1;
	qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE;
	qp->qplib_qp.sq.sg_info.pgshft = PAGE_SHIFT;

	qp->qplib_qp.scq = qp1_qp->scq;
	qp->qplib_qp.rcq = qp1_qp->rcq;

	qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6);
	qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
	qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
	/* Q full delta can be 1 since it is internal QP */
	qp->qplib_qp.rq.q_full_delta = 1;
	qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE;
	qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT;

	qp->qplib_qp.mtu = qp1_qp->mtu;

	qp->qplib_qp.sq_hdr_buf_size = 0;
	qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
	qp->qplib_qp.dpi = &rdev->dpi_privileged;

	rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
	if (rc)
		goto fail;

	spin_lock_init(&qp->sq_lock);
	INIT_LIST_HEAD(&qp->list);
	mutex_lock(&rdev->qp_lock);
	list_add_tail(&qp->list, &rdev->qp_list);
	atomic_inc(&rdev->qp_count);
	mutex_unlock(&rdev->qp_lock);
	return qp;
fail:
	kfree(qp);
	return NULL;
}

static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
				struct ib_qp_init_attr *init_attr)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	struct bnxt_qplib_q *rq;
	int entries;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	rq = &qplqp->rq;
	dev_attr = &rdev->dev_attr;

	if (init_attr->srq) {
		struct bnxt_re_srq *srq;

		srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq);
		qplqp->srq = &srq->qplib_srq;
		rq->max_wqe = 0;
	} else {
		rq->max_sge = init_attr->cap.max_recv_sge;
		if (rq->max_sge > dev_attr->max_qp_sges)
			rq->max_sge = dev_attr->max_qp_sges;
		init_attr->cap.max_recv_sge = rq->max_sge;
		rq->wqe_size = bnxt_re_setup_rwqe_size(qplqp, rq->max_sge,
						       dev_attr->max_qp_sges);
		/* Allocate 1 more than what's provided so posting max doesn't
		 * mean empty.
		 */
		entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1);
		rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
		rq->q_full_delta = 0;
		rq->sg_info.pgsize = PAGE_SIZE;
		rq->sg_info.pgshft = PAGE_SHIFT;
	}

	return 0;
}

static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	dev_attr = &rdev->dev_attr;

	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
		qplqp->rq.max_sge = dev_attr->max_qp_sges;
		if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
			qplqp->rq.max_sge = dev_attr->max_qp_sges;
		qplqp->rq.max_sge = 6;
	}
}

static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	struct bnxt_qplib_q *sq;
	int entries;
	int diff;
	int rc;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	sq = &qplqp->sq;
	dev_attr = &rdev->dev_attr;

	sq->max_sge = init_attr->cap.max_send_sge;
	if (sq->max_sge > dev_attr->max_qp_sges) {
		sq->max_sge = dev_attr->max_qp_sges;
		init_attr->cap.max_send_sge = sq->max_sge;
	}

	rc = bnxt_re_setup_swqe_size(qp, init_attr);
	if (rc)
		return rc;

	entries = init_attr->cap.max_send_wr;
	/* Allocate 128 + 1 more than what's provided */
	diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
		0 : BNXT_QPLIB_RESERVED_QP_WRS;
	entries = roundup_pow_of_two(entries + diff + 1);
	sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
	sq->q_full_delta = diff + 1;
	/*
	 * Reserve one slot for the phantom WQE. The application can still
	 * post one extra entry in that case, but we allow it to avoid an
	 * unexpected queue-full condition.
	 */
	qplqp->sq.q_full_delta -= 1;
	qplqp->sq.sg_info.pgsize = PAGE_SIZE;
	qplqp->sq.sg_info.pgshft = PAGE_SHIFT;

	return 0;
}
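
/*
 * Worked example for the SQ sizing above: with max_send_wr = 100 in static
 * WQE mode, diff is BNXT_QPLIB_RESERVED_QP_WRS (128 per the comment above),
 * so entries = roundup_pow_of_two(100 + 128 + 1) = 256 and sq->max_wqe is
 * capped against max_qp_wqes + 129. q_full_delta starts at 129 and the
 * phantom-WQE reservation drops it to 128, effectively keeping 128 slots of
 * headroom between what the application may post and the ring size.
 */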

static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
				       struct ib_qp_init_attr *init_attr)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	int entries;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	dev_attr = &rdev->dev_attr;

	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
		entries = roundup_pow_of_two(init_attr->cap.max_send_wr + 1);
		qplqp->sq.max_wqe = min_t(u32, entries,
					  dev_attr->max_qp_wqes + 1);
		qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
					 init_attr->cap.max_send_wr;
		qplqp->sq.max_sge++; /* Need one extra sge to put UD header */
		if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
			qplqp->sq.max_sge = dev_attr->max_qp_sges;
	}
}

static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
				struct ib_qp_init_attr *init_attr)
{
	struct bnxt_qplib_chip_ctx *chip_ctx;
	int qptype;

	chip_ctx = rdev->chip_ctx;

	qptype = __from_ib_qp_type(init_attr->qp_type);
	if (qptype == IB_QPT_MAX) {
		ibdev_err(&rdev->ibdev, "QP type 0x%x not supported", qptype);
		qptype = -EOPNOTSUPP;
		goto out;
	}

	if (bnxt_qplib_is_chip_gen_p5(chip_ctx) &&
	    init_attr->qp_type == IB_QPT_GSI)
		qptype = CMDQ_CREATE_QP_TYPE_GSI;
out:
	return qptype;
}

static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	struct bnxt_re_cq *cq;
	int rc = 0, qptype;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	dev_attr = &rdev->dev_attr;

	/* Setup misc params */
	ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
	qplqp->pd = &pd->qplib_pd;
	qplqp->qp_handle = (u64)qplqp;
	qplqp->max_inline_data = init_attr->cap.max_inline_data;
	qplqp->sig_type = ((init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ?
			    true : false);
	qptype = bnxt_re_init_qp_type(rdev, init_attr);
	if (qptype < 0) {
		rc = qptype;
		goto out;
	}
	qplqp->type = (u8)qptype;
	qplqp->wqe_mode = rdev->chip_ctx->modes.wqe_mode;

	if (init_attr->qp_type == IB_QPT_RC) {
		qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
		qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
	}
	qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
	qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
	if (init_attr->create_flags) {
		ibdev_dbg(&rdev->ibdev,
			  "QP create flags 0x%x not supported",
			  init_attr->create_flags);
		return -EOPNOTSUPP;
	}

	/* Setup CQs */
	if (init_attr->send_cq) {
		cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq);
		qplqp->scq = &cq->qplib_cq;
		qp->scq = cq;
	}

	if (init_attr->recv_cq) {
		cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq);
		qplqp->rcq = &cq->qplib_cq;
		qp->rcq = cq;
	}

	/* Setup RQ/SRQ */
	rc = bnxt_re_init_rq_attr(qp, init_attr);
	if (rc)
		goto out;
	if (init_attr->qp_type == IB_QPT_GSI)
		bnxt_re_adjust_gsi_rq_attr(qp);

	/* Setup SQ */
	rc = bnxt_re_init_sq_attr(qp, init_attr, udata);
	if (rc)
		goto out;
	if (init_attr->qp_type == IB_QPT_GSI)
		bnxt_re_adjust_gsi_sq_attr(qp, init_attr);

	if (udata) /* This will update DPI and qp_handle */
		rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
out:
	return rc;
}

static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
				     struct bnxt_re_pd *pd)
{
	struct bnxt_re_sqp_entries *sqp_tbl;
	struct bnxt_re_dev *rdev;
	struct bnxt_re_qp *sqp;
	struct bnxt_re_ah *sah;
	int rc = 0;

	rdev = qp->rdev;
	/* Create a shadow QP to handle the QP1 traffic */
	sqp_tbl = kcalloc(BNXT_RE_MAX_GSI_SQP_ENTRIES, sizeof(*sqp_tbl),
			  GFP_KERNEL);
	if (!sqp_tbl)
		return -ENOMEM;
	rdev->gsi_ctx.sqp_tbl = sqp_tbl;

	sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
	if (!sqp) {
		rc = -ENODEV;
		ibdev_err(&rdev->ibdev, "Failed to create Shadow QP for QP1");
		goto out;
	}
	rdev->gsi_ctx.gsi_sqp = sqp;

	sqp->rcq = qp->rcq;
	sqp->scq = qp->scq;
	sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
					  &qp->qplib_qp);
	if (!sah) {
		bnxt_qplib_destroy_qp(&rdev->qplib_res,
				      &sqp->qplib_qp);
		rc = -ENODEV;
		ibdev_err(&rdev->ibdev,
			  "Failed to create AH entry for ShadowQP");
		goto out;
	}
	rdev->gsi_ctx.gsi_sah = sah;

	return 0;
out:
	kfree(sqp_tbl);
	return rc;
}

static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
				 struct ib_qp_init_attr *init_attr)
{
	struct bnxt_re_dev *rdev;
	struct bnxt_qplib_qp *qplqp;
	int rc = 0;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;

	qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
	qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;

	rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp);
	if (rc) {
		ibdev_err(&rdev->ibdev, "create HW QP1 failed!");
		goto out;
	}

	rc = bnxt_re_create_shadow_gsi(qp, pd);
out:
	return rc;
}
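
/*
 * GSI handling in the functions above: on pre-P5 chips QP1 is created through
 * bnxt_qplib_create_qp1() and paired with a kernel-owned "shadow" UD QP plus
 * a shadow AH (bnxt_re_create_shadow_gsi()); the shadow QP shares QP1's CQs
 * and sizes its SQ from QP1's RQ depth so relayed traffic can be re-posted
 * without overflowing it. On gen-P5 chips the firmware accepts a GSI QP type
 * directly (see bnxt_re_init_qp_type()), so no shadow QP is needed and
 * bnxt_re_create_qp() below takes the ordinary bnxt_qplib_create_qp() path.
 */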
bnxt_qplib_dev_attr *dev_attr) 1459 { 1460 bool rc = true; 1461 1462 if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes || 1463 init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes || 1464 init_attr->cap.max_send_sge > dev_attr->max_qp_sges || 1465 init_attr->cap.max_recv_sge > dev_attr->max_qp_sges || 1466 init_attr->cap.max_inline_data > dev_attr->max_inline_data) { 1467 ibdev_err(&rdev->ibdev, 1468 "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x", 1469 init_attr->cap.max_send_wr, dev_attr->max_qp_wqes, 1470 init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes, 1471 init_attr->cap.max_send_sge, dev_attr->max_qp_sges, 1472 init_attr->cap.max_recv_sge, dev_attr->max_qp_sges, 1473 init_attr->cap.max_inline_data, 1474 dev_attr->max_inline_data); 1475 rc = false; 1476 } 1477 return rc; 1478 } 1479 1480 int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr, 1481 struct ib_udata *udata) 1482 { 1483 struct ib_pd *ib_pd = ib_qp->pd; 1484 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); 1485 struct bnxt_re_dev *rdev = pd->rdev; 1486 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; 1487 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); 1488 int rc; 1489 1490 rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr); 1491 if (!rc) { 1492 rc = -EINVAL; 1493 goto fail; 1494 } 1495 1496 qp->rdev = rdev; 1497 rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata); 1498 if (rc) 1499 goto fail; 1500 1501 if (qp_init_attr->qp_type == IB_QPT_GSI && 1502 !(bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))) { 1503 rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr); 1504 if (rc == -ENODEV) 1505 goto qp_destroy; 1506 if (rc) 1507 goto fail; 1508 } else { 1509 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp); 1510 if (rc) { 1511 ibdev_err(&rdev->ibdev, "Failed to create HW QP"); 1512 goto free_umem; 1513 } 1514 if (udata) { 1515 struct bnxt_re_qp_resp resp; 1516 1517 resp.qpid = qp->qplib_qp.id; 1518 resp.rsvd = 0; 1519 rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); 1520 if (rc) { 1521 ibdev_err(&rdev->ibdev, "Failed to copy QP udata"); 1522 goto qp_destroy; 1523 } 1524 } 1525 } 1526 1527 qp->ib_qp.qp_num = qp->qplib_qp.id; 1528 if (qp_init_attr->qp_type == IB_QPT_GSI) 1529 rdev->gsi_ctx.gsi_qp = qp; 1530 spin_lock_init(&qp->sq_lock); 1531 spin_lock_init(&qp->rq_lock); 1532 INIT_LIST_HEAD(&qp->list); 1533 mutex_lock(&rdev->qp_lock); 1534 list_add_tail(&qp->list, &rdev->qp_list); 1535 mutex_unlock(&rdev->qp_lock); 1536 atomic_inc(&rdev->qp_count); 1537 1538 return 0; 1539 qp_destroy: 1540 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); 1541 free_umem: 1542 ib_umem_release(qp->rumem); 1543 ib_umem_release(qp->sumem); 1544 fail: 1545 return rc; 1546 } 1547 1548 static u8 __from_ib_qp_state(enum ib_qp_state state) 1549 { 1550 switch (state) { 1551 case IB_QPS_RESET: 1552 return CMDQ_MODIFY_QP_NEW_STATE_RESET; 1553 case IB_QPS_INIT: 1554 return CMDQ_MODIFY_QP_NEW_STATE_INIT; 1555 case IB_QPS_RTR: 1556 return CMDQ_MODIFY_QP_NEW_STATE_RTR; 1557 case IB_QPS_RTS: 1558 return CMDQ_MODIFY_QP_NEW_STATE_RTS; 1559 case IB_QPS_SQD: 1560 return CMDQ_MODIFY_QP_NEW_STATE_SQD; 1561 case IB_QPS_SQE: 1562 return CMDQ_MODIFY_QP_NEW_STATE_SQE; 1563 case IB_QPS_ERR: 1564 default: 1565 return CMDQ_MODIFY_QP_NEW_STATE_ERR; 1566 } 1567 } 1568 1569 static enum ib_qp_state __to_ib_qp_state(u8 state) 1570 { 1571 switch (state) { 1572 case CMDQ_MODIFY_QP_NEW_STATE_RESET: 1573 return IB_QPS_RESET; 1574 case 
CMDQ_MODIFY_QP_NEW_STATE_INIT: 1575 return IB_QPS_INIT; 1576 case CMDQ_MODIFY_QP_NEW_STATE_RTR: 1577 return IB_QPS_RTR; 1578 case CMDQ_MODIFY_QP_NEW_STATE_RTS: 1579 return IB_QPS_RTS; 1580 case CMDQ_MODIFY_QP_NEW_STATE_SQD: 1581 return IB_QPS_SQD; 1582 case CMDQ_MODIFY_QP_NEW_STATE_SQE: 1583 return IB_QPS_SQE; 1584 case CMDQ_MODIFY_QP_NEW_STATE_ERR: 1585 default: 1586 return IB_QPS_ERR; 1587 } 1588 } 1589 1590 static u32 __from_ib_mtu(enum ib_mtu mtu) 1591 { 1592 switch (mtu) { 1593 case IB_MTU_256: 1594 return CMDQ_MODIFY_QP_PATH_MTU_MTU_256; 1595 case IB_MTU_512: 1596 return CMDQ_MODIFY_QP_PATH_MTU_MTU_512; 1597 case IB_MTU_1024: 1598 return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024; 1599 case IB_MTU_2048: 1600 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048; 1601 case IB_MTU_4096: 1602 return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096; 1603 default: 1604 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048; 1605 } 1606 } 1607 1608 static enum ib_mtu __to_ib_mtu(u32 mtu) 1609 { 1610 switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) { 1611 case CMDQ_MODIFY_QP_PATH_MTU_MTU_256: 1612 return IB_MTU_256; 1613 case CMDQ_MODIFY_QP_PATH_MTU_MTU_512: 1614 return IB_MTU_512; 1615 case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024: 1616 return IB_MTU_1024; 1617 case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048: 1618 return IB_MTU_2048; 1619 case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096: 1620 return IB_MTU_4096; 1621 default: 1622 return IB_MTU_2048; 1623 } 1624 } 1625 1626 /* Shared Receive Queues */ 1627 int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata) 1628 { 1629 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, 1630 ib_srq); 1631 struct bnxt_re_dev *rdev = srq->rdev; 1632 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq; 1633 struct bnxt_qplib_nq *nq = NULL; 1634 1635 if (qplib_srq->cq) 1636 nq = qplib_srq->cq->nq; 1637 bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq); 1638 ib_umem_release(srq->umem); 1639 atomic_dec(&rdev->srq_count); 1640 if (nq) 1641 nq->budget--; 1642 return 0; 1643 } 1644 1645 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev, 1646 struct bnxt_re_pd *pd, 1647 struct bnxt_re_srq *srq, 1648 struct ib_udata *udata) 1649 { 1650 struct bnxt_re_srq_req ureq; 1651 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq; 1652 struct ib_umem *umem; 1653 int bytes = 0; 1654 struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context( 1655 udata, struct bnxt_re_ucontext, ib_uctx); 1656 1657 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) 1658 return -EFAULT; 1659 1660 bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size); 1661 bytes = PAGE_ALIGN(bytes); 1662 umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes, 1663 IB_ACCESS_LOCAL_WRITE); 1664 if (IS_ERR(umem)) 1665 return PTR_ERR(umem); 1666 1667 srq->umem = umem; 1668 qplib_srq->sg_info.umem = umem; 1669 qplib_srq->sg_info.pgsize = PAGE_SIZE; 1670 qplib_srq->sg_info.pgshft = PAGE_SHIFT; 1671 qplib_srq->srq_handle = ureq.srq_handle; 1672 qplib_srq->dpi = &cntx->dpi; 1673 1674 return 0; 1675 } 1676 1677 int bnxt_re_create_srq(struct ib_srq *ib_srq, 1678 struct ib_srq_init_attr *srq_init_attr, 1679 struct ib_udata *udata) 1680 { 1681 struct bnxt_qplib_dev_attr *dev_attr; 1682 struct bnxt_qplib_nq *nq = NULL; 1683 struct bnxt_re_dev *rdev; 1684 struct bnxt_re_srq *srq; 1685 struct bnxt_re_pd *pd; 1686 struct ib_pd *ib_pd; 1687 int rc, entries; 1688 1689 ib_pd = ib_srq->pd; 1690 pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); 1691 rdev = pd->rdev; 1692 dev_attr = &rdev->dev_attr; 1693 srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq); 1694 1695 if 
(srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) { 1696 ibdev_err(&rdev->ibdev, "Create CQ failed - max exceeded"); 1697 rc = -EINVAL; 1698 goto exit; 1699 } 1700 1701 if (srq_init_attr->srq_type != IB_SRQT_BASIC) { 1702 rc = -EOPNOTSUPP; 1703 goto exit; 1704 } 1705 1706 srq->rdev = rdev; 1707 srq->qplib_srq.pd = &pd->qplib_pd; 1708 srq->qplib_srq.dpi = &rdev->dpi_privileged; 1709 /* Allocate 1 more than what's provided so posting max doesn't 1710 * mean empty 1711 */ 1712 entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1); 1713 if (entries > dev_attr->max_srq_wqes + 1) 1714 entries = dev_attr->max_srq_wqes + 1; 1715 srq->qplib_srq.max_wqe = entries; 1716 1717 srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge; 1718 /* 128 byte wqe size for SRQ . So use max sges */ 1719 srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges); 1720 srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit; 1721 srq->srq_limit = srq_init_attr->attr.srq_limit; 1722 srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id; 1723 nq = &rdev->nq[0]; 1724 1725 if (udata) { 1726 rc = bnxt_re_init_user_srq(rdev, pd, srq, udata); 1727 if (rc) 1728 goto fail; 1729 } 1730 1731 rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq); 1732 if (rc) { 1733 ibdev_err(&rdev->ibdev, "Create HW SRQ failed!"); 1734 goto fail; 1735 } 1736 1737 if (udata) { 1738 struct bnxt_re_srq_resp resp; 1739 1740 resp.srqid = srq->qplib_srq.id; 1741 rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); 1742 if (rc) { 1743 ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!"); 1744 bnxt_qplib_destroy_srq(&rdev->qplib_res, 1745 &srq->qplib_srq); 1746 goto fail; 1747 } 1748 } 1749 if (nq) 1750 nq->budget++; 1751 atomic_inc(&rdev->srq_count); 1752 spin_lock_init(&srq->lock); 1753 1754 return 0; 1755 1756 fail: 1757 ib_umem_release(srq->umem); 1758 exit: 1759 return rc; 1760 } 1761 1762 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr, 1763 enum ib_srq_attr_mask srq_attr_mask, 1764 struct ib_udata *udata) 1765 { 1766 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, 1767 ib_srq); 1768 struct bnxt_re_dev *rdev = srq->rdev; 1769 int rc; 1770 1771 switch (srq_attr_mask) { 1772 case IB_SRQ_MAX_WR: 1773 /* SRQ resize is not supported */ 1774 break; 1775 case IB_SRQ_LIMIT: 1776 /* Change the SRQ threshold */ 1777 if (srq_attr->srq_limit > srq->qplib_srq.max_wqe) 1778 return -EINVAL; 1779 1780 srq->qplib_srq.threshold = srq_attr->srq_limit; 1781 rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq); 1782 if (rc) { 1783 ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!"); 1784 return rc; 1785 } 1786 /* On success, update the shadow */ 1787 srq->srq_limit = srq_attr->srq_limit; 1788 /* No need to Build and send response back to udata */ 1789 break; 1790 default: 1791 ibdev_err(&rdev->ibdev, 1792 "Unsupported srq_attr_mask 0x%x", srq_attr_mask); 1793 return -EINVAL; 1794 } 1795 return 0; 1796 } 1797 1798 int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr) 1799 { 1800 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, 1801 ib_srq); 1802 struct bnxt_re_srq tsrq; 1803 struct bnxt_re_dev *rdev = srq->rdev; 1804 int rc; 1805 1806 /* Get live SRQ attr */ 1807 tsrq.qplib_srq.id = srq->qplib_srq.id; 1808 rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq); 1809 if (rc) { 1810 ibdev_err(&rdev->ibdev, "Query HW SRQ failed!"); 1811 return rc; 1812 } 1813 srq_attr->max_wr = srq->qplib_srq.max_wqe; 1814 srq_attr->max_sge = 
srq->qplib_srq.max_sge; 1815 srq_attr->srq_limit = tsrq.qplib_srq.threshold; 1816 1817 return 0; 1818 } 1819 1820 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr, 1821 const struct ib_recv_wr **bad_wr) 1822 { 1823 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, 1824 ib_srq); 1825 struct bnxt_qplib_swqe wqe; 1826 unsigned long flags; 1827 int rc = 0; 1828 1829 spin_lock_irqsave(&srq->lock, flags); 1830 while (wr) { 1831 /* Transcribe each ib_recv_wr to qplib_swqe */ 1832 wqe.num_sge = wr->num_sge; 1833 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge); 1834 wqe.wr_id = wr->wr_id; 1835 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV; 1836 1837 rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe); 1838 if (rc) { 1839 *bad_wr = wr; 1840 break; 1841 } 1842 wr = wr->next; 1843 } 1844 spin_unlock_irqrestore(&srq->lock, flags); 1845 1846 return rc; 1847 } 1848 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev, 1849 struct bnxt_re_qp *qp1_qp, 1850 int qp_attr_mask) 1851 { 1852 struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp; 1853 int rc = 0; 1854 1855 if (qp_attr_mask & IB_QP_STATE) { 1856 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE; 1857 qp->qplib_qp.state = qp1_qp->qplib_qp.state; 1858 } 1859 if (qp_attr_mask & IB_QP_PKEY_INDEX) { 1860 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY; 1861 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index; 1862 } 1863 1864 if (qp_attr_mask & IB_QP_QKEY) { 1865 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY; 1866 /* Using a Random QKEY */ 1867 qp->qplib_qp.qkey = 0x81818181; 1868 } 1869 if (qp_attr_mask & IB_QP_SQ_PSN) { 1870 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN; 1871 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn; 1872 } 1873 1874 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp); 1875 if (rc) 1876 ibdev_err(&rdev->ibdev, "Failed to modify Shadow QP for QP1"); 1877 return rc; 1878 } 1879 1880 int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, 1881 int qp_attr_mask, struct ib_udata *udata) 1882 { 1883 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); 1884 struct bnxt_re_dev *rdev = qp->rdev; 1885 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; 1886 enum ib_qp_state curr_qp_state, new_qp_state; 1887 int rc, entries; 1888 unsigned int flags; 1889 u8 nw_type; 1890 1891 if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS) 1892 return -EOPNOTSUPP; 1893 1894 qp->qplib_qp.modify_flags = 0; 1895 if (qp_attr_mask & IB_QP_STATE) { 1896 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state); 1897 new_qp_state = qp_attr->qp_state; 1898 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state, 1899 ib_qp->qp_type, qp_attr_mask)) { 1900 ibdev_err(&rdev->ibdev, 1901 "Invalid attribute mask: %#x specified ", 1902 qp_attr_mask); 1903 ibdev_err(&rdev->ibdev, 1904 "for qpn: %#x type: %#x", 1905 ib_qp->qp_num, ib_qp->qp_type); 1906 ibdev_err(&rdev->ibdev, 1907 "curr_qp_state=0x%x, new_qp_state=0x%x\n", 1908 curr_qp_state, new_qp_state); 1909 return -EINVAL; 1910 } 1911 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE; 1912 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state); 1913 1914 if (!qp->sumem && 1915 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) { 1916 ibdev_dbg(&rdev->ibdev, 1917 "Move QP = %p to flush list\n", qp); 1918 flags = bnxt_re_lock_cqs(qp); 1919 bnxt_qplib_add_flush_qp(&qp->qplib_qp); 1920 bnxt_re_unlock_cqs(qp, flags); 1921 } 1922 if (!qp->sumem && 1923 
qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) { 1924 ibdev_dbg(&rdev->ibdev, 1925 "Move QP = %p out of flush list\n", qp); 1926 flags = bnxt_re_lock_cqs(qp); 1927 bnxt_qplib_clean_qp(&qp->qplib_qp); 1928 bnxt_re_unlock_cqs(qp, flags); 1929 } 1930 } 1931 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) { 1932 qp->qplib_qp.modify_flags |= 1933 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY; 1934 qp->qplib_qp.en_sqd_async_notify = true; 1935 } 1936 if (qp_attr_mask & IB_QP_ACCESS_FLAGS) { 1937 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS; 1938 qp->qplib_qp.access = 1939 __from_ib_access_flags(qp_attr->qp_access_flags); 1940 /* LOCAL_WRITE access must be set to allow RC receive */ 1941 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; 1942 /* Temp: Set all params on QP as of now */ 1943 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE; 1944 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ; 1945 } 1946 if (qp_attr_mask & IB_QP_PKEY_INDEX) { 1947 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY; 1948 qp->qplib_qp.pkey_index = qp_attr->pkey_index; 1949 } 1950 if (qp_attr_mask & IB_QP_QKEY) { 1951 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY; 1952 qp->qplib_qp.qkey = qp_attr->qkey; 1953 } 1954 if (qp_attr_mask & IB_QP_AV) { 1955 const struct ib_global_route *grh = 1956 rdma_ah_read_grh(&qp_attr->ah_attr); 1957 const struct ib_gid_attr *sgid_attr; 1958 struct bnxt_re_gid_ctx *ctx; 1959 1960 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID | 1961 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL | 1962 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX | 1963 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT | 1964 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS | 1965 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC | 1966 CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID; 1967 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw, 1968 sizeof(qp->qplib_qp.ah.dgid.data)); 1969 qp->qplib_qp.ah.flow_label = grh->flow_label; 1970 sgid_attr = grh->sgid_attr; 1971 /* Get the HW context of the GID. The reference 1972 * of GID table entry is already taken by the caller. 
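		 * Note (descriptive comment, based on the assignments just
		 * below): two indices are tracked here - ah.sgid_index takes
		 * the HW GID index from the driver's GID context (ctx->idx),
		 * while ah.host_sgid_index keeps the stack's GID table index
		 * (grh->sgid_index).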
1973 */ 1974 ctx = rdma_read_gid_hw_context(sgid_attr); 1975 qp->qplib_qp.ah.sgid_index = ctx->idx; 1976 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index; 1977 qp->qplib_qp.ah.hop_limit = grh->hop_limit; 1978 qp->qplib_qp.ah.traffic_class = grh->traffic_class; 1979 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr); 1980 ether_addr_copy(qp->qplib_qp.ah.dmac, 1981 qp_attr->ah_attr.roce.dmac); 1982 1983 rc = rdma_read_gid_l2_fields(sgid_attr, NULL, 1984 &qp->qplib_qp.smac[0]); 1985 if (rc) 1986 return rc; 1987 1988 nw_type = rdma_gid_attr_network_type(sgid_attr); 1989 switch (nw_type) { 1990 case RDMA_NETWORK_IPV4: 1991 qp->qplib_qp.nw_type = 1992 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4; 1993 break; 1994 case RDMA_NETWORK_IPV6: 1995 qp->qplib_qp.nw_type = 1996 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6; 1997 break; 1998 default: 1999 qp->qplib_qp.nw_type = 2000 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1; 2001 break; 2002 } 2003 } 2004 2005 if (qp_attr_mask & IB_QP_PATH_MTU) { 2006 qp->qplib_qp.modify_flags |= 2007 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; 2008 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu); 2009 qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu); 2010 } else if (qp_attr->qp_state == IB_QPS_RTR) { 2011 qp->qplib_qp.modify_flags |= 2012 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; 2013 qp->qplib_qp.path_mtu = 2014 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu)); 2015 qp->qplib_qp.mtu = 2016 ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); 2017 } 2018 2019 if (qp_attr_mask & IB_QP_TIMEOUT) { 2020 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT; 2021 qp->qplib_qp.timeout = qp_attr->timeout; 2022 } 2023 if (qp_attr_mask & IB_QP_RETRY_CNT) { 2024 qp->qplib_qp.modify_flags |= 2025 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT; 2026 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt; 2027 } 2028 if (qp_attr_mask & IB_QP_RNR_RETRY) { 2029 qp->qplib_qp.modify_flags |= 2030 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY; 2031 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry; 2032 } 2033 if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) { 2034 qp->qplib_qp.modify_flags |= 2035 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER; 2036 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer; 2037 } 2038 if (qp_attr_mask & IB_QP_RQ_PSN) { 2039 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN; 2040 qp->qplib_qp.rq.psn = qp_attr->rq_psn; 2041 } 2042 if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { 2043 qp->qplib_qp.modify_flags |= 2044 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC; 2045 /* Cap the max_rd_atomic to device max */ 2046 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic, 2047 dev_attr->max_qp_rd_atom); 2048 } 2049 if (qp_attr_mask & IB_QP_SQ_PSN) { 2050 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN; 2051 qp->qplib_qp.sq.psn = qp_attr->sq_psn; 2052 } 2053 if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { 2054 if (qp_attr->max_dest_rd_atomic > 2055 dev_attr->max_qp_init_rd_atom) { 2056 ibdev_err(&rdev->ibdev, 2057 "max_dest_rd_atomic requested%d is > dev_max%d", 2058 qp_attr->max_dest_rd_atomic, 2059 dev_attr->max_qp_init_rd_atom); 2060 return -EINVAL; 2061 } 2062 2063 qp->qplib_qp.modify_flags |= 2064 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC; 2065 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic; 2066 } 2067 if (qp_attr_mask & IB_QP_CAP) { 2068 qp->qplib_qp.modify_flags |= 2069 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE | 2070 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE | 2071 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE | 2072 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE | 2073 
				     CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
		if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
		    (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
		    (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
		    (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
		    (qp_attr->cap.max_inline_data >=
		     dev_attr->max_inline_data)) {
			ibdev_err(&rdev->ibdev,
				  "Modify QP failed - max exceeded");
			return -EINVAL;
		}
		entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
						dev_attr->max_qp_wqes + 1);
		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
						qp_attr->cap.max_send_wr;
		/*
		 * Reserve one slot for the phantom WQE. Some applications can
		 * post one extra entry in this case; allowing it avoids an
		 * unexpected queue-full condition.
		 */
		qp->qplib_qp.sq.q_full_delta -= 1;
		qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
		if (qp->qplib_qp.rq.max_wqe) {
			entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
			qp->qplib_qp.rq.max_wqe =
				min_t(u32, entries, dev_attr->max_qp_wqes + 1);
			qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
						       qp_attr->cap.max_recv_wr;
			qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
		} else {
			/* SRQ was used prior, just ignore the RQ caps */
		}
	}
	if (qp_attr_mask & IB_QP_DEST_QPN) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
		qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
	}
	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to modify HW QP");
		return rc;
	}
	if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
		rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
	return rc;
}

int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	struct bnxt_qplib_qp *qplib_qp;
	int rc;

	qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
	if (!qplib_qp)
		return -ENOMEM;

	qplib_qp->id = qp->qplib_qp.id;
	qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;

	rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to query HW QP");
		goto out;
	}
	qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
	qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
	qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ?
1 : 0; 2145 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access); 2146 qp_attr->pkey_index = qplib_qp->pkey_index; 2147 qp_attr->qkey = qplib_qp->qkey; 2148 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; 2149 rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label, 2150 qplib_qp->ah.host_sgid_index, 2151 qplib_qp->ah.hop_limit, 2152 qplib_qp->ah.traffic_class); 2153 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data); 2154 rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl); 2155 ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac); 2156 qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu); 2157 qp_attr->timeout = qplib_qp->timeout; 2158 qp_attr->retry_cnt = qplib_qp->retry_cnt; 2159 qp_attr->rnr_retry = qplib_qp->rnr_retry; 2160 qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer; 2161 qp_attr->rq_psn = qplib_qp->rq.psn; 2162 qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic; 2163 qp_attr->sq_psn = qplib_qp->sq.psn; 2164 qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic; 2165 qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR : 2166 IB_SIGNAL_REQ_WR; 2167 qp_attr->dest_qp_num = qplib_qp->dest_qpn; 2168 2169 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe; 2170 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge; 2171 qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe; 2172 qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge; 2173 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data; 2174 qp_init_attr->cap = qp_attr->cap; 2175 2176 out: 2177 kfree(qplib_qp); 2178 return rc; 2179 } 2180 2181 /* Routine for sending QP1 packets for RoCE V1 an V2 2182 */ 2183 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp, 2184 const struct ib_send_wr *wr, 2185 struct bnxt_qplib_swqe *wqe, 2186 int payload_size) 2187 { 2188 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, 2189 ib_ah); 2190 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah; 2191 const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr; 2192 struct bnxt_qplib_sge sge; 2193 u8 nw_type; 2194 u16 ether_type; 2195 union ib_gid dgid; 2196 bool is_eth = false; 2197 bool is_vlan = false; 2198 bool is_grh = false; 2199 bool is_udp = false; 2200 u8 ip_version = 0; 2201 u16 vlan_id = 0xFFFF; 2202 void *buf; 2203 int i, rc = 0; 2204 2205 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr)); 2206 2207 rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL); 2208 if (rc) 2209 return rc; 2210 2211 /* Get network header type for this GID */ 2212 nw_type = rdma_gid_attr_network_type(sgid_attr); 2213 switch (nw_type) { 2214 case RDMA_NETWORK_IPV4: 2215 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET; 2216 break; 2217 case RDMA_NETWORK_IPV6: 2218 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET; 2219 break; 2220 default: 2221 nw_type = BNXT_RE_ROCE_V1_PACKET; 2222 break; 2223 } 2224 memcpy(&dgid.raw, &qplib_ah->dgid, 16); 2225 is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP; 2226 if (is_udp) { 2227 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) { 2228 ip_version = 4; 2229 ether_type = ETH_P_IP; 2230 } else { 2231 ip_version = 6; 2232 ether_type = ETH_P_IPV6; 2233 } 2234 is_grh = false; 2235 } else { 2236 ether_type = ETH_P_IBOE; 2237 is_grh = true; 2238 } 2239 2240 is_eth = true; 2241 is_vlan = (vlan_id && (vlan_id < 0x1000)) ? 
true : false;

	ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
			  ip_version, is_udp, 0, &qp->qp1_hdr);

	/* ETH */
	ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
	ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);

	/* For vlan, check the sgid for vlan existence */

	if (!is_vlan) {
		qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
	} else {
		qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
		qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
	}

	if (is_grh || (ip_version == 6)) {
		memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
		       sizeof(sgid_attr->gid));
		memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
		       sizeof(sgid_attr->gid));
		qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
	}

	if (ip_version == 4) {
		qp->qp1_hdr.ip4.tos = 0;
		qp->qp1_hdr.ip4.id = 0;
		qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
		qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;

		memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
		memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
		qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
	}

	if (is_udp) {
		qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
		qp->qp1_hdr.udp.sport = htons(0x8CD1);
		qp->qp1_hdr.udp.csum = 0;
	}

	/* BTH */
	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		qp->qp1_hdr.immediate_present = 1;
	} else {
		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
	}
	if (wr->send_flags & IB_SEND_SOLICITED)
		qp->qp1_hdr.bth.solicited_event = 1;
	/* pad_count */
	qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;

	/* P_key for QP1 is for all members */
	qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
	qp->qp1_hdr.bth.destination_qpn = IB_QP1;
	qp->qp1_hdr.bth.ack_req = 0;
	qp->send_psn++;
	qp->send_psn &= BTH_PSN_MASK;
	qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
	/* DETH */
	/* Use the privileged Q_Key for QP1 */
	qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
	qp->qp1_hdr.deth.source_qpn = IB_QP1;

	/* Pack the QP1 to the transmit buffer */
	buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
	if (buf) {
		ib_ud_header_pack(&qp->qp1_hdr, buf);
		for (i = wqe->num_sge; i; i--) {
			wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
			wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
			wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
		}

		/*
		 * Max Header buf size for IPV6 RoCE V2 is 86,
		 * which is same as the QP1 SQ header buffer.
		 * Header buf size for IPV4 RoCE V2 can be 66.
		 * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
		 * Subtract 20 bytes from QP1 SQ header buf size
		 */
		if (is_udp && ip_version == 4)
			sge.size -= 20;
		/*
		 * Max Header buf size for RoCE V1 is 78.
		 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
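		 * Worked out from the sizes already noted in these comments:
		 * the IPv6 RoCEv2 header is 14 + 4 + 40 + 8 + 20 = 86 bytes
		 * (the QP1 SQ header buffer size), the IPv4 RoCEv2 header is
		 * 14 + 4 + 20 + 8 + 20 = 66 (hence the 20-byte trim above),
		 * and the RoCE v1 header is 14 + 4 + 40 + 20 = 78, hence the
		 * 8-byte trim below.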
2330 * Subtract 8 bytes from QP1 SQ header buf size 2331 */ 2332 if (!is_udp) 2333 sge.size -= 8; 2334 2335 /* Subtract 4 bytes for non vlan packets */ 2336 if (!is_vlan) 2337 sge.size -= 4; 2338 2339 wqe->sg_list[0].addr = sge.addr; 2340 wqe->sg_list[0].lkey = sge.lkey; 2341 wqe->sg_list[0].size = sge.size; 2342 wqe->num_sge++; 2343 2344 } else { 2345 ibdev_err(&qp->rdev->ibdev, "QP1 buffer is empty!"); 2346 rc = -ENOMEM; 2347 } 2348 return rc; 2349 } 2350 2351 /* For the MAD layer, it only provides the recv SGE the size of 2352 * ib_grh + MAD datagram. No Ethernet headers, Ethertype, BTH, DETH, 2353 * nor RoCE iCRC. The Cu+ solution must provide buffer for the entire 2354 * receive packet (334 bytes) with no VLAN and then copy the GRH 2355 * and the MAD datagram out to the provided SGE. 2356 */ 2357 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp, 2358 const struct ib_recv_wr *wr, 2359 struct bnxt_qplib_swqe *wqe, 2360 int payload_size) 2361 { 2362 struct bnxt_re_sqp_entries *sqp_entry; 2363 struct bnxt_qplib_sge ref, sge; 2364 struct bnxt_re_dev *rdev; 2365 u32 rq_prod_index; 2366 2367 rdev = qp->rdev; 2368 2369 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp); 2370 2371 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge)) 2372 return -ENOMEM; 2373 2374 /* Create 1 SGE to receive the entire 2375 * ethernet packet 2376 */ 2377 /* Save the reference from ULP */ 2378 ref.addr = wqe->sg_list[0].addr; 2379 ref.lkey = wqe->sg_list[0].lkey; 2380 ref.size = wqe->sg_list[0].size; 2381 2382 sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index]; 2383 2384 /* SGE 1 */ 2385 wqe->sg_list[0].addr = sge.addr; 2386 wqe->sg_list[0].lkey = sge.lkey; 2387 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2; 2388 sge.size -= wqe->sg_list[0].size; 2389 2390 sqp_entry->sge.addr = ref.addr; 2391 sqp_entry->sge.lkey = ref.lkey; 2392 sqp_entry->sge.size = ref.size; 2393 /* Store the wrid for reporting completion */ 2394 sqp_entry->wrid = wqe->wr_id; 2395 /* change the wqe->wrid to table index */ 2396 wqe->wr_id = rq_prod_index; 2397 return 0; 2398 } 2399 2400 static int is_ud_qp(struct bnxt_re_qp *qp) 2401 { 2402 return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD || 2403 qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI); 2404 } 2405 2406 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp, 2407 const struct ib_send_wr *wr, 2408 struct bnxt_qplib_swqe *wqe) 2409 { 2410 struct bnxt_re_ah *ah = NULL; 2411 2412 if (is_ud_qp(qp)) { 2413 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah); 2414 wqe->send.q_key = ud_wr(wr)->remote_qkey; 2415 wqe->send.dst_qp = ud_wr(wr)->remote_qpn; 2416 wqe->send.avid = ah->qplib_ah.id; 2417 } 2418 switch (wr->opcode) { 2419 case IB_WR_SEND: 2420 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND; 2421 break; 2422 case IB_WR_SEND_WITH_IMM: 2423 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM; 2424 wqe->send.imm_data = wr->ex.imm_data; 2425 break; 2426 case IB_WR_SEND_WITH_INV: 2427 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV; 2428 wqe->send.inv_key = wr->ex.invalidate_rkey; 2429 break; 2430 default: 2431 return -EINVAL; 2432 } 2433 if (wr->send_flags & IB_SEND_SIGNALED) 2434 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; 2435 if (wr->send_flags & IB_SEND_FENCE) 2436 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; 2437 if (wr->send_flags & IB_SEND_SOLICITED) 2438 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; 2439 if (wr->send_flags & IB_SEND_INLINE) 2440 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE; 2441 2442 return 0; 2443 } 2444 2445 static int 
bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr, 2446 struct bnxt_qplib_swqe *wqe) 2447 { 2448 switch (wr->opcode) { 2449 case IB_WR_RDMA_WRITE: 2450 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE; 2451 break; 2452 case IB_WR_RDMA_WRITE_WITH_IMM: 2453 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM; 2454 wqe->rdma.imm_data = wr->ex.imm_data; 2455 break; 2456 case IB_WR_RDMA_READ: 2457 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ; 2458 wqe->rdma.inv_key = wr->ex.invalidate_rkey; 2459 break; 2460 default: 2461 return -EINVAL; 2462 } 2463 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr; 2464 wqe->rdma.r_key = rdma_wr(wr)->rkey; 2465 if (wr->send_flags & IB_SEND_SIGNALED) 2466 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; 2467 if (wr->send_flags & IB_SEND_FENCE) 2468 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; 2469 if (wr->send_flags & IB_SEND_SOLICITED) 2470 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; 2471 if (wr->send_flags & IB_SEND_INLINE) 2472 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE; 2473 2474 return 0; 2475 } 2476 2477 static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr, 2478 struct bnxt_qplib_swqe *wqe) 2479 { 2480 switch (wr->opcode) { 2481 case IB_WR_ATOMIC_CMP_AND_SWP: 2482 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP; 2483 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add; 2484 wqe->atomic.swap_data = atomic_wr(wr)->swap; 2485 break; 2486 case IB_WR_ATOMIC_FETCH_AND_ADD: 2487 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD; 2488 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add; 2489 break; 2490 default: 2491 return -EINVAL; 2492 } 2493 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr; 2494 wqe->atomic.r_key = atomic_wr(wr)->rkey; 2495 if (wr->send_flags & IB_SEND_SIGNALED) 2496 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; 2497 if (wr->send_flags & IB_SEND_FENCE) 2498 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; 2499 if (wr->send_flags & IB_SEND_SOLICITED) 2500 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; 2501 return 0; 2502 } 2503 2504 static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr, 2505 struct bnxt_qplib_swqe *wqe) 2506 { 2507 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV; 2508 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey; 2509 2510 /* Need unconditional fence for local invalidate 2511 * opcode to work as expected. 2512 */ 2513 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; 2514 2515 if (wr->send_flags & IB_SEND_SIGNALED) 2516 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; 2517 if (wr->send_flags & IB_SEND_SOLICITED) 2518 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; 2519 2520 return 0; 2521 } 2522 2523 static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr, 2524 struct bnxt_qplib_swqe *wqe) 2525 { 2526 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr); 2527 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl; 2528 int access = wr->access; 2529 2530 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0]; 2531 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0]; 2532 wqe->frmr.page_list = mr->pages; 2533 wqe->frmr.page_list_len = mr->npages; 2534 wqe->frmr.levels = qplib_frpl->hwq.level; 2535 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR; 2536 2537 /* Need unconditional fence for reg_mr 2538 * opcode to function as expected. 
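	 * (Descriptive note: BNXT_QPLIB_SWQE_FLAGS_UC_FENCE is therefore set
	 * unconditionally just below, independent of IB_SEND_FENCE in
	 * wr->wr.send_flags.)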
2539 */ 2540 2541 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; 2542 2543 if (wr->wr.send_flags & IB_SEND_SIGNALED) 2544 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; 2545 2546 if (access & IB_ACCESS_LOCAL_WRITE) 2547 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE; 2548 if (access & IB_ACCESS_REMOTE_READ) 2549 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ; 2550 if (access & IB_ACCESS_REMOTE_WRITE) 2551 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE; 2552 if (access & IB_ACCESS_REMOTE_ATOMIC) 2553 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC; 2554 if (access & IB_ACCESS_MW_BIND) 2555 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND; 2556 2557 wqe->frmr.l_key = wr->key; 2558 wqe->frmr.length = wr->mr->length; 2559 wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K); 2560 wqe->frmr.pg_sz_log = ilog2(wr->mr->page_size >> PAGE_SHIFT_4K); 2561 wqe->frmr.va = wr->mr->iova; 2562 return 0; 2563 } 2564 2565 static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev, 2566 const struct ib_send_wr *wr, 2567 struct bnxt_qplib_swqe *wqe) 2568 { 2569 /* Copy the inline data to the data field */ 2570 u8 *in_data; 2571 u32 i, sge_len; 2572 void *sge_addr; 2573 2574 in_data = wqe->inline_data; 2575 for (i = 0; i < wr->num_sge; i++) { 2576 sge_addr = (void *)(unsigned long) 2577 wr->sg_list[i].addr; 2578 sge_len = wr->sg_list[i].length; 2579 2580 if ((sge_len + wqe->inline_len) > 2581 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) { 2582 ibdev_err(&rdev->ibdev, 2583 "Inline data size requested > supported value"); 2584 return -EINVAL; 2585 } 2586 sge_len = wr->sg_list[i].length; 2587 2588 memcpy(in_data, sge_addr, sge_len); 2589 in_data += wr->sg_list[i].length; 2590 wqe->inline_len += wr->sg_list[i].length; 2591 } 2592 return wqe->inline_len; 2593 } 2594 2595 static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev, 2596 const struct ib_send_wr *wr, 2597 struct bnxt_qplib_swqe *wqe) 2598 { 2599 int payload_sz = 0; 2600 2601 if (wr->send_flags & IB_SEND_INLINE) 2602 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe); 2603 else 2604 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list, 2605 wqe->num_sge); 2606 2607 return payload_sz; 2608 } 2609 2610 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp) 2611 { 2612 if ((qp->ib_qp.qp_type == IB_QPT_UD || 2613 qp->ib_qp.qp_type == IB_QPT_GSI || 2614 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) && 2615 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) { 2616 int qp_attr_mask; 2617 struct ib_qp_attr qp_attr; 2618 2619 qp_attr_mask = IB_QP_STATE; 2620 qp_attr.qp_state = IB_QPS_RTS; 2621 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL); 2622 qp->qplib_qp.wqe_cnt = 0; 2623 } 2624 } 2625 2626 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev, 2627 struct bnxt_re_qp *qp, 2628 const struct ib_send_wr *wr) 2629 { 2630 int rc = 0, payload_sz = 0; 2631 unsigned long flags; 2632 2633 spin_lock_irqsave(&qp->sq_lock, flags); 2634 while (wr) { 2635 struct bnxt_qplib_swqe wqe = {}; 2636 2637 /* Common */ 2638 wqe.num_sge = wr->num_sge; 2639 if (wr->num_sge > qp->qplib_qp.sq.max_sge) { 2640 ibdev_err(&rdev->ibdev, 2641 "Limit exceeded for Send SGEs"); 2642 rc = -EINVAL; 2643 goto bad; 2644 } 2645 2646 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe); 2647 if (payload_sz < 0) { 2648 rc = -EINVAL; 2649 goto bad; 2650 } 2651 wqe.wr_id = wr->wr_id; 2652 2653 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND; 2654 2655 rc = bnxt_re_build_send_wqe(qp, wr, &wqe); 2656 if (!rc) 2657 rc 
= bnxt_qplib_post_send(&qp->qplib_qp, &wqe); 2658 bad: 2659 if (rc) { 2660 ibdev_err(&rdev->ibdev, 2661 "Post send failed opcode = %#x rc = %d", 2662 wr->opcode, rc); 2663 break; 2664 } 2665 wr = wr->next; 2666 } 2667 bnxt_qplib_post_send_db(&qp->qplib_qp); 2668 bnxt_ud_qp_hw_stall_workaround(qp); 2669 spin_unlock_irqrestore(&qp->sq_lock, flags); 2670 return rc; 2671 } 2672 2673 int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr, 2674 const struct ib_send_wr **bad_wr) 2675 { 2676 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); 2677 struct bnxt_qplib_swqe wqe; 2678 int rc = 0, payload_sz = 0; 2679 unsigned long flags; 2680 2681 spin_lock_irqsave(&qp->sq_lock, flags); 2682 while (wr) { 2683 /* House keeping */ 2684 memset(&wqe, 0, sizeof(wqe)); 2685 2686 /* Common */ 2687 wqe.num_sge = wr->num_sge; 2688 if (wr->num_sge > qp->qplib_qp.sq.max_sge) { 2689 ibdev_err(&qp->rdev->ibdev, 2690 "Limit exceeded for Send SGEs"); 2691 rc = -EINVAL; 2692 goto bad; 2693 } 2694 2695 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe); 2696 if (payload_sz < 0) { 2697 rc = -EINVAL; 2698 goto bad; 2699 } 2700 wqe.wr_id = wr->wr_id; 2701 2702 switch (wr->opcode) { 2703 case IB_WR_SEND: 2704 case IB_WR_SEND_WITH_IMM: 2705 if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) { 2706 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe, 2707 payload_sz); 2708 if (rc) 2709 goto bad; 2710 wqe.rawqp1.lflags |= 2711 SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC; 2712 } 2713 switch (wr->send_flags) { 2714 case IB_SEND_IP_CSUM: 2715 wqe.rawqp1.lflags |= 2716 SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM; 2717 break; 2718 default: 2719 break; 2720 } 2721 fallthrough; 2722 case IB_WR_SEND_WITH_INV: 2723 rc = bnxt_re_build_send_wqe(qp, wr, &wqe); 2724 break; 2725 case IB_WR_RDMA_WRITE: 2726 case IB_WR_RDMA_WRITE_WITH_IMM: 2727 case IB_WR_RDMA_READ: 2728 rc = bnxt_re_build_rdma_wqe(wr, &wqe); 2729 break; 2730 case IB_WR_ATOMIC_CMP_AND_SWP: 2731 case IB_WR_ATOMIC_FETCH_AND_ADD: 2732 rc = bnxt_re_build_atomic_wqe(wr, &wqe); 2733 break; 2734 case IB_WR_RDMA_READ_WITH_INV: 2735 ibdev_err(&qp->rdev->ibdev, 2736 "RDMA Read with Invalidate is not supported"); 2737 rc = -EINVAL; 2738 goto bad; 2739 case IB_WR_LOCAL_INV: 2740 rc = bnxt_re_build_inv_wqe(wr, &wqe); 2741 break; 2742 case IB_WR_REG_MR: 2743 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe); 2744 break; 2745 default: 2746 /* Unsupported WRs */ 2747 ibdev_err(&qp->rdev->ibdev, 2748 "WR (%#x) is not supported", wr->opcode); 2749 rc = -EINVAL; 2750 goto bad; 2751 } 2752 if (!rc) 2753 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); 2754 bad: 2755 if (rc) { 2756 ibdev_err(&qp->rdev->ibdev, 2757 "post_send failed op:%#x qps = %#x rc = %d\n", 2758 wr->opcode, qp->qplib_qp.state, rc); 2759 *bad_wr = wr; 2760 break; 2761 } 2762 wr = wr->next; 2763 } 2764 bnxt_qplib_post_send_db(&qp->qplib_qp); 2765 bnxt_ud_qp_hw_stall_workaround(qp); 2766 spin_unlock_irqrestore(&qp->sq_lock, flags); 2767 2768 return rc; 2769 } 2770 2771 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev, 2772 struct bnxt_re_qp *qp, 2773 const struct ib_recv_wr *wr) 2774 { 2775 struct bnxt_qplib_swqe wqe; 2776 int rc = 0; 2777 2778 memset(&wqe, 0, sizeof(wqe)); 2779 while (wr) { 2780 /* House keeping */ 2781 memset(&wqe, 0, sizeof(wqe)); 2782 2783 /* Common */ 2784 wqe.num_sge = wr->num_sge; 2785 if (wr->num_sge > qp->qplib_qp.rq.max_sge) { 2786 ibdev_err(&rdev->ibdev, 2787 "Limit exceeded for Receive SGEs"); 2788 rc = -EINVAL; 2789 break; 2790 } 2791 bnxt_re_build_sgl(wr->sg_list, 
wqe.sg_list, wr->num_sge); 2792 wqe.wr_id = wr->wr_id; 2793 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV; 2794 2795 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe); 2796 if (rc) 2797 break; 2798 2799 wr = wr->next; 2800 } 2801 if (!rc) 2802 bnxt_qplib_post_recv_db(&qp->qplib_qp); 2803 return rc; 2804 } 2805 2806 int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr, 2807 const struct ib_recv_wr **bad_wr) 2808 { 2809 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); 2810 struct bnxt_qplib_swqe wqe; 2811 int rc = 0, payload_sz = 0; 2812 unsigned long flags; 2813 u32 count = 0; 2814 2815 spin_lock_irqsave(&qp->rq_lock, flags); 2816 while (wr) { 2817 /* House keeping */ 2818 memset(&wqe, 0, sizeof(wqe)); 2819 2820 /* Common */ 2821 wqe.num_sge = wr->num_sge; 2822 if (wr->num_sge > qp->qplib_qp.rq.max_sge) { 2823 ibdev_err(&qp->rdev->ibdev, 2824 "Limit exceeded for Receive SGEs"); 2825 rc = -EINVAL; 2826 *bad_wr = wr; 2827 break; 2828 } 2829 2830 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, 2831 wr->num_sge); 2832 wqe.wr_id = wr->wr_id; 2833 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV; 2834 2835 if (ib_qp->qp_type == IB_QPT_GSI && 2836 qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI) 2837 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe, 2838 payload_sz); 2839 if (!rc) 2840 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe); 2841 if (rc) { 2842 *bad_wr = wr; 2843 break; 2844 } 2845 2846 /* Ring DB if the RQEs posted reaches a threshold value */ 2847 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) { 2848 bnxt_qplib_post_recv_db(&qp->qplib_qp); 2849 count = 0; 2850 } 2851 2852 wr = wr->next; 2853 } 2854 2855 if (count) 2856 bnxt_qplib_post_recv_db(&qp->qplib_qp); 2857 2858 spin_unlock_irqrestore(&qp->rq_lock, flags); 2859 2860 return rc; 2861 } 2862 2863 /* Completion Queues */ 2864 int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) 2865 { 2866 struct bnxt_re_cq *cq; 2867 struct bnxt_qplib_nq *nq; 2868 struct bnxt_re_dev *rdev; 2869 2870 cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); 2871 rdev = cq->rdev; 2872 nq = cq->qplib_cq.nq; 2873 2874 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq); 2875 ib_umem_release(cq->umem); 2876 2877 atomic_dec(&rdev->cq_count); 2878 nq->budget--; 2879 kfree(cq->cql); 2880 return 0; 2881 } 2882 2883 int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, 2884 struct ib_udata *udata) 2885 { 2886 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev); 2887 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; 2888 struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq); 2889 int rc, entries; 2890 int cqe = attr->cqe; 2891 struct bnxt_qplib_nq *nq = NULL; 2892 unsigned int nq_alloc_cnt; 2893 2894 if (attr->flags) 2895 return -EOPNOTSUPP; 2896 2897 /* Validate CQ fields */ 2898 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) { 2899 ibdev_err(&rdev->ibdev, "Failed to create CQ -max exceeded"); 2900 return -EINVAL; 2901 } 2902 2903 cq->rdev = rdev; 2904 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq); 2905 2906 entries = roundup_pow_of_two(cqe + 1); 2907 if (entries > dev_attr->max_cq_wqes + 1) 2908 entries = dev_attr->max_cq_wqes + 1; 2909 2910 cq->qplib_cq.sg_info.pgsize = PAGE_SIZE; 2911 cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT; 2912 if (udata) { 2913 struct bnxt_re_cq_req req; 2914 struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context( 2915 udata, struct bnxt_re_ucontext, ib_uctx); 2916 if (ib_copy_from_udata(&req, udata, sizeof(req))) { 2917 rc = -EFAULT; 2918 
goto fail; 2919 } 2920 2921 cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va, 2922 entries * sizeof(struct cq_base), 2923 IB_ACCESS_LOCAL_WRITE); 2924 if (IS_ERR(cq->umem)) { 2925 rc = PTR_ERR(cq->umem); 2926 goto fail; 2927 } 2928 cq->qplib_cq.sg_info.umem = cq->umem; 2929 cq->qplib_cq.dpi = &uctx->dpi; 2930 } else { 2931 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL); 2932 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe), 2933 GFP_KERNEL); 2934 if (!cq->cql) { 2935 rc = -ENOMEM; 2936 goto fail; 2937 } 2938 2939 cq->qplib_cq.dpi = &rdev->dpi_privileged; 2940 } 2941 /* 2942 * Allocating the NQ in a round robin fashion. nq_alloc_cnt is a 2943 * used for getting the NQ index. 2944 */ 2945 nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt); 2946 nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)]; 2947 cq->qplib_cq.max_wqe = entries; 2948 cq->qplib_cq.cnq_hw_ring_id = nq->ring_id; 2949 cq->qplib_cq.nq = nq; 2950 2951 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq); 2952 if (rc) { 2953 ibdev_err(&rdev->ibdev, "Failed to create HW CQ"); 2954 goto fail; 2955 } 2956 2957 cq->ib_cq.cqe = entries; 2958 cq->cq_period = cq->qplib_cq.period; 2959 nq->budget++; 2960 2961 atomic_inc(&rdev->cq_count); 2962 spin_lock_init(&cq->cq_lock); 2963 2964 if (udata) { 2965 struct bnxt_re_cq_resp resp; 2966 2967 resp.cqid = cq->qplib_cq.id; 2968 resp.tail = cq->qplib_cq.hwq.cons; 2969 resp.phase = cq->qplib_cq.period; 2970 resp.rsvd = 0; 2971 rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); 2972 if (rc) { 2973 ibdev_err(&rdev->ibdev, "Failed to copy CQ udata"); 2974 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq); 2975 goto c2fail; 2976 } 2977 } 2978 2979 return 0; 2980 2981 c2fail: 2982 ib_umem_release(cq->umem); 2983 fail: 2984 kfree(cq->cql); 2985 return rc; 2986 } 2987 2988 static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq) 2989 { 2990 struct bnxt_re_dev *rdev = cq->rdev; 2991 2992 bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq); 2993 2994 cq->qplib_cq.max_wqe = cq->resize_cqe; 2995 if (cq->resize_umem) { 2996 ib_umem_release(cq->umem); 2997 cq->umem = cq->resize_umem; 2998 cq->resize_umem = NULL; 2999 cq->resize_cqe = 0; 3000 } 3001 } 3002 3003 int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) 3004 { 3005 struct bnxt_qplib_sg_info sg_info = {}; 3006 struct bnxt_qplib_dpi *orig_dpi = NULL; 3007 struct bnxt_qplib_dev_attr *dev_attr; 3008 struct bnxt_re_ucontext *uctx = NULL; 3009 struct bnxt_re_resize_cq_req req; 3010 struct bnxt_re_dev *rdev; 3011 struct bnxt_re_cq *cq; 3012 int rc, entries; 3013 3014 cq = container_of(ibcq, struct bnxt_re_cq, ib_cq); 3015 rdev = cq->rdev; 3016 dev_attr = &rdev->dev_attr; 3017 if (!ibcq->uobject) { 3018 ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported"); 3019 return -EOPNOTSUPP; 3020 } 3021 3022 if (cq->resize_umem) { 3023 ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - Busy", 3024 cq->qplib_cq.id); 3025 return -EBUSY; 3026 } 3027 3028 /* Check the requested cq depth out of supported depth */ 3029 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) { 3030 ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - out of range cqe %d", 3031 cq->qplib_cq.id, cqe); 3032 return -EINVAL; 3033 } 3034 3035 entries = roundup_pow_of_two(cqe + 1); 3036 if (entries > dev_attr->max_cq_wqes + 1) 3037 entries = dev_attr->max_cq_wqes + 1; 3038 3039 uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, 3040 ib_uctx); 3041 /* uverbs consumer */ 3042 if (ib_copy_from_udata(&req, udata, sizeof(req))) { 3043 rc 
= -EFAULT; 3044 goto fail; 3045 } 3046 3047 cq->resize_umem = ib_umem_get(&rdev->ibdev, req.cq_va, 3048 entries * sizeof(struct cq_base), 3049 IB_ACCESS_LOCAL_WRITE); 3050 if (IS_ERR(cq->resize_umem)) { 3051 rc = PTR_ERR(cq->resize_umem); 3052 cq->resize_umem = NULL; 3053 ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %d\n", 3054 __func__, rc); 3055 goto fail; 3056 } 3057 cq->resize_cqe = entries; 3058 memcpy(&sg_info, &cq->qplib_cq.sg_info, sizeof(sg_info)); 3059 orig_dpi = cq->qplib_cq.dpi; 3060 3061 cq->qplib_cq.sg_info.umem = cq->resize_umem; 3062 cq->qplib_cq.sg_info.pgsize = PAGE_SIZE; 3063 cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT; 3064 cq->qplib_cq.dpi = &uctx->dpi; 3065 3066 rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries); 3067 if (rc) { 3068 ibdev_err(&rdev->ibdev, "Resize HW CQ %#x failed!", 3069 cq->qplib_cq.id); 3070 goto fail; 3071 } 3072 3073 cq->ib_cq.cqe = cq->resize_cqe; 3074 3075 return 0; 3076 3077 fail: 3078 if (cq->resize_umem) { 3079 ib_umem_release(cq->resize_umem); 3080 cq->resize_umem = NULL; 3081 cq->resize_cqe = 0; 3082 memcpy(&cq->qplib_cq.sg_info, &sg_info, sizeof(sg_info)); 3083 cq->qplib_cq.dpi = orig_dpi; 3084 } 3085 return rc; 3086 } 3087 3088 static u8 __req_to_ib_wc_status(u8 qstatus) 3089 { 3090 switch (qstatus) { 3091 case CQ_REQ_STATUS_OK: 3092 return IB_WC_SUCCESS; 3093 case CQ_REQ_STATUS_BAD_RESPONSE_ERR: 3094 return IB_WC_BAD_RESP_ERR; 3095 case CQ_REQ_STATUS_LOCAL_LENGTH_ERR: 3096 return IB_WC_LOC_LEN_ERR; 3097 case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR: 3098 return IB_WC_LOC_QP_OP_ERR; 3099 case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR: 3100 return IB_WC_LOC_PROT_ERR; 3101 case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR: 3102 return IB_WC_GENERAL_ERR; 3103 case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR: 3104 return IB_WC_REM_INV_REQ_ERR; 3105 case CQ_REQ_STATUS_REMOTE_ACCESS_ERR: 3106 return IB_WC_REM_ACCESS_ERR; 3107 case CQ_REQ_STATUS_REMOTE_OPERATION_ERR: 3108 return IB_WC_REM_OP_ERR; 3109 case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR: 3110 return IB_WC_RNR_RETRY_EXC_ERR; 3111 case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR: 3112 return IB_WC_RETRY_EXC_ERR; 3113 case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR: 3114 return IB_WC_WR_FLUSH_ERR; 3115 default: 3116 return IB_WC_GENERAL_ERR; 3117 } 3118 return 0; 3119 } 3120 3121 static u8 __rawqp1_to_ib_wc_status(u8 qstatus) 3122 { 3123 switch (qstatus) { 3124 case CQ_RES_RAWETH_QP1_STATUS_OK: 3125 return IB_WC_SUCCESS; 3126 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR: 3127 return IB_WC_LOC_ACCESS_ERR; 3128 case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR: 3129 return IB_WC_LOC_LEN_ERR; 3130 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR: 3131 return IB_WC_LOC_PROT_ERR; 3132 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR: 3133 return IB_WC_LOC_QP_OP_ERR; 3134 case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR: 3135 return IB_WC_GENERAL_ERR; 3136 case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR: 3137 return IB_WC_WR_FLUSH_ERR; 3138 case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR: 3139 return IB_WC_WR_FLUSH_ERR; 3140 default: 3141 return IB_WC_GENERAL_ERR; 3142 } 3143 } 3144 3145 static u8 __rc_to_ib_wc_status(u8 qstatus) 3146 { 3147 switch (qstatus) { 3148 case CQ_RES_RC_STATUS_OK: 3149 return IB_WC_SUCCESS; 3150 case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR: 3151 return IB_WC_LOC_ACCESS_ERR; 3152 case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR: 3153 return IB_WC_LOC_LEN_ERR; 3154 case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR: 3155 return IB_WC_LOC_PROT_ERR; 3156 case 
CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR: 3157 return IB_WC_LOC_QP_OP_ERR; 3158 case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR: 3159 return IB_WC_GENERAL_ERR; 3160 case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR: 3161 return IB_WC_REM_INV_REQ_ERR; 3162 case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR: 3163 return IB_WC_WR_FLUSH_ERR; 3164 case CQ_RES_RC_STATUS_HW_FLUSH_ERR: 3165 return IB_WC_WR_FLUSH_ERR; 3166 default: 3167 return IB_WC_GENERAL_ERR; 3168 } 3169 } 3170 3171 static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe) 3172 { 3173 switch (cqe->type) { 3174 case BNXT_QPLIB_SWQE_TYPE_SEND: 3175 wc->opcode = IB_WC_SEND; 3176 break; 3177 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM: 3178 wc->opcode = IB_WC_SEND; 3179 wc->wc_flags |= IB_WC_WITH_IMM; 3180 break; 3181 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV: 3182 wc->opcode = IB_WC_SEND; 3183 wc->wc_flags |= IB_WC_WITH_INVALIDATE; 3184 break; 3185 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE: 3186 wc->opcode = IB_WC_RDMA_WRITE; 3187 break; 3188 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM: 3189 wc->opcode = IB_WC_RDMA_WRITE; 3190 wc->wc_flags |= IB_WC_WITH_IMM; 3191 break; 3192 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ: 3193 wc->opcode = IB_WC_RDMA_READ; 3194 break; 3195 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP: 3196 wc->opcode = IB_WC_COMP_SWAP; 3197 break; 3198 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD: 3199 wc->opcode = IB_WC_FETCH_ADD; 3200 break; 3201 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV: 3202 wc->opcode = IB_WC_LOCAL_INV; 3203 break; 3204 case BNXT_QPLIB_SWQE_TYPE_REG_MR: 3205 wc->opcode = IB_WC_REG_MR; 3206 break; 3207 default: 3208 wc->opcode = IB_WC_SEND; 3209 break; 3210 } 3211 3212 wc->status = __req_to_ib_wc_status(cqe->status); 3213 } 3214 3215 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags, 3216 u16 raweth_qp1_flags2) 3217 { 3218 bool is_ipv6 = false, is_ipv4 = false; 3219 3220 /* raweth_qp1_flags Bit 9-6 indicates itype */ 3221 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE) 3222 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE) 3223 return -1; 3224 3225 if (raweth_qp1_flags2 & 3226 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC && 3227 raweth_qp1_flags2 & 3228 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) { 3229 /* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */ 3230 (raweth_qp1_flags2 & 3231 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ? 3232 (is_ipv6 = true) : (is_ipv4 = true); 3233 return ((is_ipv6) ? 
3234 BNXT_RE_ROCEV2_IPV6_PACKET : 3235 BNXT_RE_ROCEV2_IPV4_PACKET); 3236 } else { 3237 return BNXT_RE_ROCE_V1_PACKET; 3238 } 3239 } 3240 3241 static int bnxt_re_to_ib_nw_type(int nw_type) 3242 { 3243 u8 nw_hdr_type = 0xFF; 3244 3245 switch (nw_type) { 3246 case BNXT_RE_ROCE_V1_PACKET: 3247 nw_hdr_type = RDMA_NETWORK_ROCE_V1; 3248 break; 3249 case BNXT_RE_ROCEV2_IPV4_PACKET: 3250 nw_hdr_type = RDMA_NETWORK_IPV4; 3251 break; 3252 case BNXT_RE_ROCEV2_IPV6_PACKET: 3253 nw_hdr_type = RDMA_NETWORK_IPV6; 3254 break; 3255 } 3256 return nw_hdr_type; 3257 } 3258 3259 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev, 3260 void *rq_hdr_buf) 3261 { 3262 u8 *tmp_buf = NULL; 3263 struct ethhdr *eth_hdr; 3264 u16 eth_type; 3265 bool rc = false; 3266 3267 tmp_buf = (u8 *)rq_hdr_buf; 3268 /* 3269 * If dest mac is not same as I/F mac, this could be a 3270 * loopback address or multicast address, check whether 3271 * it is a loopback packet 3272 */ 3273 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) { 3274 tmp_buf += 4; 3275 /* Check the ether type */ 3276 eth_hdr = (struct ethhdr *)tmp_buf; 3277 eth_type = ntohs(eth_hdr->h_proto); 3278 switch (eth_type) { 3279 case ETH_P_IBOE: 3280 rc = true; 3281 break; 3282 case ETH_P_IP: 3283 case ETH_P_IPV6: { 3284 u32 len; 3285 struct udphdr *udp_hdr; 3286 3287 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) : 3288 sizeof(struct ipv6hdr)); 3289 tmp_buf += sizeof(struct ethhdr) + len; 3290 udp_hdr = (struct udphdr *)tmp_buf; 3291 if (ntohs(udp_hdr->dest) == 3292 ROCE_V2_UDP_DPORT) 3293 rc = true; 3294 break; 3295 } 3296 default: 3297 break; 3298 } 3299 } 3300 3301 return rc; 3302 } 3303 3304 static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp, 3305 struct bnxt_qplib_cqe *cqe) 3306 { 3307 struct bnxt_re_dev *rdev = gsi_qp->rdev; 3308 struct bnxt_re_sqp_entries *sqp_entry = NULL; 3309 struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp; 3310 struct bnxt_re_ah *gsi_sah; 3311 struct ib_send_wr *swr; 3312 struct ib_ud_wr udwr; 3313 struct ib_recv_wr rwr; 3314 int pkt_type = 0; 3315 u32 tbl_idx; 3316 void *rq_hdr_buf; 3317 dma_addr_t rq_hdr_buf_map; 3318 dma_addr_t shrq_hdr_buf_map; 3319 u32 offset = 0; 3320 u32 skip_bytes = 0; 3321 struct ib_sge s_sge[2]; 3322 struct ib_sge r_sge[2]; 3323 int rc; 3324 3325 memset(&udwr, 0, sizeof(udwr)); 3326 memset(&rwr, 0, sizeof(rwr)); 3327 memset(&s_sge, 0, sizeof(s_sge)); 3328 memset(&r_sge, 0, sizeof(r_sge)); 3329 3330 swr = &udwr.wr; 3331 tbl_idx = cqe->wr_id; 3332 3333 rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf + 3334 (tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size); 3335 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp, 3336 tbl_idx); 3337 3338 /* Shadow QP header buffer */ 3339 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp, 3340 tbl_idx); 3341 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx]; 3342 3343 /* Store this cqe */ 3344 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe)); 3345 sqp_entry->qp1_qp = gsi_qp; 3346 3347 /* Find packet type from the cqe */ 3348 3349 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags, 3350 cqe->raweth_qp1_flags2); 3351 if (pkt_type < 0) { 3352 ibdev_err(&rdev->ibdev, "Invalid packet\n"); 3353 return -EINVAL; 3354 } 3355 3356 /* Adjust the offset for the user buffer and post in the rq */ 3357 3358 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET) 3359 offset = 20; 3360 3361 /* 3362 * QP1 loopback packet has 4 bytes of internal header before 3363 * ether header. Skip these four bytes. 
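	 * (Descriptive note: bnxt_re_is_loopback_packet() above likewise
	 * steps past these 4 bytes before reading the Ethernet type.)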
	 */
	if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
		skip_bytes = 4;

	/* First send SGE. Skip the ether header */
	s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
			+ skip_bytes;
	s_sge[0].lkey = 0xFFFFFFFF;
	s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
				BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;

	/* Second Send SGE */
	s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
			BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
	if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
		s_sge[1].addr += 8;
	s_sge[1].lkey = 0xFFFFFFFF;
	s_sge[1].length = 256;

	/* First recv SGE */

	r_sge[0].addr = shrq_hdr_buf_map;
	r_sge[0].lkey = 0xFFFFFFFF;
	r_sge[0].length = 40;

	r_sge[1].addr = sqp_entry->sge.addr + offset;
	r_sge[1].lkey = sqp_entry->sge.lkey;
	r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;

	/* Create receive work request */
	rwr.num_sge = 2;
	rwr.sg_list = r_sge;
	rwr.wr_id = tbl_idx;
	rwr.next = NULL;

	rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to post Rx buffers to shadow QP");
		return -ENOMEM;
	}

	swr->num_sge = 2;
	swr->sg_list = s_sge;
	swr->wr_id = tbl_idx;
	swr->opcode = IB_WR_SEND;
	swr->next = NULL;
	gsi_sah = rdev->gsi_ctx.gsi_sah;
	udwr.ah = &gsi_sah->ib_ah;
	udwr.remote_qpn = gsi_sqp->qplib_qp.id;
	udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;

	/* post data received in the send queue */
	return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
}

static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
					  struct bnxt_qplib_cqe *cqe)
{
	wc->opcode = IB_WC_RECV;
	wc->status = __rawqp1_to_ib_wc_status(cqe->status);
	wc->wc_flags |= IB_WC_GRH;
}

static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
					u16 vlan_id)
{
	/*
	 * Check if the vlan is configured in the host. If not configured, it
	 * can be a transparent VLAN. So don't report the vlan id.
3434 */ 3435 if (!__vlan_find_dev_deep_rcu(rdev->netdev, 3436 htons(ETH_P_8021Q), vlan_id)) 3437 return false; 3438 return true; 3439 } 3440 3441 static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe, 3442 u16 *vid, u8 *sl) 3443 { 3444 bool ret = false; 3445 u32 metadata; 3446 u16 tpid; 3447 3448 metadata = orig_cqe->raweth_qp1_metadata; 3449 if (orig_cqe->raweth_qp1_flags2 & 3450 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) { 3451 tpid = ((metadata & 3452 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >> 3453 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT); 3454 if (tpid == ETH_P_8021Q) { 3455 *vid = metadata & 3456 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK; 3457 *sl = (metadata & 3458 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >> 3459 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT; 3460 ret = true; 3461 } 3462 } 3463 3464 return ret; 3465 } 3466 3467 static void bnxt_re_process_res_rc_wc(struct ib_wc *wc, 3468 struct bnxt_qplib_cqe *cqe) 3469 { 3470 wc->opcode = IB_WC_RECV; 3471 wc->status = __rc_to_ib_wc_status(cqe->status); 3472 3473 if (cqe->flags & CQ_RES_RC_FLAGS_IMM) 3474 wc->wc_flags |= IB_WC_WITH_IMM; 3475 if (cqe->flags & CQ_RES_RC_FLAGS_INV) 3476 wc->wc_flags |= IB_WC_WITH_INVALIDATE; 3477 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) == 3478 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) 3479 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; 3480 } 3481 3482 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp, 3483 struct ib_wc *wc, 3484 struct bnxt_qplib_cqe *cqe) 3485 { 3486 struct bnxt_re_dev *rdev = gsi_sqp->rdev; 3487 struct bnxt_re_qp *gsi_qp = NULL; 3488 struct bnxt_qplib_cqe *orig_cqe = NULL; 3489 struct bnxt_re_sqp_entries *sqp_entry = NULL; 3490 int nw_type; 3491 u32 tbl_idx; 3492 u16 vlan_id; 3493 u8 sl; 3494 3495 tbl_idx = cqe->wr_id; 3496 3497 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx]; 3498 gsi_qp = sqp_entry->qp1_qp; 3499 orig_cqe = &sqp_entry->cqe; 3500 3501 wc->wr_id = sqp_entry->wrid; 3502 wc->byte_len = orig_cqe->length; 3503 wc->qp = &gsi_qp->ib_qp; 3504 3505 wc->ex.imm_data = orig_cqe->immdata; 3506 wc->src_qp = orig_cqe->src_qp; 3507 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN); 3508 if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) { 3509 if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) { 3510 wc->vlan_id = vlan_id; 3511 wc->sl = sl; 3512 wc->wc_flags |= IB_WC_WITH_VLAN; 3513 } 3514 } 3515 wc->port_num = 1; 3516 wc->vendor_err = orig_cqe->status; 3517 3518 wc->opcode = IB_WC_RECV; 3519 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status); 3520 wc->wc_flags |= IB_WC_GRH; 3521 3522 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags, 3523 orig_cqe->raweth_qp1_flags2); 3524 if (nw_type >= 0) { 3525 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type); 3526 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE; 3527 } 3528 } 3529 3530 static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp, 3531 struct ib_wc *wc, 3532 struct bnxt_qplib_cqe *cqe) 3533 { 3534 struct bnxt_re_dev *rdev; 3535 u16 vlan_id = 0; 3536 u8 nw_type; 3537 3538 rdev = qp->rdev; 3539 wc->opcode = IB_WC_RECV; 3540 wc->status = __rc_to_ib_wc_status(cqe->status); 3541 3542 if (cqe->flags & CQ_RES_UD_FLAGS_IMM) 3543 wc->wc_flags |= IB_WC_WITH_IMM; 3544 /* report only on GSI QP for Thor */ 3545 if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) { 3546 wc->wc_flags |= IB_WC_GRH; 3547 memcpy(wc->smac, cqe->smac, ETH_ALEN); 3548 wc->wc_flags |= IB_WC_WITH_SMAC; 3549 if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) { 3550 vlan_id = 
(cqe->cfa_meta & 0xFFF); 3551 } 3552 /* Mark only if vlan_id is non zero */ 3553 if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) { 3554 wc->vlan_id = vlan_id; 3555 wc->wc_flags |= IB_WC_WITH_VLAN; 3556 } 3557 nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >> 3558 CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT; 3559 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type); 3560 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE; 3561 } 3562 3563 } 3564 3565 static int send_phantom_wqe(struct bnxt_re_qp *qp) 3566 { 3567 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp; 3568 unsigned long flags; 3569 int rc = 0; 3570 3571 spin_lock_irqsave(&qp->sq_lock, flags); 3572 3573 rc = bnxt_re_bind_fence_mw(lib_qp); 3574 if (!rc) { 3575 lib_qp->sq.phantom_wqe_cnt++; 3576 ibdev_dbg(&qp->rdev->ibdev, 3577 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n", 3578 lib_qp->id, lib_qp->sq.hwq.prod, 3579 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq), 3580 lib_qp->sq.phantom_wqe_cnt); 3581 } 3582 3583 spin_unlock_irqrestore(&qp->sq_lock, flags); 3584 return rc; 3585 } 3586 3587 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) 3588 { 3589 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); 3590 struct bnxt_re_qp *qp, *sh_qp; 3591 struct bnxt_qplib_cqe *cqe; 3592 int i, ncqe, budget; 3593 struct bnxt_qplib_q *sq; 3594 struct bnxt_qplib_qp *lib_qp; 3595 u32 tbl_idx; 3596 struct bnxt_re_sqp_entries *sqp_entry = NULL; 3597 unsigned long flags; 3598 3599 /* User CQ; the only processing we do is to 3600 * complete any pending CQ resize operation. 3601 */ 3602 if (cq->umem) { 3603 if (cq->resize_umem) 3604 bnxt_re_resize_cq_complete(cq); 3605 return 0; 3606 } 3607 3608 spin_lock_irqsave(&cq->cq_lock, flags); 3609 budget = min_t(u32, num_entries, cq->max_cql); 3610 num_entries = budget; 3611 if (!cq->cql) { 3612 ibdev_err(&cq->rdev->ibdev, "POLL CQ : no CQL to use"); 3613 goto exit; 3614 } 3615 cqe = &cq->cql[0]; 3616 while (budget) { 3617 lib_qp = NULL; 3618 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp); 3619 if (lib_qp) { 3620 sq = &lib_qp->sq; 3621 if (sq->send_phantom) { 3622 qp = container_of(lib_qp, 3623 struct bnxt_re_qp, qplib_qp); 3624 if (send_phantom_wqe(qp) == -ENOMEM) 3625 ibdev_err(&cq->rdev->ibdev, 3626 "Phantom failed! 
Scheduled to send again\n"); 3627 else 3628 sq->send_phantom = false; 3629 } 3630 } 3631 if (ncqe < budget) 3632 ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq, 3633 cqe + ncqe, 3634 budget - ncqe); 3635 3636 if (!ncqe) 3637 break; 3638 3639 for (i = 0; i < ncqe; i++, cqe++) { 3640 /* Transcribe each qplib_wqe back to ib_wc */ 3641 memset(wc, 0, sizeof(*wc)); 3642 3643 wc->wr_id = cqe->wr_id; 3644 wc->byte_len = cqe->length; 3645 qp = container_of 3646 ((struct bnxt_qplib_qp *) 3647 (unsigned long)(cqe->qp_handle), 3648 struct bnxt_re_qp, qplib_qp); 3649 wc->qp = &qp->ib_qp; 3650 wc->ex.imm_data = cqe->immdata; 3651 wc->src_qp = cqe->src_qp; 3652 memcpy(wc->smac, cqe->smac, ETH_ALEN); 3653 wc->port_num = 1; 3654 wc->vendor_err = cqe->status; 3655 3656 switch (cqe->opcode) { 3657 case CQ_BASE_CQE_TYPE_REQ: 3658 sh_qp = qp->rdev->gsi_ctx.gsi_sqp; 3659 if (sh_qp && 3660 qp->qplib_qp.id == sh_qp->qplib_qp.id) { 3661 /* Handle this completion with 3662 * the stored completion 3663 */ 3664 memset(wc, 0, sizeof(*wc)); 3665 continue; 3666 } 3667 bnxt_re_process_req_wc(wc, cqe); 3668 break; 3669 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1: 3670 if (!cqe->status) { 3671 int rc = 0; 3672 3673 rc = bnxt_re_process_raw_qp_pkt_rx 3674 (qp, cqe); 3675 if (!rc) { 3676 memset(wc, 0, sizeof(*wc)); 3677 continue; 3678 } 3679 cqe->status = -1; 3680 } 3681 /* Errors need not be looped back. 3682 * But change the wr_id to the one 3683 * stored in the table 3684 */ 3685 tbl_idx = cqe->wr_id; 3686 sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx]; 3687 wc->wr_id = sqp_entry->wrid; 3688 bnxt_re_process_res_rawqp1_wc(wc, cqe); 3689 break; 3690 case CQ_BASE_CQE_TYPE_RES_RC: 3691 bnxt_re_process_res_rc_wc(wc, cqe); 3692 break; 3693 case CQ_BASE_CQE_TYPE_RES_UD: 3694 sh_qp = qp->rdev->gsi_ctx.gsi_sqp; 3695 if (sh_qp && 3696 qp->qplib_qp.id == sh_qp->qplib_qp.id) { 3697 /* Handle this completion with 3698 * the stored completion 3699 */ 3700 if (cqe->status) { 3701 continue; 3702 } else { 3703 bnxt_re_process_res_shadow_qp_wc 3704 (qp, wc, cqe); 3705 break; 3706 } 3707 } 3708 bnxt_re_process_res_ud_wc(qp, wc, cqe); 3709 break; 3710 default: 3711 ibdev_err(&cq->rdev->ibdev, 3712 "POLL CQ : type 0x%x not handled", 3713 cqe->opcode); 3714 continue; 3715 } 3716 wc++; 3717 budget--; 3718 } 3719 } 3720 exit: 3721 spin_unlock_irqrestore(&cq->cq_lock, flags); 3722 return num_entries - budget; 3723 } 3724 3725 int bnxt_re_req_notify_cq(struct ib_cq *ib_cq, 3726 enum ib_cq_notify_flags ib_cqn_flags) 3727 { 3728 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); 3729 int type = 0, rc = 0; 3730 unsigned long flags; 3731 3732 spin_lock_irqsave(&cq->cq_lock, flags); 3733 /* Trigger on the very next completion */ 3734 if (ib_cqn_flags & IB_CQ_NEXT_COMP) 3735 type = DBC_DBC_TYPE_CQ_ARMALL; 3736 /* Trigger on the next solicited completion */ 3737 else if (ib_cqn_flags & IB_CQ_SOLICITED) 3738 type = DBC_DBC_TYPE_CQ_ARMSE; 3739 3740 /* Poll to see if there are missed events */ 3741 if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) && 3742 !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) { 3743 rc = 1; 3744 goto exit; 3745 } 3746 bnxt_qplib_req_notify_cq(&cq->qplib_cq, type); 3747 3748 exit: 3749 spin_unlock_irqrestore(&cq->cq_lock, flags); 3750 return rc; 3751 } 3752 3753 /* Memory Regions */ 3754 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags) 3755 { 3756 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); 3757 struct bnxt_re_dev *rdev = pd->rdev; 3758 struct bnxt_re_mr *mr; 3759 int 
int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
			  enum ib_cq_notify_flags ib_cqn_flags)
{
	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
	int type = 0, rc = 0;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	/* Trigger on the very next completion */
	if (ib_cqn_flags & IB_CQ_NEXT_COMP)
		type = DBC_DBC_TYPE_CQ_ARMALL;
	/* Trigger on the next solicited completion */
	else if (ib_cqn_flags & IB_CQ_SOLICITED)
		type = DBC_DBC_TYPE_CQ_ARMSE;

	/* Poll to see if there are missed events */
	if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
	    !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
		rc = 1;
		goto exit;
	}
	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);

exit:
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	return rc;
}

/* Memory Regions */
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr;
	int rc;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;

	/* Allocate and register 0 as the address */
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc)
		goto fail;

	mr->qplib_mr.hwq.level = PBL_LVL_MAX;
	mr->qplib_mr.total_size = -1; /* Infinite length */
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, 0,
			       PAGE_SIZE);
	if (rc)
		goto fail_mr;

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
			       IB_ACCESS_REMOTE_ATOMIC))
		mr->ib_mr.rkey = mr->ib_mr.lkey;
	atomic_inc(&rdev->mr_count);

	return &mr->ib_mr;

fail_mr:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
fail:
	kfree(mr);
	return ERR_PTR(rc);
}

int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
	struct bnxt_re_dev *rdev = mr->rdev;
	int rc;

	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Dereg MR failed: %#x\n", rc);
		return rc;
	}

	if (mr->pages) {
		rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
							&mr->qplib_frpl);
		kfree(mr->pages);
		mr->npages = 0;
		mr->pages = NULL;
	}
	ib_umem_release(mr->ib_umem);

	kfree(mr);
	atomic_dec(&rdev->mr_count);
	return rc;
}
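
/* Fast-register MR support.
 * bnxt_re_set_page() is the per-page callback handed to ib_sg_to_pages()
 * by bnxt_re_map_mr_sg(); it accumulates DMA addresses into mr->pages up
 * to the limit sized at bnxt_re_alloc_mr() time (qplib_frpl.max_pg_ptrs).
 */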
static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);

	if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;
	return 0;
}

int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);

	mr->npages = 0;
	return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
}

struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
			       u32 max_num_sg)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr = NULL;
	int rc;

	if (type != IB_MR_TYPE_MEM_REG) {
		ibdev_dbg(&rdev->ibdev, "MR type 0x%x not supported", type);
		return ERR_PTR(-EINVAL);
	}
	if (max_num_sg > MAX_PBL_LVL_1_PGS)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;

	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc)
		goto bail;

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->ib_mr.rkey = mr->ib_mr.lkey;

	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
	if (!mr->pages) {
		rc = -ENOMEM;
		goto fail;
	}
	rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
						 &mr->qplib_frpl, max_num_sg);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate HW FR page list");
		goto fail_mr;
	}

	atomic_inc(&rdev->mr_count);
	return &mr->ib_mr;

fail_mr:
	kfree(mr->pages);
fail:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
bail:
	kfree(mr);
	return ERR_PTR(rc);
}

struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mw *mw;
	int rc;

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	if (!mw)
		return ERR_PTR(-ENOMEM);
	mw->rdev = rdev;
	mw->qplib_mw.pd = &pd->qplib_pd;

	mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
			     CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
			     CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Allocate MW failed!");
		goto fail;
	}
	mw->ib_mw.rkey = mw->qplib_mw.rkey;

	atomic_inc(&rdev->mw_count);
	return &mw->ib_mw;

fail:
	kfree(mw);
	return ERR_PTR(rc);
}

int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
{
	struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
	struct bnxt_re_dev *rdev = mw->rdev;
	int rc;

	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Free MW failed: %#x\n", rc);
		return rc;
	}

	kfree(mw);
	atomic_dec(&rdev->mw_count);
	return rc;
}

/* uverbs */
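/* Register a user memory region: pin the user buffer with ib_umem_get(),
 * pick the best supported page size for the umem, and program the HW MR
 * through bnxt_qplib_reg_mr(). On failure the umem and the HW MR are
 * released in reverse order of acquisition.
 */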
register user MR"); 4006 goto free_umem; 4007 } 4008 4009 mr->ib_mr.lkey = mr->qplib_mr.lkey; 4010 mr->ib_mr.rkey = mr->qplib_mr.lkey; 4011 atomic_inc(&rdev->mr_count); 4012 4013 return &mr->ib_mr; 4014 free_umem: 4015 ib_umem_release(umem); 4016 free_mrw: 4017 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); 4018 free_mr: 4019 kfree(mr); 4020 return ERR_PTR(rc); 4021 } 4022 4023 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata) 4024 { 4025 struct ib_device *ibdev = ctx->device; 4026 struct bnxt_re_ucontext *uctx = 4027 container_of(ctx, struct bnxt_re_ucontext, ib_uctx); 4028 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); 4029 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; 4030 struct bnxt_re_user_mmap_entry *entry; 4031 struct bnxt_re_uctx_resp resp = {}; 4032 u32 chip_met_rev_num = 0; 4033 int rc; 4034 4035 ibdev_dbg(ibdev, "ABI version requested %u", ibdev->ops.uverbs_abi_ver); 4036 4037 if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) { 4038 ibdev_dbg(ibdev, " is different from the device %d ", 4039 BNXT_RE_ABI_VERSION); 4040 return -EPERM; 4041 } 4042 4043 uctx->rdev = rdev; 4044 4045 uctx->shpg = (void *)__get_free_page(GFP_KERNEL); 4046 if (!uctx->shpg) { 4047 rc = -ENOMEM; 4048 goto fail; 4049 } 4050 spin_lock_init(&uctx->sh_lock); 4051 4052 resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX; 4053 chip_met_rev_num = rdev->chip_ctx->chip_num; 4054 chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) << 4055 BNXT_RE_CHIP_ID0_CHIP_REV_SFT; 4056 chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) << 4057 BNXT_RE_CHIP_ID0_CHIP_MET_SFT; 4058 resp.chip_id0 = chip_met_rev_num; 4059 /*Temp, Use xa_alloc instead */ 4060 resp.dev_id = rdev->en_dev->pdev->devfn; 4061 resp.max_qp = rdev->qplib_ctx.qpc_count; 4062 resp.pg_size = PAGE_SIZE; 4063 resp.cqe_sz = sizeof(struct cq_base); 4064 resp.max_cqd = dev_attr->max_cq_wqes; 4065 4066 resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE; 4067 resp.mode = rdev->chip_ctx->modes.wqe_mode; 4068 4069 if (rdev->chip_ctx->modes.db_push) 4070 resp.comp_mask |= BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED; 4071 4072 entry = bnxt_re_mmap_entry_insert(uctx, 0, BNXT_RE_MMAP_SH_PAGE, NULL); 4073 if (!entry) { 4074 rc = -ENOMEM; 4075 goto cfail; 4076 } 4077 uctx->shpage_mmap = &entry->rdma_entry; 4078 4079 rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp))); 4080 if (rc) { 4081 ibdev_err(ibdev, "Failed to copy user context"); 4082 rc = -EFAULT; 4083 goto cfail; 4084 } 4085 4086 return 0; 4087 cfail: 4088 free_page((unsigned long)uctx->shpg); 4089 uctx->shpg = NULL; 4090 fail: 4091 return rc; 4092 } 4093 4094 void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx) 4095 { 4096 struct bnxt_re_ucontext *uctx = container_of(ib_uctx, 4097 struct bnxt_re_ucontext, 4098 ib_uctx); 4099 4100 struct bnxt_re_dev *rdev = uctx->rdev; 4101 4102 rdma_user_mmap_entry_remove(uctx->shpage_mmap); 4103 uctx->shpage_mmap = NULL; 4104 if (uctx->shpg) 4105 free_page((unsigned long)uctx->shpg); 4106 4107 if (uctx->dpi.dbr) { 4108 /* Free DPI only if this is the first PD allocated by the 4109 * application and mark the context dpi as NULL 4110 */ 4111 bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->dpi); 4112 uctx->dpi.dbr = NULL; 4113 } 4114 } 4115 4116 /* Helper function to mmap the virtual memory from user app */ 4117 int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma) 4118 { 4119 struct bnxt_re_ucontext *uctx = container_of(ib_uctx, 4120 struct bnxt_re_ucontext, 4121 ib_uctx); 4122 struct 
int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
{
	struct ib_device *ibdev = ctx->device;
	struct bnxt_re_ucontext *uctx =
		container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_user_mmap_entry *entry;
	struct bnxt_re_uctx_resp resp = {};
	u32 chip_met_rev_num = 0;
	int rc;

	ibdev_dbg(ibdev, "ABI version requested %u", ibdev->ops.uverbs_abi_ver);

	if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
		ibdev_dbg(ibdev, "Requested ABI version is different from the device ABI version %d",
			  BNXT_RE_ABI_VERSION);
		return -EPERM;
	}

	uctx->rdev = rdev;

	uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
	if (!uctx->shpg) {
		rc = -ENOMEM;
		goto fail;
	}
	spin_lock_init(&uctx->sh_lock);

	resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX;
	chip_met_rev_num = rdev->chip_ctx->chip_num;
	chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) <<
			     BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
	chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) <<
			     BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
	resp.chip_id0 = chip_met_rev_num;
	/* Temp, use xa_alloc instead */
	resp.dev_id = rdev->en_dev->pdev->devfn;
	resp.max_qp = rdev->qplib_ctx.qpc_count;
	resp.pg_size = PAGE_SIZE;
	resp.cqe_sz = sizeof(struct cq_base);
	resp.max_cqd = dev_attr->max_cq_wqes;

	resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
	resp.mode = rdev->chip_ctx->modes.wqe_mode;

	if (rdev->chip_ctx->modes.db_push)
		resp.comp_mask |= BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED;

	entry = bnxt_re_mmap_entry_insert(uctx, 0, BNXT_RE_MMAP_SH_PAGE, NULL);
	if (!entry) {
		rc = -ENOMEM;
		goto cfail;
	}
	uctx->shpage_mmap = &entry->rdma_entry;

	rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
	if (rc) {
		ibdev_err(ibdev, "Failed to copy user context");
		rc = -EFAULT;
		goto cfail;
	}

	return 0;
cfail:
	free_page((unsigned long)uctx->shpg);
	uctx->shpg = NULL;
fail:
	return rc;
}

void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
{
	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
						     struct bnxt_re_ucontext,
						     ib_uctx);

	struct bnxt_re_dev *rdev = uctx->rdev;

	rdma_user_mmap_entry_remove(uctx->shpage_mmap);
	uctx->shpage_mmap = NULL;
	if (uctx->shpg)
		free_page((unsigned long)uctx->shpg);

	if (uctx->dpi.dbr) {
		/* Free DPI only if this is the first PD allocated by the
		 * application and mark the context dpi as NULL
		 */
		bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->dpi);
		uctx->dpi.dbr = NULL;
	}
}

/* Helper function to mmap the virtual memory from user app */
int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
{
	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
						     struct bnxt_re_ucontext,
						     ib_uctx);
	struct bnxt_re_user_mmap_entry *bnxt_entry;
	struct rdma_user_mmap_entry *rdma_entry;
	int ret = 0;
	u64 pfn;

	rdma_entry = rdma_user_mmap_entry_get(&uctx->ib_uctx, vma);
	if (!rdma_entry)
		return -EINVAL;

	bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
				  rdma_entry);

	switch (bnxt_entry->mmap_flag) {
	case BNXT_RE_MMAP_WC_DB:
		pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
					pgprot_writecombine(vma->vm_page_prot),
					rdma_entry);
		break;
	case BNXT_RE_MMAP_UC_DB:
		pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
					pgprot_noncached(vma->vm_page_prot),
					rdma_entry);
		break;
	case BNXT_RE_MMAP_SH_PAGE:
		ret = vm_insert_page(vma, vma->vm_start, virt_to_page(uctx->shpg));
		break;
	default:
		ret = -EINVAL;
		break;
	}

	rdma_user_mmap_entry_put(rdma_entry);
	return ret;
}

void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct bnxt_re_user_mmap_entry *bnxt_entry;

	bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
				  rdma_entry);

	kfree(bnxt_entry);
}
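
/* Direct-verbs (ioctl) method: allocate a driver page for user space.
 * Only BNXT_RE_ALLOC_WC_PAGE is handled here; when push mode is enabled
 * it allocates a write-combining doorbell page (DPI) and returns the
 * mmap offset, length and DPI index to the caller.
 */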
static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
	enum bnxt_re_alloc_page_type alloc_type;
	struct bnxt_re_user_mmap_entry *entry;
	enum bnxt_re_mmap_flag mmap_flag;
	struct bnxt_qplib_chip_ctx *cctx;
	struct bnxt_re_ucontext *uctx;
	struct bnxt_re_dev *rdev;
	u64 mmap_offset;
	u32 length;
	u32 dpi;
	u64 dbr;
	int err;

	uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
	if (IS_ERR(uctx))
		return PTR_ERR(uctx);

	err = uverbs_get_const(&alloc_type, attrs, BNXT_RE_ALLOC_PAGE_TYPE);
	if (err)
		return err;

	rdev = uctx->rdev;
	cctx = rdev->chip_ctx;

	switch (alloc_type) {
	case BNXT_RE_ALLOC_WC_PAGE:
		if (cctx->modes.db_push) {
			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res, &uctx->wcdpi,
						 uctx, BNXT_QPLIB_DPI_TYPE_WC))
				return -ENOMEM;
			length = PAGE_SIZE;
			dpi = uctx->wcdpi.dpi;
			dbr = (u64)uctx->wcdpi.umdbr;
			mmap_flag = BNXT_RE_MMAP_WC_DB;
		} else {
			return -EINVAL;
		}
		break;
	default:
		return -EOPNOTSUPP;
	}

	entry = bnxt_re_mmap_entry_insert(uctx, dbr, mmap_flag, &mmap_offset);
	if (!entry)
		return -ENOMEM;

	uobj->object = entry;
	uverbs_finalize_uobj_create(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
			     &mmap_offset, sizeof(mmap_offset));
	if (err)
		return err;

	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
			     &length, sizeof(length));
	if (err)
		return err;

	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI,
			     &dpi, sizeof(dpi));
	if (err)
		return err;

	return 0;
}

static int alloc_page_obj_cleanup(struct ib_uobject *uobject,
				  enum rdma_remove_reason why,
				  struct uverbs_attr_bundle *attrs)
{
	struct bnxt_re_user_mmap_entry *entry = uobject->object;
	struct bnxt_re_ucontext *uctx = entry->uctx;

	switch (entry->mmap_flag) {
	case BNXT_RE_MMAP_WC_DB:
		if (uctx && uctx->wcdpi.dbr) {
			struct bnxt_re_dev *rdev = uctx->rdev;

			bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->wcdpi);
			uctx->wcdpi.dbr = NULL;
		}
		break;
	default:
		goto exit;
	}
	rdma_user_mmap_entry_remove(&entry->rdma_entry);
exit:
	return 0;
}

DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_ALLOC_PAGE,
			    UVERBS_ATTR_IDR(BNXT_RE_ALLOC_PAGE_HANDLE,
					    BNXT_RE_OBJECT_ALLOC_PAGE,
					    UVERBS_ACCESS_NEW,
					    UA_MANDATORY),
			    UVERBS_ATTR_CONST_IN(BNXT_RE_ALLOC_PAGE_TYPE,
						 enum bnxt_re_alloc_page_type,
						 UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
						UVERBS_ATTR_TYPE(u64),
						UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
						UVERBS_ATTR_TYPE(u32),
						UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_DPI,
						UVERBS_ATTR_TYPE(u32),
						UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_DESTROY_PAGE,
				    UVERBS_ATTR_IDR(BNXT_RE_DESTROY_PAGE_HANDLE,
						    BNXT_RE_OBJECT_ALLOC_PAGE,
						    UVERBS_ACCESS_DESTROY,
						    UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_ALLOC_PAGE,
			    UVERBS_TYPE_ALLOC_IDR(alloc_page_obj_cleanup),
			    &UVERBS_METHOD(BNXT_RE_METHOD_ALLOC_PAGE),
			    &UVERBS_METHOD(BNXT_RE_METHOD_DESTROY_PAGE));

const struct uapi_definition bnxt_re_uapi_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_ALLOC_PAGE),
	{}
};