/* This file is part of the Emulex RoCE Device Driver for
 * RoCE (RDMA over Converged Ethernet) adapters.
 * Copyright (C) 2012-2015 Emulex. All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 * www.emulex.com
 *
 * This software is available to you under a choice of one of two licenses.
 * You may choose to be licensed under the terms of the GNU General Public
 * License (GPL) Version 2, available from the file COPYING in the main
 * directory of this source tree, or the BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include <rdma/ocrdma-abi.h>

int ocrdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
			struct ib_udata *uhw)
{
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	memset(attr, 0, sizeof *attr);
	memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
	       min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
	addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
			    dev->nic_info.mac_addr);
	attr->max_mr_size = dev->attr.max_mr_size;
	attr->page_size_cap = 0xffff000;
	attr->vendor_id = dev->nic_info.pdev->vendor;
	attr->vendor_part_id = dev->nic_info.pdev->device;
	attr->hw_ver = dev->asic_id;
	attr->max_qp = dev->attr.max_qp;
	attr->max_ah = OCRDMA_MAX_AH;
	attr->max_qp_wr = dev->attr.max_wqe;

	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
				 IB_DEVICE_RC_RNR_NAK_GEN |
				 IB_DEVICE_SHUTDOWN_PORT |
				 IB_DEVICE_SYS_IMAGE_GUID |
				 IB_DEVICE_MEM_MGT_EXTENSIONS;
	attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
	attr->max_send_sge = dev->attr.max_send_sge;
	attr->max_recv_sge = dev->attr.max_recv_sge;
	attr->max_sge_rd = dev->attr.max_rdma_sge;
	attr->max_cq = dev->attr.max_cq;
	attr->max_cqe = dev->attr.max_cqe;
	attr->max_mr = dev->attr.max_mr;
	attr->max_mw = dev->attr.max_mw;
	attr->max_pd = dev->attr.max_pd;
	attr->atomic_cap = 0;
	attr->max_qp_rd_atom =
	    min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
	attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
	attr->max_srq = dev->attr.max_srq;
	attr->max_srq_sge = dev->attr.max_srq_sge;
	attr->max_srq_wr = dev->attr.max_rqe;
	attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
	attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
	attr->max_pkeys = 1;
	return 0;
}
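/* Map the PHY link speed reported by firmware onto the closest IB
 * speed/width pair with the same aggregate rate: 1 Gbps -> SDR x1,
 * 10 Gbps -> QDR x1, 20 Gbps -> DDR x4 (4 * 5 Gbps), 40 Gbps -> QDR x4;
 * anything unrecognized falls back to SDR x1.
 */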
static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
					    u16 *ib_speed, u8 *ib_width)
{
	int status;
	u8 speed;

	status = ocrdma_mbx_get_link_speed(dev, &speed, NULL);
	if (status)
		speed = OCRDMA_PHYS_LINK_SPEED_ZERO;

	switch (speed) {
	case OCRDMA_PHYS_LINK_SPEED_1GBPS:
		*ib_speed = IB_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_10GBPS:
		*ib_speed = IB_SPEED_QDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_20GBPS:
		*ib_speed = IB_SPEED_DDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_40GBPS:
		*ib_speed = IB_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	default:
		/* Unsupported */
		*ib_speed = IB_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
	}
}

int ocrdma_query_port(struct ib_device *ibdev,
		      u32 port, struct ib_port_attr *props)
{
	enum ib_port_state port_state;
	struct ocrdma_dev *dev;
	struct net_device *netdev;

	/* props is zeroed by the caller; avoid zeroing it here */
	dev = get_ocrdma_dev(ibdev);
	netdev = dev->nic_info.netdev;
	if (netif_running(netdev) && netif_oper_up(netdev)) {
		port_state = IB_PORT_ACTIVE;
		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	} else {
		port_state = IB_PORT_DOWN;
		props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
	}
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(netdev->mtu);
	props->lid = 0;
	props->lmc = 0;
	props->sm_lid = 0;
	props->sm_sl = 0;
	props->state = port_state;
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				IB_PORT_DEVICE_MGMT_SUP |
				IB_PORT_VENDOR_CLASS_SUP;
	props->ip_gids = true;
	props->gid_tbl_len = OCRDMA_MAX_SGID;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	get_link_speed_and_width(dev, &props->active_speed,
				 &props->active_width);
	props->max_msg_sz = 0x80000000;
	props->max_vl_num = 4;
	return 0;
}

static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			   unsigned long len)
{
	struct ocrdma_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (mm == NULL)
		return -ENOMEM;
	mm->key.phy_addr = phy_addr;
	mm->key.len = len;
	INIT_LIST_HEAD(&mm->entry);

	mutex_lock(&uctx->mm_list_lock);
	list_add_tail(&mm->entry, &uctx->mm_head);
	mutex_unlock(&uctx->mm_list_lock);
	return 0;
}

static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			    unsigned long len)
{
	struct ocrdma_mm *mm, *tmp;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
			continue;

		list_del(&mm->entry);
		kfree(mm);
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
}

static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			       unsigned long len)
{
	bool found = false;
	struct ocrdma_mm *mm;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry(mm, &uctx->mm_head, entry) {
		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
			continue;

		found = true;
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
	return found;
}

static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
{
	u16 pd_bitmap_idx = 0;
	unsigned long *pd_bitmap;

	if (dpp_pool) {
		pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
						    dev->pd_mgr->max_dpp_pd);
		__set_bit(pd_bitmap_idx, pd_bitmap);
		dev->pd_mgr->pd_dpp_count++;
		if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
			dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
	} else {
		pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
						    dev->pd_mgr->max_normal_pd);
		__set_bit(pd_bitmap_idx, pd_bitmap);
		dev->pd_mgr->pd_norm_count++;
		if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
			dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
	}
	return pd_bitmap_idx;
}
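/* Note: pd_dpp_thrsh/pd_norm_thrsh updated above are high-water marks:
 * they track the largest number of DPP/normal PDs in use at once and
 * are never decremented on free, presumably for resource statistics.
 */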
static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
				     bool dpp_pool)
{
	u16 pd_count;
	u16 pd_bit_index;

	pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
			      dev->pd_mgr->pd_norm_count;
	if (pd_count == 0)
		return -EINVAL;

	if (dpp_pool) {
		pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
		if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
			return -EINVAL;
		} else {
			__clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
			dev->pd_mgr->pd_dpp_count--;
		}
	} else {
		pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
		if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
			return -EINVAL;
		} else {
			__clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
			dev->pd_mgr->pd_norm_count--;
		}
	}

	return 0;
}

static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
			     bool dpp_pool)
{
	int status;

	mutex_lock(&dev->dev_lock);
	status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
	mutex_unlock(&dev->dev_lock);
	return status;
}

static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
	u16 pd_idx = 0;
	int status = 0;

	mutex_lock(&dev->dev_lock);
	if (pd->dpp_enabled) {
		/* try allocating DPP PD, if not available then normal PD */
		if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
			pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
			pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
		} else if (dev->pd_mgr->pd_norm_count <
			   dev->pd_mgr->max_normal_pd) {
			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
			pd->dpp_enabled = false;
		} else {
			status = -EINVAL;
		}
	} else {
		if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
		} else {
			status = -EINVAL;
		}
	}
	mutex_unlock(&dev->dev_lock);
	return status;
}

/*
 * NOTE:
 *
 * ocrdma_ucontext must be used here because this function is also
 * called from ocrdma_alloc_ucontext, where ib_udata does not carry a
 * valid ib_ucontext pointer. ib_uverbs_get_context does not call the
 * uobj_{alloc|get_xxx} helpers that store the ib_ucontext in the
 * uverbs_attr_bundle wrapping the ib_udata, so ib_udata does NOT
 * imply a valid ib_ucontext here!
 */
static int _ocrdma_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
			    struct ocrdma_ucontext *uctx,
			    struct ib_udata *udata)
{
	int status;

	if (udata && uctx && dev->attr.max_dpp_pds) {
		pd->dpp_enabled =
			ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
		pd->num_dpp_qp =
			pd->dpp_enabled ? (dev->nic_info.db_page_size /
					   dev->attr.wqe_size) : 0;
	}

	if (dev->pd_mgr->pd_prealloc_valid)
		return ocrdma_get_pd_num(dev, pd);

retry:
	status = ocrdma_mbx_alloc_pd(dev, pd);
	if (status) {
		if (pd->dpp_enabled) {
			pd->dpp_enabled = false;
			pd->num_dpp_qp = 0;
			goto retry;
		}
		return status;
	}

	return 0;
}
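/* Each user context owns one pre-allocated "context PD"
 * (uctx->cntxt_pd). ocrdma_alloc_pd() hands that PD out first via
 * ocrdma_get_ucontext_pd(); a fresh PD is taken from the firmware (or
 * the pre-allocated pool) only once the context PD is already in use.
 */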
static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
				 struct ocrdma_pd *pd)
{
	return (uctx->cntxt_pd == pd);
}

static void _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
			       struct ocrdma_pd *pd)
{
	if (dev->pd_mgr->pd_prealloc_valid)
		ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
	else
		ocrdma_mbx_dealloc_pd(dev, pd);
}

static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
				    struct ocrdma_ucontext *uctx,
				    struct ib_udata *udata)
{
	struct ib_device *ibdev = &dev->ibdev;
	struct ib_pd *pd;
	int status;

	pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
	if (!pd)
		return -ENOMEM;

	pd->device = ibdev;
	uctx->cntxt_pd = get_ocrdma_pd(pd);

	status = _ocrdma_alloc_pd(dev, uctx->cntxt_pd, uctx, udata);
	if (status) {
		kfree(uctx->cntxt_pd);
		goto err;
	}

	uctx->cntxt_pd->uctx = uctx;
	uctx->cntxt_pd->ibpd.device = &dev->ibdev;
err:
	return status;
}

static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	struct ocrdma_pd *pd = uctx->cntxt_pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

	if (uctx->pd_in_use) {
		pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
		       __func__, dev->id, pd->id);
	}
	uctx->cntxt_pd = NULL;
	_ocrdma_dealloc_pd(dev, pd);
	kfree(pd);
}

static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	struct ocrdma_pd *pd = NULL;

	mutex_lock(&uctx->mm_list_lock);
	if (!uctx->pd_in_use) {
		uctx->pd_in_use = true;
		pd = uctx->cntxt_pd;
	}
	mutex_unlock(&uctx->mm_list_lock);

	return pd;
}

static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	mutex_lock(&uctx->mm_list_lock);
	uctx->pd_in_use = false;
	mutex_unlock(&uctx->mm_list_lock);
}
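/* Allocating a user context also sets up a DMA-coherent AH table
 * shared with user space: its physical address and length are
 * returned in the ucontext response and registered as an mmap key,
 * so the library can map it later through ocrdma_mmap().
 */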
int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
	struct ib_device *ibdev = uctx->device;
	int status;
	struct ocrdma_ucontext *ctx = get_ocrdma_ucontext(uctx);
	struct ocrdma_alloc_ucontext_resp resp = {};
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);

	if (!udata)
		return -EFAULT;
	INIT_LIST_HEAD(&ctx->mm_head);
	mutex_init(&ctx->mm_list_lock);

	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
					    &ctx->ah_tbl.pa, GFP_KERNEL);
	if (!ctx->ah_tbl.va)
		return -ENOMEM;

	ctx->ah_tbl.len = map_len;

	resp.ah_tbl_len = ctx->ah_tbl.len;
	resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);

	status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
	if (status)
		goto map_err;

	status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
	if (status)
		goto pd_err;

	resp.dev_id = dev->id;
	resp.max_inline_data = dev->attr.max_inline_data;
	resp.wqe_size = dev->attr.wqe_size;
	resp.rqe_size = dev->attr.rqe_size;
	resp.dpp_wqe_size = dev->attr.wqe_size;

	memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
	status = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (status)
		goto cpy_err;
	return 0;

cpy_err:
	ocrdma_dealloc_ucontext_pd(ctx);
pd_err:
	ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err:
	dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
			  ctx->ah_tbl.pa);
	return status;
}

void ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	struct ocrdma_mm *mm, *tmp;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
	struct pci_dev *pdev = dev->nic_info.pdev;

	ocrdma_dealloc_ucontext_pd(uctx);

	ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
	dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
			  uctx->ah_tbl.pa);

	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		list_del(&mm->entry);
		kfree(mm);
	}
}

int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
	struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
	u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
	unsigned long len = (vma->vm_end - vma->vm_start);
	int status;
	bool found;

	if (vma->vm_start & (PAGE_SIZE - 1))
		return -EINVAL;
	found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
	if (!found)
		return -EINVAL;

	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
	     dev->nic_info.db_total_size)) &&
	    (len <= dev->nic_info.db_page_size)) {
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else if (dev->nic_info.dpp_unmapped_len &&
		   (vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
		   (vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
				      dev->nic_info.dpp_unmapped_len)) &&
		   (len <= dev->nic_info.dpp_unmapped_len)) {
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else {
		status = remap_pfn_range(vma, vma->vm_start,
					 vma->vm_pgoff, len,
					 vma->vm_page_prot);
	}
	return status;
}
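/* Doorbell/DPP layout assumed by the PD response below: each PD gets
 * one doorbell page at ocrdma_get_db_addr(dev, pd->id) and, when DPP
 * is enabled, one write-combining DPP page at
 * dpp_unmapped_addr + pd->id * PAGE_SIZE. Both are registered as
 * mmap keys for user space.
 */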
static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
				struct ib_udata *udata)
{
	int status;
	u64 db_page_addr;
	u64 dpp_page_addr = 0;
	u32 db_page_size;
	struct ocrdma_alloc_pd_uresp rsp;
	struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct ocrdma_ucontext, ibucontext);

	memset(&rsp, 0, sizeof(rsp));
	rsp.id = pd->id;
	rsp.dpp_enabled = pd->dpp_enabled;
	db_page_addr = ocrdma_get_db_addr(dev, pd->id);
	db_page_size = dev->nic_info.db_page_size;

	status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
	if (status)
		return status;

	if (pd->dpp_enabled) {
		dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
				(pd->id * PAGE_SIZE);
		status = ocrdma_add_mmap(uctx, dpp_page_addr,
					 PAGE_SIZE);
		if (status)
			goto dpp_map_err;
		rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
		rsp.dpp_page_addr_lo = dpp_page_addr;
	}

	status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
	if (status)
		goto ucopy_err;

	pd->uctx = uctx;
	return 0;

ucopy_err:
	/* use uctx here: pd->uctx is only assigned on success */
	if (pd->dpp_enabled)
		ocrdma_del_mmap(uctx, dpp_page_addr, PAGE_SIZE);
dpp_map_err:
	ocrdma_del_mmap(uctx, db_page_addr, db_page_size);
	return status;
}

int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct ib_device *ibdev = ibpd->device;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct ocrdma_pd *pd;
	int status;
	u8 is_uctx_pd = false;
	struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct ocrdma_ucontext, ibucontext);

	if (udata) {
		pd = ocrdma_get_ucontext_pd(uctx);
		if (pd) {
			is_uctx_pd = true;
			goto pd_mapping;
		}
	}

	pd = get_ocrdma_pd(ibpd);
	status = _ocrdma_alloc_pd(dev, pd, uctx, udata);
	if (status)
		goto exit;

pd_mapping:
	if (udata) {
		status = ocrdma_copy_pd_uresp(dev, pd, udata);
		if (status)
			goto err;
	}
	return 0;

err:
	if (is_uctx_pd)
		ocrdma_release_ucontext_pd(uctx);
	else
		_ocrdma_dealloc_pd(dev, pd);
exit:
	return status;
}

int ocrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_ucontext *uctx = NULL;
	u64 usr_db;

	uctx = pd->uctx;
	if (uctx) {
		u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
			     (pd->id * PAGE_SIZE);

		if (pd->dpp_enabled)
			ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
		usr_db = ocrdma_get_db_addr(dev, pd->id);
		ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);

		if (is_ucontext_pd(uctx, pd)) {
			ocrdma_release_ucontext_pd(uctx);
			return 0;
		}
	}
	_ocrdma_dealloc_pd(dev, pd);
	return 0;
}

static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			     u32 pdid, int acc, u32 num_pbls, u32 addr_check)
{
	int status;

	mr->hwmr.fr_mr = 0;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	mr->hwmr.num_pbls = num_pbls;

	status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
	if (status)
		return status;

	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;
	return 0;
}
struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
	int status;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
		pr_err("%s err, invalid access rights\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
				   OCRDMA_ADDR_CHECK_DISABLE);
	if (status) {
		kfree(mr);
		return ERR_PTR(status);
	}

	return &mr->ibmr;
}

static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
				   struct ocrdma_hw_mr *mr)
{
	struct pci_dev *pdev = dev->nic_info.pdev;
	int i = 0;

	if (mr->pbl_table) {
		for (i = 0; i < mr->num_pbls; i++) {
			if (!mr->pbl_table[i].va)
				continue;
			dma_free_coherent(&pdev->dev, mr->pbl_size,
					  mr->pbl_table[i].va,
					  mr->pbl_table[i].pa);
		}
		kfree(mr->pbl_table);
		mr->pbl_table = NULL;
	}
}

static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			       u32 num_pbes)
{
	u32 num_pbls = 0;
	u32 idx = 0;
	int status = 0;
	u32 pbl_size;

	do {
		pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
		if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
			status = -EFAULT;
			break;
		}
		num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
		num_pbls = num_pbls / (pbl_size / sizeof(u64));
		idx++;
	} while (num_pbls >= dev->attr.max_num_mr_pbl);

	mr->hwmr.num_pbes = num_pbes;
	mr->hwmr.num_pbls = num_pbls;
	mr->hwmr.pbl_size = pbl_size;
	return status;
}
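/* Worked example for ocrdma_get_pbl_info() above, assuming
 * OCRDMA_MIN_HPAGE_SIZE is 4K and one PBE per u64: a 4K PBL holds
 * 512 PBEs, so 10000 PBEs need roundup(10000, 512) / 512 = 20 PBLs.
 * If 20 >= max_num_mr_pbl, the PBL size doubles (8K -> 10 PBLs, and
 * so on) until the count fits or pbl_size exceeds MAX_OCRDMA_PBL_SIZE.
 */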
static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
{
	int status = 0;
	int i;
	u32 dma_len = mr->pbl_size;
	struct pci_dev *pdev = dev->nic_info.pdev;
	void *va;
	dma_addr_t pa;

	mr->pbl_table = kcalloc(mr->num_pbls, sizeof(struct ocrdma_pbl),
				GFP_KERNEL);
	if (!mr->pbl_table)
		return -ENOMEM;

	for (i = 0; i < mr->num_pbls; i++) {
		va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
		if (!va) {
			ocrdma_free_mr_pbl_tbl(dev, mr);
			status = -ENOMEM;
			break;
		}
		mr->pbl_table[i].va = va;
		mr->pbl_table[i].pa = pa;
	}
	return status;
}

static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr)
{
	struct ocrdma_pbe *pbe;
	struct ib_block_iter biter;
	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
	int pbe_cnt;
	u64 pg_addr;

	if (!mr->hwmr.num_pbes)
		return;

	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
	pbe_cnt = 0;

	rdma_umem_for_each_dma_block(mr->umem, &biter, PAGE_SIZE) {
		/* store the page address in pbe */
		pg_addr = rdma_block_iter_dma_address(&biter);
		pbe->pa_lo = cpu_to_le32(pg_addr);
		pbe->pa_hi = cpu_to_le32(upper_32_bits(pg_addr));
		pbe_cnt += 1;
		pbe++;

		/* if the given pbl is full of pbes, move to the next pbl */
		if (pbe_cnt == (mr->hwmr.pbl_size / sizeof(u64))) {
			pbl_tbl++;
			pbe = (struct ocrdma_pbe *)pbl_tbl->va;
			pbe_cnt = 0;
		}
	}
}

struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
				 u64 usr_addr, int acc, struct ib_dmah *dmah,
				 struct ib_udata *udata)
{
	int status = -ENOMEM;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd;

	if (dmah)
		return ERR_PTR(-EOPNOTSUPP);

	pd = get_ocrdma_pd(ibpd);

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(status);
	mr->umem = ib_umem_get(ibpd->device, start, len, acc);
	if (IS_ERR(mr->umem)) {
		status = -EFAULT;
		goto umem_err;
	}
	status = ocrdma_get_pbl_info(
		dev, mr, ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE));
	if (status)
		goto umem_err;

	mr->hwmr.pbe_size = PAGE_SIZE;
	mr->hwmr.va = usr_addr;
	mr->hwmr.len = len;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
	if (status)
		goto umem_err;
	build_user_pbes(dev, mr);
	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
	if (status)
		goto mbx_err;
	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;

	return &mr->ibmr;

mbx_err:
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
umem_err:
	/* don't leak the pinned umem if registration fails midway */
	if (!IS_ERR_OR_NULL(mr->umem))
		ib_umem_release(mr->umem);
	kfree(mr);
	return ERR_PTR(status);
}

int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
	struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);

	(void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);

	kfree(mr->pages);
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);

	/* it could be user registered memory. */
	ib_umem_release(mr->umem);
	kfree(mr);

	/* Don't stop cleanup, in case FW is unresponsive */
	if (dev->mqe_ctx.fw_error_state) {
		pr_err("%s(%d) fw not responding.\n",
		       __func__, dev->id);
	}
	return 0;
}
static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
				struct ib_udata *udata)
{
	int status;
	struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct ocrdma_ucontext, ibucontext);
	struct ocrdma_create_cq_uresp uresp;

	/* this must be user flow! */
	if (!udata)
		return -EINVAL;

	memset(&uresp, 0, sizeof(uresp));
	uresp.cq_id = cq->id;
	uresp.page_size = PAGE_ALIGN(cq->len);
	uresp.num_pages = 1;
	uresp.max_hw_cqe = cq->max_hw_cqe;
	uresp.page_addr[0] = virt_to_phys(cq->va);
	uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.phase_change = cq->phase_change ? 1 : 0;
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) copy error cqid=0x%x.\n",
		       __func__, dev->id, cq->id);
		goto err;
	}
	status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
	if (status)
		goto err;
	status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
	if (status) {
		ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
		goto err;
	}
	cq->ucontext = uctx;
err:
	return status;
}

int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		     struct uverbs_attr_bundle *attrs)
{
	struct ib_udata *udata = &attrs->driver_udata;
	struct ib_device *ibdev = ibcq->device;
	int entries = attr->cqe;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct ocrdma_ucontext, ibucontext);
	u16 pd_id = 0;
	int status;
	struct ocrdma_create_cq_ureq ureq;

	if (attr->flags)
		return -EOPNOTSUPP;

	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return -EFAULT;
	} else
		ureq.dpp_cq = 0;

	spin_lock_init(&cq->cq_lock);
	spin_lock_init(&cq->comp_handler_lock);
	INIT_LIST_HEAD(&cq->sq_head);
	INIT_LIST_HEAD(&cq->rq_head);

	if (udata)
		pd_id = uctx->cntxt_pd->id;

	status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
	if (status)
		return status;

	if (udata) {
		status = ocrdma_copy_cq_uresp(dev, cq, udata);
		if (status)
			goto ctx_err;
	}
	cq->phase = OCRDMA_CQE_VALID;
	dev->cq_tbl[cq->id] = cq;
	return 0;

ctx_err:
	ocrdma_mbx_destroy_cq(dev, cq);
	return status;
}

int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
		     struct ib_udata *udata)
{
	int status = 0;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);

	if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
		status = -EINVAL;
		return status;
	}
	ibcq->cqe = new_cnt;
	return status;
}
static void ocrdma_flush_cq(struct ocrdma_cq *cq)
{
	int cqe_cnt;
	int valid_count = 0;
	unsigned long flags;

	struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
	struct ocrdma_cqe *cqe = NULL;

	cqe = cq->va;
	cqe_cnt = cq->cqe_cnt;

	/* The last irq might have scheduled a polling thread;
	 * sync up with it before hard flushing.
	 */
	spin_lock_irqsave(&cq->cq_lock, flags);
	while (cqe_cnt) {
		if (is_cqe_valid(cq, cqe))
			valid_count++;
		cqe++;
		cqe_cnt--;
	}
	ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count);
	spin_unlock_irqrestore(&cq->cq_lock, flags);
}

int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_eq *eq = NULL;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	int pdid = 0;
	u32 irq, indx;

	dev->cq_tbl[cq->id] = NULL;
	indx = ocrdma_get_eq_table_index(dev, cq->eqn);

	eq = &dev->eq_tbl[indx];
	irq = ocrdma_get_irq(dev, eq);
	synchronize_irq(irq);
	ocrdma_flush_cq(cq);

	ocrdma_mbx_destroy_cq(dev, cq);
	if (cq->ucontext) {
		pdid = cq->ucontext->cntxt_pd->id;
		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
				PAGE_ALIGN(cq->len));
		ocrdma_del_mmap(cq->ucontext,
				ocrdma_get_db_addr(dev, pdid),
				dev->nic_info.db_page_size);
	}
	return 0;
}

static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	int status = -EINVAL;

	if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
		dev->qp_tbl[qp->id] = qp;
		status = 0;
	}
	return status;
}

static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	dev->qp_tbl[qp->id] = NULL;
}
static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
				  struct ib_qp_init_attr *attrs,
				  struct ib_udata *udata)
{
	if ((attrs->qp_type != IB_QPT_GSI) &&
	    (attrs->qp_type != IB_QPT_RC) &&
	    (attrs->qp_type != IB_QPT_UC) &&
	    (attrs->qp_type != IB_QPT_UD)) {
		pr_err("%s(%d) unsupported qp type=0x%x requested\n",
		       __func__, dev->id, attrs->qp_type);
		return -EOPNOTSUPP;
	}
	/* Skip the check for QP1 to support CM size of 128 */
	if ((attrs->qp_type != IB_QPT_GSI) &&
	    (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
		pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_wr);
		pr_err("%s(%d) supported send_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_wqe);
		return -EINVAL;
	}
	if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
		pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_wr);
		pr_err("%s(%d) supported recv_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_rqe);
		return -EINVAL;
	}
	if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
		pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_inline_data);
		pr_err("%s(%d) supported inline data size=0x%x\n",
		       __func__, dev->id, dev->attr.max_inline_data);
		return -EINVAL;
	}
	if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
		pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_sge);
		pr_err("%s(%d) supported send_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_send_sge);
		return -EINVAL;
	}
	if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
		pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_sge);
		pr_err("%s(%d) supported recv_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_recv_sge);
		return -EINVAL;
	}
	/* unprivileged user space cannot create special QP */
	if (udata && attrs->qp_type == IB_QPT_GSI) {
		pr_err("%s(%d) Userspace can't create special QPs of type=0x%x\n",
		       __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	/* allow creating only one GSI type of QP */
	if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
		pr_err("%s(%d) GSI special QPs already created.\n",
		       __func__, dev->id);
		return -EINVAL;
	}
	/* verify consumer QPs are not trying to use GSI QP's CQ */
	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
		if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
		    (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
			pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
			       __func__, dev->id);
			return -EINVAL;
		}
	}
	return 0;
}

static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
				struct ib_udata *udata, int dpp_offset,
				int dpp_credit_lmt, int srq)
{
	int status;
	u64 usr_db;
	struct ocrdma_create_qp_uresp uresp;
	struct ocrdma_pd *pd = qp->pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

	memset(&uresp, 0, sizeof(uresp));
	usr_db = dev->nic_info.unmapped_db +
		 (pd->id * dev->nic_info.db_page_size);
	uresp.qp_id = qp->id;
	uresp.sq_dbid = qp->sq.dbid;
	uresp.num_sq_pages = 1;
	uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
	uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va);
	uresp.num_wqe_allocated = qp->sq.max_cnt;
	if (!srq) {
		uresp.rq_dbid = qp->rq.dbid;
		uresp.num_rq_pages = 1;
		uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
		uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
		uresp.num_rqe_allocated = qp->rq.max_cnt;
	}
	uresp.db_page_addr = usr_db;
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
	uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
	uresp.db_shift = OCRDMA_DB_RQ_SHIFT;

	if (qp->dpp_enabled) {
		uresp.dpp_credit = dpp_credit_lmt;
		uresp.dpp_offset = dpp_offset;
	}
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) user copy error.\n", __func__, dev->id);
		goto err;
	}
	status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
				 uresp.sq_page_size);
	if (status)
		goto err;

	if (!srq) {
		status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
					 uresp.rq_page_size);
		if (status)
			goto rq_map_err;
	}
	return status;
rq_map_err:
	ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
err:
	return status;
}

static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			     struct ocrdma_pd *pd)
{
	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_RQ_OFFSET;
	} else {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_RQ_OFFSET;
	}
}
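/* For kernel QPs the driver keeps shadow tables alongside the hw
 * rings: one entry per SQ slot recording the posted wr_id and its
 * "signaled" flag, and one u64 wr_id per RQ slot. They are consulted
 * when CQEs are polled; user-space QPs keep the equivalent tables in
 * the library instead (see ocrdma_create_qp()).
 */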
static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
{
	qp->wqe_wr_id_tbl =
	    kcalloc(qp->sq.max_cnt, sizeof(*(qp->wqe_wr_id_tbl)),
		    GFP_KERNEL);
	if (qp->wqe_wr_id_tbl == NULL)
		return -ENOMEM;
	qp->rqe_wr_id_tbl =
	    kcalloc(qp->rq.max_cnt, sizeof(u64), GFP_KERNEL);
	if (qp->rqe_wr_id_tbl == NULL)
		return -ENOMEM;

	return 0;
}

static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
				      struct ocrdma_pd *pd,
				      struct ib_qp_init_attr *attrs)
{
	qp->pd = pd;
	spin_lock_init(&qp->q_lock);
	INIT_LIST_HEAD(&qp->sq_entry);
	INIT_LIST_HEAD(&qp->rq_entry);

	qp->qp_type = attrs->qp_type;
	qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
	qp->rq.max_sges = attrs->cap.max_recv_sge;
	qp->state = OCRDMA_QPS_RST;
	qp->signaled = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
}

static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
				   struct ib_qp_init_attr *attrs)
{
	if (attrs->qp_type == IB_QPT_GSI) {
		dev->gsi_qp_created = 1;
		dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
		dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
	}
}

int ocrdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
		     struct ib_udata *udata)
{
	int status;
	struct ib_pd *ibpd = ibqp->pd;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
	struct ocrdma_create_qp_ureq ureq;
	u16 dpp_credit_lmt, dpp_offset;

	if (attrs->create_flags)
		return -EOPNOTSUPP;

	status = ocrdma_check_qp_params(ibpd, dev, attrs, udata);
	if (status)
		goto gen_err;

	memset(&ureq, 0, sizeof(ureq));
	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return -EFAULT;
	}
	ocrdma_set_qp_init_params(qp, pd, attrs);
	if (udata == NULL)
		qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
				  OCRDMA_QP_FAST_REG);

	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
				      ureq.dpp_cq_id,
				      &dpp_offset, &dpp_credit_lmt);
	if (status)
		goto mbx_err;

	/* user-space QPs' wr_id tables are managed by the library */
	if (udata == NULL) {
		status = ocrdma_alloc_wr_id_tbl(qp);
		if (status)
			goto map_err;
	}

	status = ocrdma_add_qpn_map(dev, qp);
	if (status)
		goto map_err;
	ocrdma_set_qp_db(dev, qp, pd);
	if (udata) {
		status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
					      dpp_credit_lmt,
					      (attrs->srq != NULL));
		if (status)
			goto cpy_err;
	}
	ocrdma_store_gsi_qp_cq(dev, attrs);
	qp->ibqp.qp_num = qp->id;
	mutex_unlock(&dev->dev_lock);
	return 0;

cpy_err:
	ocrdma_del_qpn_map(dev, qp);
map_err:
	ocrdma_mbx_destroy_qp(dev, qp);
mbx_err:
	mutex_unlock(&dev->dev_lock);
	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err:
	return status;
}
int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask)
{
	int status = 0;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = get_ocrdma_dev(ibqp->device);
	if (attr_mask & IB_QP_STATE)
		status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
	/* if the new and previous states are the same, the hw doesn't need
	 * to know about it.
	 */
	if (status < 0)
		return status;
	return ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
}

int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_udata *udata)
{
	unsigned long flags;
	int status = -EINVAL;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps, new_qps;

	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;

	qp = get_ocrdma_qp(ibqp);
	dev = get_ocrdma_dev(ibqp->device);

	/* synchronize with multiple contexts trying to change/retrieve qps */
	mutex_lock(&dev->dev_lock);
	/* synchronize with wqe, rqe posting and cqe processing contexts */
	spin_lock_irqsave(&qp->q_lock, flags);
	old_qps = get_ibqp_state(qp->state);
	if (attr_mask & IB_QP_STATE)
		new_qps = attr->qp_state;
	else
		new_qps = old_qps;
	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
		pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
		       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
		       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
		       old_qps, new_qps);
		goto param_err;
	}

	status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
	if (status > 0)
		status = 0;
param_err:
	mutex_unlock(&dev->dev_lock);
	return status;
}

static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
{
	switch (mtu) {
	case 256:
		return IB_MTU_256;
	case 512:
		return IB_MTU_512;
	case 1024:
		return IB_MTU_1024;
	case 2048:
		return IB_MTU_2048;
	case 4096:
		return IB_MTU_4096;
	default:
		return IB_MTU_1024;
	}
}

static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
{
	int ib_qp_acc_flags = 0;

	if (qp_cap_flags & OCRDMA_QP_INB_WR)
		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
	if (qp_cap_flags & OCRDMA_QP_INB_RD)
		ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
	return ib_qp_acc_flags;
}
int ocrdma_query_qp(struct ib_qp *ibqp,
		    struct ib_qp_attr *qp_attr,
		    int attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	int status;
	u32 qp_state;
	struct ocrdma_qp_params params;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);

	memset(&params, 0, sizeof(params));
	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_query_qp(dev, qp, &params);
	mutex_unlock(&dev->dev_lock);
	if (status)
		goto mbx_err;
	if (qp->qp_type == IB_QPT_UD)
		qp_attr->qkey = params.qkey;
	qp_attr->path_mtu =
	    ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
				   OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
				   OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
	qp_attr->path_mig_state = IB_MIG_MIGRATED;
	qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
	qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
	qp_attr->dest_qp_num =
	    params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;

	qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
	qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
	qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
	qp_attr->cap.max_send_sge = qp->sq.max_sges;
	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
	qp_attr->cap.max_inline_data = qp->max_inline_data;
	qp_init_attr->cap = qp_attr->cap;
	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;

	rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
			params.rnt_rc_sl_fl &
			OCRDMA_QP_PARAMS_FLOW_LABEL_MASK,
			qp->sgid_idx,
			(params.hop_lmt_rq_psn &
			 OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
			 OCRDMA_QP_PARAMS_HOP_LMT_SHIFT,
			(params.tclass_sq_psn &
			 OCRDMA_QP_PARAMS_TCLASS_MASK) >>
			 OCRDMA_QP_PARAMS_TCLASS_SHIFT);
	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid[0]);

	rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
	rdma_ah_set_sl(&qp_attr->ah_attr, (params.rnt_rc_sl_fl &
					   OCRDMA_QP_PARAMS_SL_MASK) >>
					   OCRDMA_QP_PARAMS_SL_SHIFT);
	qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
	qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
	qp_attr->retry_cnt =
	    (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
	    OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
	qp_attr->min_rnr_timer = 0;
	qp_attr->pkey_index = 0;
	qp_attr->port_num = 1;
	rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
	rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
	qp_attr->alt_pkey_index = 0;
	qp_attr->alt_port_num = 0;
	qp_attr->alt_timeout = 0;
	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
	qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
		    OCRDMA_QP_PARAMS_STATE_SHIFT;
	qp_attr->qp_state = get_ibqp_state(qp_state);
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
	qp_attr->max_dest_rd_atomic =
	    params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
	qp_attr->max_rd_atomic =
	    params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
	qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
					OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ?
					1 : 0;
	/* Sync driver QP state with FW */
	ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
mbx_err:
	return status;
}
static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
{
	unsigned int i = idx / 32;
	u32 mask = (1U << (idx % 32));

	srq->idx_bit_fields[i] ^= mask;
}

static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
{
	return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
}

static int is_hw_sq_empty(struct ocrdma_qp *qp)
{
	return (qp->sq.tail == qp->sq.head);
}

static int is_hw_rq_empty(struct ocrdma_qp *qp)
{
	return (qp->rq.tail == qp->rq.head);
}

static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
{
	return q->va + (q->head * q->entry_size);
}

static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
				      u32 idx)
{
	return q->va + (idx * q->entry_size);
}

static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
{
	q->head = (q->head + 1) & q->max_wqe_idx;
}

static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
{
	q->tail = (q->tail + 1) & q->max_wqe_idx;
}
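/* Ring arithmetic example for the helpers above, assuming a
 * power-of-two ring where max_wqe_idx == max_cnt - 1 (inc_head and
 * inc_tail use it as a mask): with max_cnt = 16, head = 5, tail = 2,
 * free = ((15 - 5) + 2) % 16 = 12, i.e. one slot is kept unused so
 * that head == tail always means "empty".
 */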
/* discard the cqe for a given QP */
static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
{
	unsigned long cq_flags;
	unsigned long flags;
	u32 cur_getp, stop_getp;
	struct ocrdma_cqe *cqe;
	u32 qpn = 0, wqe_idx = 0;

	spin_lock_irqsave(&cq->cq_lock, cq_flags);

	/* traverse through the CQEs in the hw CQ,
	 * find the matching CQE for a given qp,
	 * mark the matching one discarded by clearing qpn.
	 * ring the doorbell in the poll_cq() as
	 * we don't complete out of order cqe.
	 */

	cur_getp = cq->getp;
	/* find up to where we reap the cq */
	stop_getp = cur_getp;
	do {
		if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
			break;

		cqe = cq->va + cur_getp;
		/* if (a) done reaping whole hw cq, or
		 * (b) qp_xq becomes empty.
		 * then exit
		 */
		qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
		/* if previously discarded cqe found, skip that too. */
		/* check for matching qp */
		if (qpn == 0 || qpn != qp->id)
			goto skip_cqe;

		if (is_cqe_for_sq(cqe)) {
			ocrdma_hwq_inc_tail(&qp->sq);
		} else {
			if (qp->srq) {
				wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
					   OCRDMA_CQE_BUFTAG_SHIFT) &
					  qp->srq->rq.max_wqe_idx;
				BUG_ON(wqe_idx < 1);
				spin_lock_irqsave(&qp->srq->q_lock, flags);
				ocrdma_hwq_inc_tail(&qp->srq->rq);
				ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
				spin_unlock_irqrestore(&qp->srq->q_lock,
						       flags);
			} else {
				ocrdma_hwq_inc_tail(&qp->rq);
			}
		}
		/* mark cqe discarded so that it is not picked up later
		 * in the poll_cq().
		 */
		cqe->cmn.qpn = 0;
skip_cqe:
		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
	} while (cur_getp != stop_getp);
	spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
}

void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
{
	int found = false;
	unsigned long flags;
	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);

	/* sync with any active CQ poll */
	spin_lock_irqsave(&dev->flush_q_lock, flags);
	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
	if (found)
		list_del(&qp->sq_entry);
	if (!qp->srq) {
		found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
		if (found)
			list_del(&qp->rq_entry);
	}
	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
}

int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct ocrdma_pd *pd;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	struct ib_qp_attr attrs;
	int attr_mask;
	unsigned long flags;

	qp = get_ocrdma_qp(ibqp);
	dev = get_ocrdma_dev(ibqp->device);

	pd = qp->pd;

	/* change the QP state to ERROR */
	if (qp->state != OCRDMA_QPS_RST) {
		attrs.qp_state = IB_QPS_ERR;
		attr_mask = IB_QP_STATE;
		_ocrdma_modify_qp(ibqp, &attrs, attr_mask);
	}
	/* ensure that CQEs for a newly created QP (whose id may be the same
	 * as one just destroyed) don't get discarded until the old QP's
	 * CQEs are discarded.
	 */
	mutex_lock(&dev->dev_lock);
	(void) ocrdma_mbx_destroy_qp(dev, qp);

	/*
	 * acquire CQ lock while destroy is in progress, in order to
	 * protect against processing in-flight CQEs for this QP.
	 */
	spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
	if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) {
		spin_lock(&qp->rq_cq->cq_lock);
		ocrdma_del_qpn_map(dev, qp);
		spin_unlock(&qp->rq_cq->cq_lock);
	} else {
		ocrdma_del_qpn_map(dev, qp);
	}
	spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);

	if (!pd->uctx) {
		ocrdma_discard_cqes(qp, qp->sq_cq);
		ocrdma_discard_cqes(qp, qp->rq_cq);
	}
	mutex_unlock(&dev->dev_lock);

	if (pd->uctx) {
		ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
				PAGE_ALIGN(qp->sq.len));
		if (!qp->srq)
			ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
					PAGE_ALIGN(qp->rq.len));
	}

	ocrdma_del_flush_qp(qp);

	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	return 0;
}
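/* The SRQ response below mirrors the QP one: it reports the RQ page
 * (also registered as an mmap key) and the doorbell page address, and
 * db_rq_offset/db_shift describe the doorbell format, which differs
 * between the SKH-R (GEN2) ASIC and earlier generations.
 */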
static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
				 struct ib_udata *udata)
{
	int status;
	struct ocrdma_create_srq_uresp uresp;

	memset(&uresp, 0, sizeof(uresp));
	uresp.rq_dbid = srq->rq.dbid;
	uresp.num_rq_pages = 1;
	uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va);
	uresp.rq_page_size = srq->rq.len;
	uresp.db_page_addr = dev->nic_info.unmapped_db +
			     (srq->pd->id * dev->nic_info.db_page_size);
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.num_rqe_allocated = srq->rq.max_cnt;
	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
		uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
		uresp.db_shift = 24;
	} else {
		uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
		uresp.db_shift = 16;
	}

	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status)
		return status;
	return ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
			       uresp.rq_page_size);
}

int ocrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
		      struct ib_udata *udata)
{
	int status;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibsrq->pd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
	struct ocrdma_srq *srq = get_ocrdma_srq(ibsrq);

	if (init_attr->srq_type != IB_SRQT_BASIC)
		return -EOPNOTSUPP;

	if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
		return -EINVAL;
	if (init_attr->attr.max_wr > dev->attr.max_rqe)
		return -EINVAL;

	spin_lock_init(&srq->q_lock);
	srq->pd = pd;
	srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
	status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
	if (status)
		return status;

	if (!udata) {
		srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
					     GFP_KERNEL);
		if (!srq->rqe_wr_id_tbl) {
			status = -ENOMEM;
			goto arm_err;
		}

		srq->bit_fields_len = (srq->rq.max_cnt / 32) +
				      (srq->rq.max_cnt % 32 ? 1 : 0);
		srq->idx_bit_fields =
		    kmalloc_array(srq->bit_fields_len, sizeof(u32),
				  GFP_KERNEL);
		if (!srq->idx_bit_fields) {
			status = -ENOMEM;
			goto arm_err;
		}
		memset(srq->idx_bit_fields, 0xff,
		       srq->bit_fields_len * sizeof(u32));
	}

	if (init_attr->attr.srq_limit) {
		status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
		if (status)
			goto arm_err;
	}

	if (udata) {
		status = ocrdma_copy_srq_uresp(dev, srq, udata);
		if (status)
			goto arm_err;
	}

	return 0;

arm_err:
	ocrdma_mbx_destroy_srq(dev, srq);
	kfree(srq->rqe_wr_id_tbl);
	kfree(srq->idx_bit_fields);
	return status;
}
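/* For kernel SRQs, idx_bit_fields (initialized above to all ones) is
 * a bitmap with one bit per RQE slot, where a set bit appears to mean
 * "free"; ocrdma_srq_toggle_bit() flips a slot's bit when it is
 * posted or reaped. Example: idx 37 lives in word 37 / 32 = 1 with
 * mask 1 << (37 % 32) = 1 << 5.
 */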
int ocrdma_modify_srq(struct ib_srq *ibsrq,
		      struct ib_srq_attr *srq_attr,
		      enum ib_srq_attr_mask srq_attr_mask,
		      struct ib_udata *udata)
{
	int status;
	struct ocrdma_srq *srq;

	srq = get_ocrdma_srq(ibsrq);
	if (srq_attr_mask & IB_SRQ_MAX_WR)
		status = -EINVAL;
	else
		status = ocrdma_mbx_modify_srq(srq, srq_attr);
	return status;
}

int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct ocrdma_srq *srq;

	srq = get_ocrdma_srq(ibsrq);
	return ocrdma_mbx_query_srq(srq, srq_attr);
}

int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct ocrdma_srq *srq;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);

	srq = get_ocrdma_srq(ibsrq);

	ocrdma_mbx_destroy_srq(dev, srq);

	if (srq->pd->uctx)
		ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
				PAGE_ALIGN(srq->rq.len));

	kfree(srq->idx_bit_fields);
	kfree(srq->rqe_wr_id_tbl);
	return 0;
}

/* unprivileged verbs and their support functions. */

static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
				struct ocrdma_hdr_wqe *hdr,
				const struct ib_send_wr *wr)
{
	struct ocrdma_ewqe_ud_hdr *ud_hdr =
		(struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
	struct ocrdma_ah *ah = get_ocrdma_ah(ud_wr(wr)->ah);

	ud_hdr->rsvd_dest_qpn = ud_wr(wr)->remote_qpn;
	if (qp->qp_type == IB_QPT_GSI)
		ud_hdr->qkey = qp->qkey;
	else
		ud_hdr->qkey = ud_wr(wr)->remote_qkey;
	ud_hdr->rsvd_ahid = ah->id;
	ud_hdr->hdr_type = ah->hdr_type;
	if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
		hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
}

static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
			      struct ocrdma_sge *sge, int num_sge,
			      struct ib_sge *sg_list)
{
	int i;

	for (i = 0; i < num_sge; i++) {
		sge[i].lrkey = sg_list[i].lkey;
		sge[i].addr_lo = sg_list[i].addr;
		sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
		sge[i].len = sg_list[i].length;
		hdr->total_len += sg_list[i].length;
	}
	if (num_sge == 0)
		memset(sge, 0, sizeof(*sge));
}

static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
{
	uint32_t total_len = 0, i;

	for (i = 0; i < num_sge; i++)
		total_len += sg_list[i].length;
	return total_len;
}

static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
				    struct ocrdma_hdr_wqe *hdr,
				    struct ocrdma_sge *sge,
				    const struct ib_send_wr *wr, u32 wqe_size)
{
	int i;
	char *dpp_addr;

	if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
		hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
		if (unlikely(hdr->total_len > qp->max_inline_data)) {
			pr_err("%s() supported_len=0x%x,\n"
			       " unsupported len req=0x%x\n", __func__,
			       qp->max_inline_data, hdr->total_len);
			return -EINVAL;
		}
		dpp_addr = (char *)sge;
		for (i = 0; i < wr->num_sge; i++) {
			memcpy(dpp_addr,
			       (void *)(unsigned long)wr->sg_list[i].addr,
			       wr->sg_list[i].length);
			dpp_addr += wr->sg_list[i].length;
		}

		wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
		if (0 == hdr->total_len)
			wqe_size += sizeof(struct ocrdma_sge);
		hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
	} else {
		ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
		if (wr->num_sge)
			wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
		else
			wqe_size += sizeof(struct ocrdma_sge);
		hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
	}
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
	return 0;
}

static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			     const struct ib_send_wr *wr)
{
	struct ocrdma_sge *sge;
	u32 wqe_size = sizeof(*hdr);

	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
		ocrdma_build_ud_hdr(qp, hdr, wr);
		sge = (struct ocrdma_sge *)(hdr + 2);
		wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
	} else {
		sge = (struct ocrdma_sge *)(hdr + 1);
	}

	return ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
}
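/* WQE sizing example for ocrdma_build_inline_sges(), assuming a
 * 16-byte ocrdma_hdr_wqe, OCRDMA_WQE_ALIGN_BYTES = 16 and
 * OCRDMA_WQE_STRIDE = 8: an inline RC send of 20 payload bytes needs
 * 16 + roundup(20, 16) = 48 bytes, encoded in hdr->cw as
 * 48 / 8 = 6 strides.
 */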
static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			      const struct ib_send_wr *wr)
{
	int status;
	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
	struct ocrdma_sge *sge = ext_rw + 1;
	u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);

	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
	if (status)
		return status;
	ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
	ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
	ext_rw->lrkey = rdma_wr(wr)->rkey;
	ext_rw->len = hdr->total_len;
	return 0;
}

static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			      const struct ib_send_wr *wr)
{
	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
	struct ocrdma_sge *sge = ext_rw + 1;
	u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
		sizeof(struct ocrdma_hdr_wqe);

	ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
	hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
	hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);

	ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
	ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
	ext_rw->lrkey = rdma_wr(wr)->rkey;
	ext_rw->len = hdr->total_len;
}

static int get_encoded_page_size(int pg_sz)
{
	/* Max size is 256M: 4096 << 16 */
	int i;

	for (i = 0; i < 17; i++)
		if (pg_sz == (4096 << i))
			break;
	return i;
}
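/* get_encoded_page_size() maps a power-of-two page size to the shift the
 * hardware expects relative to 4K: 4096 -> 0, 8192 -> 1, 65536 -> 4, and
 * 256M (4096 << 16) -> 16.  A size that matches no entry falls out of the
 * loop and yields 17, one past the largest supported encoding.
 */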
static int ocrdma_build_reg(struct ocrdma_qp *qp,
			    struct ocrdma_hdr_wqe *hdr,
			    const struct ib_reg_wr *wr)
{
	u64 fbo;
	struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
	struct ocrdma_mr *mr = get_ocrdma_mr(wr->mr);
	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
	struct ocrdma_pbe *pbe;
	u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
	int num_pbes = 0, i;

	wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);

	hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);

	if (wr->access & IB_ACCESS_LOCAL_WRITE)
		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
	if (wr->access & IB_ACCESS_REMOTE_WRITE)
		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
	if (wr->access & IB_ACCESS_REMOTE_READ)
		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
	hdr->lkey = wr->key;
	hdr->total_len = mr->ibmr.length;

	fbo = mr->ibmr.iova - mr->pages[0];

	fast_reg->va_hi = upper_32_bits(mr->ibmr.iova);
	fast_reg->va_lo = (u32) (mr->ibmr.iova & 0xffffffff);
	fast_reg->fbo_hi = upper_32_bits(fbo);
	fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
	fast_reg->num_sges = mr->npages;
	fast_reg->size_sge = get_encoded_page_size(mr->ibmr.page_size);

	pbe = pbl_tbl->va;
	for (i = 0; i < mr->npages; i++) {
		u64 buf_addr = mr->pages[i];

		pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
		pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
		num_pbes += 1;
		pbe++;

		/* if the current pbl is full of pbes, move to the next
		 * pbl; the counter is per pbl, so reset it after each
		 * rollover.
		 */
		if (num_pbes == (mr->hwmr.pbl_size / sizeof(u64))) {
			num_pbes = 0;
			pbl_tbl++;
			pbe = (struct ocrdma_pbe *)pbl_tbl->va;
		}
	}

	return 0;
}

static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
{
	u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT);

	iowrite32(val, qp->sq_db);
}
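/* Doorbell format, as used by ocrdma_ring_sq_db() above and the RQ/SRQ
 * variants below: the low bits carry the queue's dbid and a single bit
 * (OCRDMA_DB_SQ_SHIFT / OCRDMA_DB_RQ_SHIFT) selects the queue type.  One
 * 32-bit MMIO write tells the adapter to start fetching newly posted
 * WQEs; the wmb() preceding each ring guarantees the WQE contents are
 * visible to DMA before the doorbell lands.
 */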
int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		     const struct ib_send_wr **bad_wr)
{
	int status = 0;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_hdr_wqe *hdr;
	unsigned long flags;

	spin_lock_irqsave(&qp->q_lock, flags);
	if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}

	while (wr) {
		if (qp->qp_type == IB_QPT_UD &&
		    (wr->opcode != IB_WR_SEND &&
		     wr->opcode != IB_WR_SEND_WITH_IMM)) {
			*bad_wr = wr;
			status = -EINVAL;
			break;
		}
		if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
		    wr->num_sge > qp->sq.max_sges) {
			*bad_wr = wr;
			status = -ENOMEM;
			break;
		}
		hdr = ocrdma_hwq_head(&qp->sq);
		hdr->cw = 0;
		if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
			hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
		if (wr->send_flags & IB_SEND_FENCE)
			hdr->cw |=
			    (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
		if (wr->send_flags & IB_SEND_SOLICITED)
			hdr->cw |=
			    (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
		hdr->total_len = 0;
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_IMM:
			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
			hdr->immdt = ntohl(wr->ex.imm_data);
			fallthrough;
		case IB_WR_SEND:
			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
			status = ocrdma_build_send(qp, hdr, wr);
			break;
		case IB_WR_SEND_WITH_INV:
			hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
			hdr->lkey = wr->ex.invalidate_rkey;
			status = ocrdma_build_send(qp, hdr, wr);
			break;
		case IB_WR_RDMA_WRITE_WITH_IMM:
			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
			hdr->immdt = ntohl(wr->ex.imm_data);
			fallthrough;
		case IB_WR_RDMA_WRITE:
			hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
			status = ocrdma_build_write(qp, hdr, wr);
			break;
		case IB_WR_RDMA_READ:
			ocrdma_build_read(qp, hdr, wr);
			break;
		case IB_WR_LOCAL_INV:
			hdr->cw |=
			    (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
			hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
				     sizeof(struct ocrdma_sge)) /
				    OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
			hdr->lkey = wr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			status = ocrdma_build_reg(qp, hdr, reg_wr(wr));
			break;
		default:
			status = -EINVAL;
			break;
		}
		if (status) {
			*bad_wr = wr;
			break;
		}
		if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
		else
			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
		qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
		ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
					 OCRDMA_WQE_SIZE_MASK) *
				   OCRDMA_WQE_STRIDE);
		/* make sure wqe is written before adapter can access it */
		wmb();
		/* inform hw to start processing it */
		ocrdma_ring_sq_db(qp);

		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&qp->sq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->q_lock, flags);
	return status;
}

static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
{
	u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);

	iowrite32(val, qp->rq_db);
}

static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe,
			     const struct ib_recv_wr *wr, u16 tag)
{
	u32 wqe_size = 0;
	struct ocrdma_sge *sge;

	if (wr->num_sge)
		wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
	else
		wqe_size = sizeof(*sge) + sizeof(*rqe);

	rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
	rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
	rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
	rqe->total_len = 0;
	rqe->rsvd_tag = tag;
	sge = (struct ocrdma_sge *)(rqe + 1);
	ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
	ocrdma_cpu_to_le32(rqe, wqe_size);
}

int ocrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		     const struct ib_recv_wr **bad_wr)
{
	int status = 0;
	unsigned long flags;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_hdr_wqe *rqe;

	spin_lock_irqsave(&qp->q_lock, flags);
	if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}
	while (wr) {
		if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
		    wr->num_sge > qp->rq.max_sges) {
			*bad_wr = wr;
			status = -ENOMEM;
			break;
		}
		rqe = ocrdma_hwq_head(&qp->rq);
		ocrdma_build_rqe(rqe, wr, 0);

		qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
		/* make sure rqe is written before adapter can access it */
		wmb();

		/* inform hw to start processing it */
		ocrdma_ring_rq_db(qp);

		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&qp->rq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->q_lock, flags);
	return status;
}
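/* QP receives are tracked by slot: ocrdma_post_recv() passes tag 0 and
 * records the wr_id in rqe_wr_id_tbl[rq.head], since RQ completions come
 * back in posting order.  The SRQ path below cannot rely on ordering, so
 * it allocates a tag per RQE from the shadow-table bitmap and the CQE
 * hands that tag back to locate the wr_id.
 */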
2264 */ 2265 static int ocrdma_srq_get_idx(struct ocrdma_srq *srq) 2266 { 2267 int row = 0; 2268 int indx = 0; 2269 2270 for (row = 0; row < srq->bit_fields_len; row++) { 2271 if (srq->idx_bit_fields[row]) { 2272 indx = ffs(srq->idx_bit_fields[row]); 2273 indx = (row * 32) + (indx - 1); 2274 BUG_ON(indx >= srq->rq.max_cnt); 2275 ocrdma_srq_toggle_bit(srq, indx); 2276 break; 2277 } 2278 } 2279 2280 BUG_ON(row == srq->bit_fields_len); 2281 return indx + 1; /* Use from index 1 */ 2282 } 2283 2284 static void ocrdma_ring_srq_db(struct ocrdma_srq *srq) 2285 { 2286 u32 val = srq->rq.dbid | (1 << 16); 2287 2288 iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET); 2289 } 2290 2291 int ocrdma_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, 2292 const struct ib_recv_wr **bad_wr) 2293 { 2294 int status = 0; 2295 unsigned long flags; 2296 struct ocrdma_srq *srq; 2297 struct ocrdma_hdr_wqe *rqe; 2298 u16 tag; 2299 2300 srq = get_ocrdma_srq(ibsrq); 2301 2302 spin_lock_irqsave(&srq->q_lock, flags); 2303 while (wr) { 2304 if (ocrdma_hwq_free_cnt(&srq->rq) == 0 || 2305 wr->num_sge > srq->rq.max_sges) { 2306 status = -ENOMEM; 2307 *bad_wr = wr; 2308 break; 2309 } 2310 tag = ocrdma_srq_get_idx(srq); 2311 rqe = ocrdma_hwq_head(&srq->rq); 2312 ocrdma_build_rqe(rqe, wr, tag); 2313 2314 srq->rqe_wr_id_tbl[tag] = wr->wr_id; 2315 /* make sure rqe is written before adapter can perform DMA */ 2316 wmb(); 2317 /* inform hw to start processing it */ 2318 ocrdma_ring_srq_db(srq); 2319 /* update pointer, counter for next wr */ 2320 ocrdma_hwq_inc_head(&srq->rq); 2321 wr = wr->next; 2322 } 2323 spin_unlock_irqrestore(&srq->q_lock, flags); 2324 return status; 2325 } 2326 2327 static enum ib_wc_status ocrdma_to_ibwc_err(u16 status) 2328 { 2329 enum ib_wc_status ibwc_status; 2330 2331 switch (status) { 2332 case OCRDMA_CQE_GENERAL_ERR: 2333 ibwc_status = IB_WC_GENERAL_ERR; 2334 break; 2335 case OCRDMA_CQE_LOC_LEN_ERR: 2336 ibwc_status = IB_WC_LOC_LEN_ERR; 2337 break; 2338 case OCRDMA_CQE_LOC_QP_OP_ERR: 2339 ibwc_status = IB_WC_LOC_QP_OP_ERR; 2340 break; 2341 case OCRDMA_CQE_LOC_EEC_OP_ERR: 2342 ibwc_status = IB_WC_LOC_EEC_OP_ERR; 2343 break; 2344 case OCRDMA_CQE_LOC_PROT_ERR: 2345 ibwc_status = IB_WC_LOC_PROT_ERR; 2346 break; 2347 case OCRDMA_CQE_WR_FLUSH_ERR: 2348 ibwc_status = IB_WC_WR_FLUSH_ERR; 2349 break; 2350 case OCRDMA_CQE_MW_BIND_ERR: 2351 ibwc_status = IB_WC_MW_BIND_ERR; 2352 break; 2353 case OCRDMA_CQE_BAD_RESP_ERR: 2354 ibwc_status = IB_WC_BAD_RESP_ERR; 2355 break; 2356 case OCRDMA_CQE_LOC_ACCESS_ERR: 2357 ibwc_status = IB_WC_LOC_ACCESS_ERR; 2358 break; 2359 case OCRDMA_CQE_REM_INV_REQ_ERR: 2360 ibwc_status = IB_WC_REM_INV_REQ_ERR; 2361 break; 2362 case OCRDMA_CQE_REM_ACCESS_ERR: 2363 ibwc_status = IB_WC_REM_ACCESS_ERR; 2364 break; 2365 case OCRDMA_CQE_REM_OP_ERR: 2366 ibwc_status = IB_WC_REM_OP_ERR; 2367 break; 2368 case OCRDMA_CQE_RETRY_EXC_ERR: 2369 ibwc_status = IB_WC_RETRY_EXC_ERR; 2370 break; 2371 case OCRDMA_CQE_RNR_RETRY_EXC_ERR: 2372 ibwc_status = IB_WC_RNR_RETRY_EXC_ERR; 2373 break; 2374 case OCRDMA_CQE_LOC_RDD_VIOL_ERR: 2375 ibwc_status = IB_WC_LOC_RDD_VIOL_ERR; 2376 break; 2377 case OCRDMA_CQE_REM_INV_RD_REQ_ERR: 2378 ibwc_status = IB_WC_REM_INV_RD_REQ_ERR; 2379 break; 2380 case OCRDMA_CQE_REM_ABORT_ERR: 2381 ibwc_status = IB_WC_REM_ABORT_ERR; 2382 break; 2383 case OCRDMA_CQE_INV_EECN_ERR: 2384 ibwc_status = IB_WC_INV_EECN_ERR; 2385 break; 2386 case OCRDMA_CQE_INV_EEC_STATE_ERR: 2387 ibwc_status = IB_WC_INV_EEC_STATE_ERR; 2388 break; 2389 case OCRDMA_CQE_FATAL_ERR: 2390 ibwc_status = 
static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
{
	enum ib_wc_status ibwc_status;

	switch (status) {
	case OCRDMA_CQE_GENERAL_ERR:
		ibwc_status = IB_WC_GENERAL_ERR;
		break;
	case OCRDMA_CQE_LOC_LEN_ERR:
		ibwc_status = IB_WC_LOC_LEN_ERR;
		break;
	case OCRDMA_CQE_LOC_QP_OP_ERR:
		ibwc_status = IB_WC_LOC_QP_OP_ERR;
		break;
	case OCRDMA_CQE_LOC_EEC_OP_ERR:
		ibwc_status = IB_WC_LOC_EEC_OP_ERR;
		break;
	case OCRDMA_CQE_LOC_PROT_ERR:
		ibwc_status = IB_WC_LOC_PROT_ERR;
		break;
	case OCRDMA_CQE_WR_FLUSH_ERR:
		ibwc_status = IB_WC_WR_FLUSH_ERR;
		break;
	case OCRDMA_CQE_MW_BIND_ERR:
		ibwc_status = IB_WC_MW_BIND_ERR;
		break;
	case OCRDMA_CQE_BAD_RESP_ERR:
		ibwc_status = IB_WC_BAD_RESP_ERR;
		break;
	case OCRDMA_CQE_LOC_ACCESS_ERR:
		ibwc_status = IB_WC_LOC_ACCESS_ERR;
		break;
	case OCRDMA_CQE_REM_INV_REQ_ERR:
		ibwc_status = IB_WC_REM_INV_REQ_ERR;
		break;
	case OCRDMA_CQE_REM_ACCESS_ERR:
		ibwc_status = IB_WC_REM_ACCESS_ERR;
		break;
	case OCRDMA_CQE_REM_OP_ERR:
		ibwc_status = IB_WC_REM_OP_ERR;
		break;
	case OCRDMA_CQE_RETRY_EXC_ERR:
		ibwc_status = IB_WC_RETRY_EXC_ERR;
		break;
	case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
		ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
		ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
		break;
	case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
		ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
		break;
	case OCRDMA_CQE_REM_ABORT_ERR:
		ibwc_status = IB_WC_REM_ABORT_ERR;
		break;
	case OCRDMA_CQE_INV_EECN_ERR:
		ibwc_status = IB_WC_INV_EECN_ERR;
		break;
	case OCRDMA_CQE_INV_EEC_STATE_ERR:
		ibwc_status = IB_WC_INV_EEC_STATE_ERR;
		break;
	case OCRDMA_CQE_FATAL_ERR:
		ibwc_status = IB_WC_FATAL_ERR;
		break;
	case OCRDMA_CQE_RESP_TIMEOUT_ERR:
		ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
		break;
	default:
		ibwc_status = IB_WC_GENERAL_ERR;
		break;
	}
	return ibwc_status;
}

static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
			     u32 wqe_idx)
{
	struct ocrdma_hdr_wqe *hdr;
	struct ocrdma_sge *rw;
	int opcode;

	hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);

	ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
	/* Undo the hdr->cw swap */
	opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
	switch (opcode) {
	case OCRDMA_WRITE:
		ibwc->opcode = IB_WC_RDMA_WRITE;
		break;
	case OCRDMA_READ:
		rw = (struct ocrdma_sge *)(hdr + 1);
		ibwc->opcode = IB_WC_RDMA_READ;
		ibwc->byte_len = rw->len;
		break;
	case OCRDMA_SEND:
		ibwc->opcode = IB_WC_SEND;
		break;
	case OCRDMA_FR_MR:
		ibwc->opcode = IB_WC_REG_MR;
		break;
	case OCRDMA_LKEY_INV:
		ibwc->opcode = IB_WC_LOCAL_INV;
		break;
	default:
		ibwc->status = IB_WC_GENERAL_ERR;
		pr_err("%s() invalid opcode received = 0x%x\n",
		       __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
		break;
	}
}

static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
					  struct ocrdma_cqe *cqe)
{
	if (is_cqe_for_sq(cqe)) {
		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
				cqe->flags_status_srcqpn) &
				~OCRDMA_CQE_STATUS_MASK);
		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
				cqe->flags_status_srcqpn) |
				(OCRDMA_CQE_WR_FLUSH_ERR <<
					OCRDMA_CQE_STATUS_SHIFT));
	} else {
		if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) &
					~OCRDMA_CQE_UD_STATUS_MASK);
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) |
					(OCRDMA_CQE_WR_FLUSH_ERR <<
						OCRDMA_CQE_UD_STATUS_SHIFT));
		} else {
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) &
					~OCRDMA_CQE_STATUS_MASK);
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) |
					(OCRDMA_CQE_WR_FLUSH_ERR <<
						OCRDMA_CQE_STATUS_SHIFT));
		}
	}
}
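/* ocrdma_set_cqe_status_flushed() rewrites the status bits of a CQE in
 * place (little-endian read-modify-write) so that every subsequent poll
 * of the same entry reports IB_WC_WR_FLUSH_ERR.  UD CQEs keep their
 * status in a different bit range, hence the separate mask/shift pair.
 */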
2486 */ 2487 if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) { 2488 expand = true; 2489 ocrdma_set_cqe_status_flushed(qp, cqe); 2490 } 2491 return expand; 2492 } 2493 2494 static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, 2495 struct ocrdma_qp *qp, int status) 2496 { 2497 ibwc->opcode = IB_WC_RECV; 2498 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail]; 2499 ocrdma_hwq_inc_tail(&qp->rq); 2500 2501 return ocrdma_update_err_cqe(ibwc, cqe, qp, status); 2502 } 2503 2504 static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, 2505 struct ocrdma_qp *qp, int status) 2506 { 2507 ocrdma_update_wc(qp, ibwc, qp->sq.tail); 2508 ocrdma_hwq_inc_tail(&qp->sq); 2509 2510 return ocrdma_update_err_cqe(ibwc, cqe, qp, status); 2511 } 2512 2513 2514 static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp, 2515 struct ocrdma_cqe *cqe, struct ib_wc *ibwc, 2516 bool *polled, bool *stop) 2517 { 2518 bool expand; 2519 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); 2520 int status = (le32_to_cpu(cqe->flags_status_srcqpn) & 2521 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT; 2522 if (status < OCRDMA_MAX_CQE_ERR) 2523 atomic_inc(&dev->cqe_err_stats[status]); 2524 2525 /* when hw sq is empty, but rq is not empty, so we continue 2526 * to keep the cqe in order to get the cq event again. 2527 */ 2528 if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) { 2529 /* when cq for rq and sq is same, it is safe to return 2530 * flush cqe for RQEs. 2531 */ 2532 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) { 2533 *polled = true; 2534 status = OCRDMA_CQE_WR_FLUSH_ERR; 2535 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status); 2536 } else { 2537 /* stop processing further cqe as this cqe is used for 2538 * triggering cq event on buddy cq of RQ. 2539 * When QP is destroyed, this cqe will be removed 2540 * from the cq's hardware q. 
2541 */ 2542 *polled = false; 2543 *stop = true; 2544 expand = false; 2545 } 2546 } else if (is_hw_sq_empty(qp)) { 2547 /* Do nothing */ 2548 expand = false; 2549 *polled = false; 2550 *stop = false; 2551 } else { 2552 *polled = true; 2553 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status); 2554 } 2555 return expand; 2556 } 2557 2558 static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp, 2559 struct ocrdma_cqe *cqe, 2560 struct ib_wc *ibwc, bool *polled) 2561 { 2562 bool expand = false; 2563 int tail = qp->sq.tail; 2564 u32 wqe_idx; 2565 2566 if (!qp->wqe_wr_id_tbl[tail].signaled) { 2567 *polled = false; /* WC cannot be consumed yet */ 2568 } else { 2569 ibwc->status = IB_WC_SUCCESS; 2570 ibwc->wc_flags = 0; 2571 ibwc->qp = &qp->ibqp; 2572 ocrdma_update_wc(qp, ibwc, tail); 2573 *polled = true; 2574 } 2575 wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) & 2576 OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx; 2577 if (tail != wqe_idx) 2578 expand = true; /* Coalesced CQE can't be consumed yet */ 2579 2580 ocrdma_hwq_inc_tail(&qp->sq); 2581 return expand; 2582 } 2583 2584 static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, 2585 struct ib_wc *ibwc, bool *polled, bool *stop) 2586 { 2587 int status; 2588 bool expand; 2589 2590 status = (le32_to_cpu(cqe->flags_status_srcqpn) & 2591 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT; 2592 2593 if (status == OCRDMA_CQE_SUCCESS) 2594 expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled); 2595 else 2596 expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop); 2597 return expand; 2598 } 2599 2600 static int ocrdma_update_ud_rcqe(struct ocrdma_dev *dev, struct ib_wc *ibwc, 2601 struct ocrdma_cqe *cqe) 2602 { 2603 int status; 2604 u16 hdr_type = 0; 2605 2606 status = (le32_to_cpu(cqe->flags_status_srcqpn) & 2607 OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT; 2608 ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) & 2609 OCRDMA_CQE_SRCQP_MASK; 2610 ibwc->pkey_index = 0; 2611 ibwc->wc_flags = IB_WC_GRH; 2612 ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >> 2613 OCRDMA_CQE_UD_XFER_LEN_SHIFT) & 2614 OCRDMA_CQE_UD_XFER_LEN_MASK; 2615 2616 if (ocrdma_is_udp_encap_supported(dev)) { 2617 hdr_type = (le32_to_cpu(cqe->ud.rxlen_pkey) >> 2618 OCRDMA_CQE_UD_L3TYPE_SHIFT) & 2619 OCRDMA_CQE_UD_L3TYPE_MASK; 2620 ibwc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE; 2621 ibwc->network_hdr_type = hdr_type; 2622 } 2623 2624 return status; 2625 } 2626 2627 static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc, 2628 struct ocrdma_cqe *cqe, 2629 struct ocrdma_qp *qp) 2630 { 2631 unsigned long flags; 2632 struct ocrdma_srq *srq; 2633 u32 wqe_idx; 2634 2635 srq = get_ocrdma_srq(qp->ibqp.srq); 2636 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >> 2637 OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx; 2638 BUG_ON(wqe_idx < 1); 2639 2640 ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx]; 2641 spin_lock_irqsave(&srq->q_lock, flags); 2642 ocrdma_srq_toggle_bit(srq, wqe_idx - 1); 2643 spin_unlock_irqrestore(&srq->q_lock, flags); 2644 ocrdma_hwq_inc_tail(&srq->rq); 2645 } 2646 2647 static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, 2648 struct ib_wc *ibwc, bool *polled, bool *stop, 2649 int status) 2650 { 2651 bool expand; 2652 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); 2653 2654 if (status < OCRDMA_MAX_CQE_ERR) 2655 atomic_inc(&dev->cqe_err_stats[status]); 2656 2657 /* when hw_rq is empty, but wq is not empty, so continue 2658 * to keep the cqe to get the cq event again. 
2659 */ 2660 if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) { 2661 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) { 2662 *polled = true; 2663 status = OCRDMA_CQE_WR_FLUSH_ERR; 2664 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status); 2665 } else { 2666 *polled = false; 2667 *stop = true; 2668 expand = false; 2669 } 2670 } else if (is_hw_rq_empty(qp)) { 2671 /* Do nothing */ 2672 expand = false; 2673 *polled = false; 2674 *stop = false; 2675 } else { 2676 *polled = true; 2677 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status); 2678 } 2679 return expand; 2680 } 2681 2682 static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp, 2683 struct ocrdma_cqe *cqe, struct ib_wc *ibwc) 2684 { 2685 struct ocrdma_dev *dev; 2686 2687 dev = get_ocrdma_dev(qp->ibqp.device); 2688 ibwc->opcode = IB_WC_RECV; 2689 ibwc->qp = &qp->ibqp; 2690 ibwc->status = IB_WC_SUCCESS; 2691 2692 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) 2693 ocrdma_update_ud_rcqe(dev, ibwc, cqe); 2694 else 2695 ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen); 2696 2697 if (is_cqe_imm(cqe)) { 2698 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt)); 2699 ibwc->wc_flags |= IB_WC_WITH_IMM; 2700 } else if (is_cqe_wr_imm(cqe)) { 2701 ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM; 2702 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt)); 2703 ibwc->wc_flags |= IB_WC_WITH_IMM; 2704 } else if (is_cqe_invalidated(cqe)) { 2705 ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt); 2706 ibwc->wc_flags |= IB_WC_WITH_INVALIDATE; 2707 } 2708 if (qp->ibqp.srq) { 2709 ocrdma_update_free_srq_cqe(ibwc, cqe, qp); 2710 } else { 2711 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail]; 2712 ocrdma_hwq_inc_tail(&qp->rq); 2713 } 2714 } 2715 2716 static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, 2717 struct ib_wc *ibwc, bool *polled, bool *stop) 2718 { 2719 int status; 2720 bool expand = false; 2721 2722 ibwc->wc_flags = 0; 2723 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) { 2724 status = (le32_to_cpu(cqe->flags_status_srcqpn) & 2725 OCRDMA_CQE_UD_STATUS_MASK) >> 2726 OCRDMA_CQE_UD_STATUS_SHIFT; 2727 } else { 2728 status = (le32_to_cpu(cqe->flags_status_srcqpn) & 2729 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT; 2730 } 2731 2732 if (status == OCRDMA_CQE_SUCCESS) { 2733 *polled = true; 2734 ocrdma_poll_success_rcqe(qp, cqe, ibwc); 2735 } else { 2736 expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop, 2737 status); 2738 } 2739 return expand; 2740 } 2741 2742 static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe, 2743 u16 cur_getp) 2744 { 2745 if (cq->phase_change) { 2746 if (cur_getp == 0) 2747 cq->phase = (~cq->phase & OCRDMA_CQE_VALID); 2748 } else { 2749 /* clear valid bit */ 2750 cqe->flags_status_srcqpn = 0; 2751 } 2752 } 2753 2754 static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries, 2755 struct ib_wc *ibwc) 2756 { 2757 u16 qpn = 0; 2758 int i = 0; 2759 bool expand = false; 2760 int polled_hw_cqes = 0; 2761 struct ocrdma_qp *qp = NULL; 2762 struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device); 2763 struct ocrdma_cqe *cqe; 2764 u16 cur_getp; bool polled = false; bool stop = false; 2765 2766 cur_getp = cq->getp; 2767 while (num_entries) { 2768 cqe = cq->va + cur_getp; 2769 /* check whether valid cqe or not */ 2770 if (!is_cqe_valid(cq, cqe)) 2771 break; 2772 qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK); 2773 /* ignore discarded cqe */ 2774 if (qpn == 0) 2775 goto skip_cqe; 2776 qp = dev->qp_tbl[qpn]; 2777 BUG_ON(qp == NULL); 2778 2779 if 
static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
			    struct ib_wc *ibwc)
{
	u16 qpn = 0;
	int i = 0;
	bool expand = false;
	int polled_hw_cqes = 0;
	struct ocrdma_qp *qp = NULL;
	struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
	struct ocrdma_cqe *cqe;
	u16 cur_getp;
	bool polled = false;
	bool stop = false;

	cur_getp = cq->getp;
	while (num_entries) {
		cqe = cq->va + cur_getp;
		/* check whether valid cqe or not */
		if (!is_cqe_valid(cq, cqe))
			break;
		qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
		/* ignore discarded cqe */
		if (qpn == 0)
			goto skip_cqe;
		qp = dev->qp_tbl[qpn];
		BUG_ON(qp == NULL);

		if (is_cqe_for_sq(cqe)) {
			expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
						  &stop);
		} else {
			expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
						  &stop);
		}
		if (expand)
			goto expand_cqe;
		if (stop)
			goto stop_cqe;
		/* clear qpn to avoid duplicate processing by discard_cqe() */
		cqe->cmn.qpn = 0;
skip_cqe:
		polled_hw_cqes += 1;
		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
		ocrdma_change_cq_phase(cq, cqe, cur_getp);
expand_cqe:
		if (polled) {
			num_entries -= 1;
			i += 1;
			ibwc = ibwc + 1;
			polled = false;
		}
	}
stop_cqe:
	cq->getp = cur_getp;

	if (polled_hw_cqes)
		ocrdma_ring_cq_db(dev, cq->id, false, false, polled_hw_cqes);

	return i;
}

/* insert error cqes if the QP's SQ or RQ's CQ matches the CQ under poll. */
static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
			      struct ocrdma_qp *qp, struct ib_wc *ibwc)
{
	int err_cqes = 0;

	while (num_entries) {
		if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
			break;
		if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
			ocrdma_update_wc(qp, ibwc, qp->sq.tail);
			ocrdma_hwq_inc_tail(&qp->sq);
		} else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
			ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
			ocrdma_hwq_inc_tail(&qp->rq);
		} else {
			return err_cqes;
		}
		ibwc->byte_len = 0;
		ibwc->status = IB_WC_WR_FLUSH_ERR;
		ibwc = ibwc + 1;
		err_cqes += 1;
		num_entries -= 1;
	}
	return err_cqes;
}
2861 */ 2862 spin_lock_irqsave(&dev->flush_q_lock, flags); 2863 list_for_each_entry(qp, &cq->sq_head, sq_entry) { 2864 if (cqes_to_poll == 0) 2865 break; 2866 err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc); 2867 cqes_to_poll -= err_cqes; 2868 num_os_cqe += err_cqes; 2869 wc = wc + err_cqes; 2870 } 2871 spin_unlock_irqrestore(&dev->flush_q_lock, flags); 2872 } 2873 return num_os_cqe; 2874 } 2875 2876 int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags) 2877 { 2878 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); 2879 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); 2880 u16 cq_id; 2881 unsigned long flags; 2882 bool arm_needed = false, sol_needed = false; 2883 2884 cq_id = cq->id; 2885 2886 spin_lock_irqsave(&cq->cq_lock, flags); 2887 if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED) 2888 arm_needed = true; 2889 if (cq_flags & IB_CQ_SOLICITED) 2890 sol_needed = true; 2891 2892 ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0); 2893 spin_unlock_irqrestore(&cq->cq_lock, flags); 2894 2895 return 0; 2896 } 2897 2898 struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type, 2899 u32 max_num_sg) 2900 { 2901 int status; 2902 struct ocrdma_mr *mr; 2903 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); 2904 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); 2905 2906 if (mr_type != IB_MR_TYPE_MEM_REG) 2907 return ERR_PTR(-EINVAL); 2908 2909 if (max_num_sg > dev->attr.max_pages_per_frmr) 2910 return ERR_PTR(-EINVAL); 2911 2912 mr = kzalloc_obj(*mr); 2913 if (!mr) 2914 return ERR_PTR(-ENOMEM); 2915 2916 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL); 2917 if (!mr->pages) { 2918 status = -ENOMEM; 2919 goto pl_err; 2920 } 2921 2922 status = ocrdma_get_pbl_info(dev, mr, max_num_sg); 2923 if (status) 2924 goto pbl_err; 2925 mr->hwmr.fr_mr = 1; 2926 mr->hwmr.remote_rd = 0; 2927 mr->hwmr.remote_wr = 0; 2928 mr->hwmr.local_rd = 0; 2929 mr->hwmr.local_wr = 0; 2930 mr->hwmr.mw_bind = 0; 2931 status = ocrdma_build_pbl_tbl(dev, &mr->hwmr); 2932 if (status) 2933 goto pbl_err; 2934 status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0); 2935 if (status) 2936 goto mbx_err; 2937 mr->ibmr.rkey = mr->hwmr.lkey; 2938 mr->ibmr.lkey = mr->hwmr.lkey; 2939 dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] = 2940 (unsigned long) mr; 2941 return &mr->ibmr; 2942 mbx_err: 2943 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); 2944 pbl_err: 2945 kfree(mr->pages); 2946 pl_err: 2947 kfree(mr); 2948 return ERR_PTR(-ENOMEM); 2949 } 2950 2951 static int ocrdma_set_page(struct ib_mr *ibmr, u64 addr) 2952 { 2953 struct ocrdma_mr *mr = get_ocrdma_mr(ibmr); 2954 2955 if (unlikely(mr->npages == mr->hwmr.num_pbes)) 2956 return -ENOMEM; 2957 2958 mr->pages[mr->npages++] = addr; 2959 2960 return 0; 2961 } 2962 2963 int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, 2964 unsigned int *sg_offset) 2965 { 2966 struct ocrdma_mr *mr = get_ocrdma_mr(ibmr); 2967 2968 mr->npages = 0; 2969 2970 return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, ocrdma_set_page); 2971 } 2972