1 /******************************************************************* 2 * This file is part of the Emulex RoCE Device Driver for * 3 * RoCE (RDMA over Converged Ethernet) adapters. * 4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * * 8 * This program is free software; you can redistribute it and/or * 9 * modify it under the terms of version 2 of the GNU General * 10 * Public License as published by the Free Software Foundation. * 11 * This program is distributed in the hope that it will be useful. * 12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 17 * more details, a copy of which can be found in the file COPYING * 18 * included with this package. * 19 * 20 * Contact Information: 21 * linux-drivers@emulex.com 22 * 23 * Emulex 24 * 3333 Susan Street 25 * Costa Mesa, CA 92626 26 *******************************************************************/ 27 28 #include <linux/dma-mapping.h> 29 #include <rdma/ib_verbs.h> 30 #include <rdma/ib_user_verbs.h> 31 #include <rdma/iw_cm.h> 32 #include <rdma/ib_umem.h> 33 #include <rdma/ib_addr.h> 34 35 #include "ocrdma.h" 36 #include "ocrdma_hw.h" 37 #include "ocrdma_verbs.h" 38 #include "ocrdma_abi.h" 39 40 int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) 41 { 42 if (index > 1) 43 return -EINVAL; 44 45 *pkey = 0xffff; 46 return 0; 47 } 48 49 int ocrdma_query_gid(struct ib_device *ibdev, u8 port, 50 int index, union ib_gid *sgid) 51 { 52 struct ocrdma_dev *dev; 53 54 dev = get_ocrdma_dev(ibdev); 55 memset(sgid, 0, sizeof(*sgid)); 56 if (index >= OCRDMA_MAX_SGID) 57 return -EINVAL; 58 59 memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid)); 60 61 return 0; 62 } 63 64 int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr, 65 struct ib_udata *uhw) 66 { 67 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); 68 69 if (uhw->inlen || uhw->outlen) 70 return -EINVAL; 71 72 memset(attr, 0, sizeof *attr); 73 memcpy(&attr->fw_ver, &dev->attr.fw_ver[0], 74 min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver))); 75 ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid); 76 attr->max_mr_size = dev->attr.max_mr_size; 77 attr->page_size_cap = 0xffff000; 78 attr->vendor_id = dev->nic_info.pdev->vendor; 79 attr->vendor_part_id = dev->nic_info.pdev->device; 80 attr->hw_ver = dev->asic_id; 81 attr->max_qp = dev->attr.max_qp; 82 attr->max_ah = OCRDMA_MAX_AH; 83 attr->max_qp_wr = dev->attr.max_wqe; 84 85 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD | 86 IB_DEVICE_RC_RNR_NAK_GEN | 87 IB_DEVICE_SHUTDOWN_PORT | 88 IB_DEVICE_SYS_IMAGE_GUID | 89 IB_DEVICE_LOCAL_DMA_LKEY | 90 IB_DEVICE_MEM_MGT_EXTENSIONS; 91 attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge); 92 attr->max_sge_rd = 0; 93 attr->max_cq = dev->attr.max_cq; 94 attr->max_cqe = dev->attr.max_cqe; 95 attr->max_mr = dev->attr.max_mr; 96 attr->max_mw = dev->attr.max_mw; 97 attr->max_pd = dev->attr.max_pd; 98 attr->atomic_cap = 0; 99 attr->max_fmr = 0; 100 attr->max_map_per_fmr = 0; 101 attr->max_qp_rd_atom = 102 min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp); 103 attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp; 104 attr->max_srq = dev->attr.max_srq; 105 attr->max_srq_sge = 
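/*
 * NOTE: nearly everything reported above is copied from the limits cached in
 * dev->attr at device init, so ib_query_device() issues no mailbox command at
 * call time.  Illustrative user-space view of these limits (libibverbs call,
 * not part of this driver; "ctx" is a hypothetical ibv_context):
 *
 *	struct ibv_device_attr dattr;
 *	if (!ibv_query_device(ctx, &dattr))
 *		printf("max_qp=%d max_cqe=%d max_mr=%d\n",
 *		       dattr.max_qp, dattr.max_cqe, dattr.max_mr);
 */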
dev->attr.max_srq_sge; 106 attr->max_srq_wr = dev->attr.max_rqe; 107 attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay; 108 attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr; 109 attr->max_pkeys = 1; 110 return 0; 111 } 112 113 static inline void get_link_speed_and_width(struct ocrdma_dev *dev, 114 u8 *ib_speed, u8 *ib_width) 115 { 116 int status; 117 u8 speed; 118 119 status = ocrdma_mbx_get_link_speed(dev, &speed); 120 if (status) 121 speed = OCRDMA_PHYS_LINK_SPEED_ZERO; 122 123 switch (speed) { 124 case OCRDMA_PHYS_LINK_SPEED_1GBPS: 125 *ib_speed = IB_SPEED_SDR; 126 *ib_width = IB_WIDTH_1X; 127 break; 128 129 case OCRDMA_PHYS_LINK_SPEED_10GBPS: 130 *ib_speed = IB_SPEED_QDR; 131 *ib_width = IB_WIDTH_1X; 132 break; 133 134 case OCRDMA_PHYS_LINK_SPEED_20GBPS: 135 *ib_speed = IB_SPEED_DDR; 136 *ib_width = IB_WIDTH_4X; 137 break; 138 139 case OCRDMA_PHYS_LINK_SPEED_40GBPS: 140 *ib_speed = IB_SPEED_QDR; 141 *ib_width = IB_WIDTH_4X; 142 break; 143 144 default: 145 /* Unsupported */ 146 *ib_speed = IB_SPEED_SDR; 147 *ib_width = IB_WIDTH_1X; 148 } 149 } 150 151 int ocrdma_query_port(struct ib_device *ibdev, 152 u8 port, struct ib_port_attr *props) 153 { 154 enum ib_port_state port_state; 155 struct ocrdma_dev *dev; 156 struct net_device *netdev; 157 158 dev = get_ocrdma_dev(ibdev); 159 if (port > 1) { 160 pr_err("%s(%d) invalid_port=0x%x\n", __func__, 161 dev->id, port); 162 return -EINVAL; 163 } 164 netdev = dev->nic_info.netdev; 165 if (netif_running(netdev) && netif_oper_up(netdev)) { 166 port_state = IB_PORT_ACTIVE; 167 props->phys_state = 5; 168 } else { 169 port_state = IB_PORT_DOWN; 170 props->phys_state = 3; 171 } 172 props->max_mtu = IB_MTU_4096; 173 props->active_mtu = iboe_get_mtu(netdev->mtu); 174 props->lid = 0; 175 props->lmc = 0; 176 props->sm_lid = 0; 177 props->sm_sl = 0; 178 props->state = port_state; 179 props->port_cap_flags = 180 IB_PORT_CM_SUP | 181 IB_PORT_REINIT_SUP | 182 IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_IP_BASED_GIDS; 183 props->gid_tbl_len = OCRDMA_MAX_SGID; 184 props->pkey_tbl_len = 1; 185 props->bad_pkey_cntr = 0; 186 props->qkey_viol_cntr = 0; 187 get_link_speed_and_width(dev, &props->active_speed, 188 &props->active_width); 189 props->max_msg_sz = 0x80000000; 190 props->max_vl_num = 4; 191 return 0; 192 } 193 194 int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask, 195 struct ib_port_modify *props) 196 { 197 struct ocrdma_dev *dev; 198 199 dev = get_ocrdma_dev(ibdev); 200 if (port > 1) { 201 pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port); 202 return -EINVAL; 203 } 204 return 0; 205 } 206 207 static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, 208 unsigned long len) 209 { 210 struct ocrdma_mm *mm; 211 212 mm = kzalloc(sizeof(*mm), GFP_KERNEL); 213 if (mm == NULL) 214 return -ENOMEM; 215 mm->key.phy_addr = phy_addr; 216 mm->key.len = len; 217 INIT_LIST_HEAD(&mm->entry); 218 219 mutex_lock(&uctx->mm_list_lock); 220 list_add_tail(&mm->entry, &uctx->mm_head); 221 mutex_unlock(&uctx->mm_list_lock); 222 return 0; 223 } 224 225 static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, 226 unsigned long len) 227 { 228 struct ocrdma_mm *mm, *tmp; 229 230 mutex_lock(&uctx->mm_list_lock); 231 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) { 232 if (len != mm->key.len && phy_addr != mm->key.phy_addr) 233 continue; 234 235 list_del(&mm->entry); 236 kfree(mm); 237 break; 238 } 239 mutex_unlock(&uctx->mm_list_lock); 240 } 241 242 static bool 
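/*
 * The uctx->mm_head list maintained by ocrdma_add_mmap()/ocrdma_del_mmap()
 * above records which physical ranges (doorbell pages, DPP pages, queue
 * memory) a given user context is allowed to map; ocrdma_mmap() consults it
 * through the lookup helper below and rejects any offset that was never
 * registered.
 */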
ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
		   unsigned long len)
{
	bool found = false;
	struct ocrdma_mm *mm;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry(mm, &uctx->mm_head, entry) {
		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
			continue;

		found = true;
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
	return found;
}

static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
{
	u16 pd_bitmap_idx = 0;
	const unsigned long *pd_bitmap;

	if (dpp_pool) {
		pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
						    dev->pd_mgr->max_dpp_pd);
		__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
		dev->pd_mgr->pd_dpp_count++;
		if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
			dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
	} else {
		pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
						    dev->pd_mgr->max_normal_pd);
		__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
		dev->pd_mgr->pd_norm_count++;
		if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
			dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
	}
	return pd_bitmap_idx;
}

static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
				     bool dpp_pool)
{
	u16 pd_count;
	u16 pd_bit_index;

	pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
			      dev->pd_mgr->pd_norm_count;
	if (pd_count == 0)
		return -EINVAL;

	if (dpp_pool) {
		pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
		if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
			return -EINVAL;
		} else {
			__clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
			dev->pd_mgr->pd_dpp_count--;
		}
	} else {
		pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
		if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
			return -EINVAL;
		} else {
			__clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
			dev->pd_mgr->pd_norm_count--;
		}
	}

	return 0;
}

/* Must return int: _ocrdma_pd_mgr_put_bitmap() can return -EINVAL, which a
 * u8 return value would silently truncate to a positive number.
 */
static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
			     bool dpp_pool)
{
	int status;

	mutex_lock(&dev->dev_lock);
	status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
	mutex_unlock(&dev->dev_lock);
	return status;
}

static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
	u16 pd_idx = 0;
	int status = 0;

	mutex_lock(&dev->dev_lock);
	if (pd->dpp_enabled) {
		/* try allocating a DPP PD; if none is available, fall back
		 * to a normal PD.
		 */
		if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
			pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
			pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
		} else if (dev->pd_mgr->pd_norm_count <
			   dev->pd_mgr->max_normal_pd) {
			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
			pd->dpp_enabled = false;
		} else {
			status = -EINVAL;
		}
	} else {
		if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
		} else {
			status = -EINVAL;
		}
	}
	mutex_unlock(&dev->dev_lock);
	return status;
}

static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
					  struct ocrdma_ucontext *uctx,
struct ib_udata *udata) 364 { 365 struct ocrdma_pd *pd = NULL; 366 int status = 0; 367 368 pd = kzalloc(sizeof(*pd), GFP_KERNEL); 369 if (!pd) 370 return ERR_PTR(-ENOMEM); 371 372 if (udata && uctx && dev->attr.max_dpp_pds) { 373 pd->dpp_enabled = 374 ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R; 375 pd->num_dpp_qp = 376 pd->dpp_enabled ? (dev->nic_info.db_page_size / 377 dev->attr.wqe_size) : 0; 378 } 379 380 if (dev->pd_mgr->pd_prealloc_valid) { 381 status = ocrdma_get_pd_num(dev, pd); 382 if (status == 0) { 383 return pd; 384 } else { 385 kfree(pd); 386 return ERR_PTR(status); 387 } 388 } 389 390 retry: 391 status = ocrdma_mbx_alloc_pd(dev, pd); 392 if (status) { 393 if (pd->dpp_enabled) { 394 pd->dpp_enabled = false; 395 pd->num_dpp_qp = 0; 396 goto retry; 397 } else { 398 kfree(pd); 399 return ERR_PTR(status); 400 } 401 } 402 403 return pd; 404 } 405 406 static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx, 407 struct ocrdma_pd *pd) 408 { 409 return (uctx->cntxt_pd == pd ? true : false); 410 } 411 412 static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev, 413 struct ocrdma_pd *pd) 414 { 415 int status = 0; 416 417 if (dev->pd_mgr->pd_prealloc_valid) 418 status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled); 419 else 420 status = ocrdma_mbx_dealloc_pd(dev, pd); 421 422 kfree(pd); 423 return status; 424 } 425 426 static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev, 427 struct ocrdma_ucontext *uctx, 428 struct ib_udata *udata) 429 { 430 int status = 0; 431 432 uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata); 433 if (IS_ERR(uctx->cntxt_pd)) { 434 status = PTR_ERR(uctx->cntxt_pd); 435 uctx->cntxt_pd = NULL; 436 goto err; 437 } 438 439 uctx->cntxt_pd->uctx = uctx; 440 uctx->cntxt_pd->ibpd.device = &dev->ibdev; 441 err: 442 return status; 443 } 444 445 static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx) 446 { 447 struct ocrdma_pd *pd = uctx->cntxt_pd; 448 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); 449 450 if (uctx->pd_in_use) { 451 pr_err("%s(%d) Freeing in use pdid=0x%x.\n", 452 __func__, dev->id, pd->id); 453 } 454 uctx->cntxt_pd = NULL; 455 (void)_ocrdma_dealloc_pd(dev, pd); 456 return 0; 457 } 458 459 static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx) 460 { 461 struct ocrdma_pd *pd = NULL; 462 463 mutex_lock(&uctx->mm_list_lock); 464 if (!uctx->pd_in_use) { 465 uctx->pd_in_use = true; 466 pd = uctx->cntxt_pd; 467 } 468 mutex_unlock(&uctx->mm_list_lock); 469 470 return pd; 471 } 472 473 static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx) 474 { 475 mutex_lock(&uctx->mm_list_lock); 476 uctx->pd_in_use = false; 477 mutex_unlock(&uctx->mm_list_lock); 478 } 479 480 struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev, 481 struct ib_udata *udata) 482 { 483 int status; 484 struct ocrdma_ucontext *ctx; 485 struct ocrdma_alloc_ucontext_resp resp; 486 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); 487 struct pci_dev *pdev = dev->nic_info.pdev; 488 u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE); 489 490 if (!udata) 491 return ERR_PTR(-EFAULT); 492 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 493 if (!ctx) 494 return ERR_PTR(-ENOMEM); 495 INIT_LIST_HEAD(&ctx->mm_head); 496 mutex_init(&ctx->mm_list_lock); 497 498 ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len, 499 &ctx->ah_tbl.pa, GFP_KERNEL); 500 if (!ctx->ah_tbl.va) { 501 kfree(ctx); 502 return ERR_PTR(-ENOMEM); 503 } 504 memset(ctx->ah_tbl.va, 0, map_len); 505 ctx->ah_tbl.len = map_len; 506 507 memset(&resp, 0, 
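/*
 * The response prepared below hands the user library the size and physical
 * address of the AH table just allocated, plus per-device limits; the table
 * page itself only becomes visible to user space via ocrdma_mmap() against
 * the key registered with ocrdma_add_mmap() a few lines further down.
 * Illustrative (hypothetical) user-side pairing, assuming the library mmaps
 * the table using the address from this response as the offset and that
 * "cmd_fd" is the uverbs command fd:
 *
 *	ah_tbl = mmap(NULL, resp.ah_tbl_len, PROT_READ | PROT_WRITE,
 *		      MAP_SHARED, cmd_fd, resp.ah_tbl_page);
 */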
sizeof(resp)); 508 resp.ah_tbl_len = ctx->ah_tbl.len; 509 resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va); 510 511 status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len); 512 if (status) 513 goto map_err; 514 515 status = ocrdma_alloc_ucontext_pd(dev, ctx, udata); 516 if (status) 517 goto pd_err; 518 519 resp.dev_id = dev->id; 520 resp.max_inline_data = dev->attr.max_inline_data; 521 resp.wqe_size = dev->attr.wqe_size; 522 resp.rqe_size = dev->attr.rqe_size; 523 resp.dpp_wqe_size = dev->attr.wqe_size; 524 525 memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver)); 526 status = ib_copy_to_udata(udata, &resp, sizeof(resp)); 527 if (status) 528 goto cpy_err; 529 return &ctx->ibucontext; 530 531 cpy_err: 532 pd_err: 533 ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len); 534 map_err: 535 dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va, 536 ctx->ah_tbl.pa); 537 kfree(ctx); 538 return ERR_PTR(status); 539 } 540 541 int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx) 542 { 543 int status = 0; 544 struct ocrdma_mm *mm, *tmp; 545 struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx); 546 struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device); 547 struct pci_dev *pdev = dev->nic_info.pdev; 548 549 status = ocrdma_dealloc_ucontext_pd(uctx); 550 551 ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len); 552 dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va, 553 uctx->ah_tbl.pa); 554 555 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) { 556 list_del(&mm->entry); 557 kfree(mm); 558 } 559 kfree(uctx); 560 return status; 561 } 562 563 int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) 564 { 565 struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context); 566 struct ocrdma_dev *dev = get_ocrdma_dev(context->device); 567 unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT; 568 u64 unmapped_db = (u64) dev->nic_info.unmapped_db; 569 unsigned long len = (vma->vm_end - vma->vm_start); 570 int status = 0; 571 bool found; 572 573 if (vma->vm_start & (PAGE_SIZE - 1)) 574 return -EINVAL; 575 found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len); 576 if (!found) 577 return -EINVAL; 578 579 if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db + 580 dev->nic_info.db_total_size)) && 581 (len <= dev->nic_info.db_page_size)) { 582 if (vma->vm_flags & VM_READ) 583 return -EPERM; 584 585 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 586 status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, 587 len, vma->vm_page_prot); 588 } else if (dev->nic_info.dpp_unmapped_len && 589 (vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) && 590 (vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr + 591 dev->nic_info.dpp_unmapped_len)) && 592 (len <= dev->nic_info.dpp_unmapped_len)) { 593 if (vma->vm_flags & VM_READ) 594 return -EPERM; 595 596 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); 597 status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, 598 len, vma->vm_page_prot); 599 } else { 600 status = remap_pfn_range(vma, vma->vm_start, 601 vma->vm_pgoff, len, vma->vm_page_prot); 602 } 603 return status; 604 } 605 606 static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd, 607 struct ib_ucontext *ib_ctx, 608 struct ib_udata *udata) 609 { 610 int status; 611 u64 db_page_addr; 612 u64 dpp_page_addr = 0; 613 u32 db_page_size; 614 struct ocrdma_alloc_pd_uresp rsp; 615 struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx); 616 617 memset(&rsp, 0, sizeof(rsp)); 618 rsp.id = 
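/*
 * The PD response filled in here carries two mmap keys back to user space:
 * the doorbell page for this PD and, when DPP is enabled, the per-PD DPP
 * write-combining page at dpp_unmapped_addr + pd->id * PAGE_SIZE.  Both
 * ranges are registered with ocrdma_add_mmap() below so that a later
 * ocrdma_mmap() call can validate them.
 */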
pd->id; 619 rsp.dpp_enabled = pd->dpp_enabled; 620 db_page_addr = ocrdma_get_db_addr(dev, pd->id); 621 db_page_size = dev->nic_info.db_page_size; 622 623 status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size); 624 if (status) 625 return status; 626 627 if (pd->dpp_enabled) { 628 dpp_page_addr = dev->nic_info.dpp_unmapped_addr + 629 (pd->id * PAGE_SIZE); 630 status = ocrdma_add_mmap(uctx, dpp_page_addr, 631 PAGE_SIZE); 632 if (status) 633 goto dpp_map_err; 634 rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr); 635 rsp.dpp_page_addr_lo = dpp_page_addr; 636 } 637 638 status = ib_copy_to_udata(udata, &rsp, sizeof(rsp)); 639 if (status) 640 goto ucopy_err; 641 642 pd->uctx = uctx; 643 return 0; 644 645 ucopy_err: 646 if (pd->dpp_enabled) 647 ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE); 648 dpp_map_err: 649 ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size); 650 return status; 651 } 652 653 struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev, 654 struct ib_ucontext *context, 655 struct ib_udata *udata) 656 { 657 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); 658 struct ocrdma_pd *pd; 659 struct ocrdma_ucontext *uctx = NULL; 660 int status; 661 u8 is_uctx_pd = false; 662 663 if (udata && context) { 664 uctx = get_ocrdma_ucontext(context); 665 pd = ocrdma_get_ucontext_pd(uctx); 666 if (pd) { 667 is_uctx_pd = true; 668 goto pd_mapping; 669 } 670 } 671 672 pd = _ocrdma_alloc_pd(dev, uctx, udata); 673 if (IS_ERR(pd)) { 674 status = PTR_ERR(pd); 675 goto exit; 676 } 677 678 pd_mapping: 679 if (udata && context) { 680 status = ocrdma_copy_pd_uresp(dev, pd, context, udata); 681 if (status) 682 goto err; 683 } 684 return &pd->ibpd; 685 686 err: 687 if (is_uctx_pd) { 688 ocrdma_release_ucontext_pd(uctx); 689 } else { 690 status = _ocrdma_dealloc_pd(dev, pd); 691 } 692 exit: 693 return ERR_PTR(status); 694 } 695 696 int ocrdma_dealloc_pd(struct ib_pd *ibpd) 697 { 698 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); 699 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); 700 struct ocrdma_ucontext *uctx = NULL; 701 int status = 0; 702 u64 usr_db; 703 704 uctx = pd->uctx; 705 if (uctx) { 706 u64 dpp_db = dev->nic_info.dpp_unmapped_addr + 707 (pd->id * PAGE_SIZE); 708 if (pd->dpp_enabled) 709 ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE); 710 usr_db = ocrdma_get_db_addr(dev, pd->id); 711 ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size); 712 713 if (is_ucontext_pd(uctx, pd)) { 714 ocrdma_release_ucontext_pd(uctx); 715 return status; 716 } 717 } 718 status = _ocrdma_dealloc_pd(dev, pd); 719 return status; 720 } 721 722 static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr, 723 u32 pdid, int acc, u32 num_pbls, u32 addr_check) 724 { 725 int status; 726 727 mr->hwmr.fr_mr = 0; 728 mr->hwmr.local_rd = 1; 729 mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0; 730 mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0; 731 mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0; 732 mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0; 733 mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 
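/*
 * Access-flag translation used here and again in ocrdma_reg_user_mr(): each
 * IB_ACCESS_* bit requested by the caller is folded into the matching
 * single-bit field of the hardware MR descriptor (local_rd is always set).
 * For reference:
 *
 *	IB_ACCESS_LOCAL_WRITE   -> hwmr.local_wr
 *	IB_ACCESS_REMOTE_READ   -> hwmr.remote_rd
 *	IB_ACCESS_REMOTE_WRITE  -> hwmr.remote_wr
 *	IB_ACCESS_REMOTE_ATOMIC -> hwmr.remote_atomic
 *	IB_ACCESS_MW_BIND       -> hwmr.mw_bind
 */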
1 : 0; 734 mr->hwmr.num_pbls = num_pbls; 735 736 status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check); 737 if (status) 738 return status; 739 740 mr->ibmr.lkey = mr->hwmr.lkey; 741 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd) 742 mr->ibmr.rkey = mr->hwmr.lkey; 743 return 0; 744 } 745 746 struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc) 747 { 748 int status; 749 struct ocrdma_mr *mr; 750 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); 751 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); 752 753 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) { 754 pr_err("%s err, invalid access rights\n", __func__); 755 return ERR_PTR(-EINVAL); 756 } 757 758 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 759 if (!mr) 760 return ERR_PTR(-ENOMEM); 761 762 status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0, 763 OCRDMA_ADDR_CHECK_DISABLE); 764 if (status) { 765 kfree(mr); 766 return ERR_PTR(status); 767 } 768 769 return &mr->ibmr; 770 } 771 772 static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev, 773 struct ocrdma_hw_mr *mr) 774 { 775 struct pci_dev *pdev = dev->nic_info.pdev; 776 int i = 0; 777 778 if (mr->pbl_table) { 779 for (i = 0; i < mr->num_pbls; i++) { 780 if (!mr->pbl_table[i].va) 781 continue; 782 dma_free_coherent(&pdev->dev, mr->pbl_size, 783 mr->pbl_table[i].va, 784 mr->pbl_table[i].pa); 785 } 786 kfree(mr->pbl_table); 787 mr->pbl_table = NULL; 788 } 789 } 790 791 static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr, 792 u32 num_pbes) 793 { 794 u32 num_pbls = 0; 795 u32 idx = 0; 796 int status = 0; 797 u32 pbl_size; 798 799 do { 800 pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx); 801 if (pbl_size > MAX_OCRDMA_PBL_SIZE) { 802 status = -EFAULT; 803 break; 804 } 805 num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64))); 806 num_pbls = num_pbls / (pbl_size / sizeof(u64)); 807 idx++; 808 } while (num_pbls >= dev->attr.max_num_mr_pbl); 809 810 mr->hwmr.num_pbes = num_pbes; 811 mr->hwmr.num_pbls = num_pbls; 812 mr->hwmr.pbl_size = pbl_size; 813 return status; 814 } 815 816 static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr) 817 { 818 int status = 0; 819 int i; 820 u32 dma_len = mr->pbl_size; 821 struct pci_dev *pdev = dev->nic_info.pdev; 822 void *va; 823 dma_addr_t pa; 824 825 mr->pbl_table = kzalloc(sizeof(struct ocrdma_pbl) * 826 mr->num_pbls, GFP_KERNEL); 827 828 if (!mr->pbl_table) 829 return -ENOMEM; 830 831 for (i = 0; i < mr->num_pbls; i++) { 832 va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL); 833 if (!va) { 834 ocrdma_free_mr_pbl_tbl(dev, mr); 835 status = -ENOMEM; 836 break; 837 } 838 memset(va, 0, dma_len); 839 mr->pbl_table[i].va = va; 840 mr->pbl_table[i].pa = pa; 841 } 842 return status; 843 } 844 845 static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr, 846 u32 num_pbes) 847 { 848 struct ocrdma_pbe *pbe; 849 struct scatterlist *sg; 850 struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table; 851 struct ib_umem *umem = mr->umem; 852 int shift, pg_cnt, pages, pbe_cnt, entry, total_num_pbes = 0; 853 854 if (!mr->hwmr.num_pbes) 855 return; 856 857 pbe = (struct ocrdma_pbe *)pbl_tbl->va; 858 pbe_cnt = 0; 859 860 shift = ilog2(umem->page_size); 861 862 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { 863 pages = sg_dma_len(sg) >> shift; 864 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) { 865 /* store the page address in pbe */ 866 pbe->pa_lo = 867 cpu_to_le32(sg_dma_address 868 (sg) + 869 (umem->page_size * pg_cnt)); 870 pbe->pa_hi = 871 cpu_to_le32(upper_32_bits 872 
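/*
 * Each PBE is the 64-bit DMA address of one HW page, stored as two
 * little-endian 32-bit halves (pa_lo/pa_hi).  Pages are walked straight out
 * of the umem scatterlist; when one PBL page fills up, the loop below rolls
 * over to the next PBL allocated by ocrdma_build_pbl_tbl().
 */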
((sg_dma_address 873 (sg) + 874 umem->page_size * pg_cnt))); 875 pbe_cnt += 1; 876 total_num_pbes += 1; 877 pbe++; 878 879 /* if done building pbes, issue the mbx cmd. */ 880 if (total_num_pbes == num_pbes) 881 return; 882 883 /* if the given pbl is full storing the pbes, 884 * move to next pbl. 885 */ 886 if (pbe_cnt == 887 (mr->hwmr.pbl_size / sizeof(u64))) { 888 pbl_tbl++; 889 pbe = (struct ocrdma_pbe *)pbl_tbl->va; 890 pbe_cnt = 0; 891 } 892 893 } 894 } 895 } 896 897 struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, 898 u64 usr_addr, int acc, struct ib_udata *udata) 899 { 900 int status = -ENOMEM; 901 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); 902 struct ocrdma_mr *mr; 903 struct ocrdma_pd *pd; 904 u32 num_pbes; 905 906 pd = get_ocrdma_pd(ibpd); 907 908 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) 909 return ERR_PTR(-EINVAL); 910 911 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 912 if (!mr) 913 return ERR_PTR(status); 914 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0); 915 if (IS_ERR(mr->umem)) { 916 status = -EFAULT; 917 goto umem_err; 918 } 919 num_pbes = ib_umem_page_count(mr->umem); 920 status = ocrdma_get_pbl_info(dev, mr, num_pbes); 921 if (status) 922 goto umem_err; 923 924 mr->hwmr.pbe_size = mr->umem->page_size; 925 mr->hwmr.fbo = ib_umem_offset(mr->umem); 926 mr->hwmr.va = usr_addr; 927 mr->hwmr.len = len; 928 mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0; 929 mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0; 930 mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0; 931 mr->hwmr.local_rd = 1; 932 mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0; 933 status = ocrdma_build_pbl_tbl(dev, &mr->hwmr); 934 if (status) 935 goto umem_err; 936 build_user_pbes(dev, mr, num_pbes); 937 status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc); 938 if (status) 939 goto mbx_err; 940 mr->ibmr.lkey = mr->hwmr.lkey; 941 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd) 942 mr->ibmr.rkey = mr->hwmr.lkey; 943 944 return &mr->ibmr; 945 946 mbx_err: 947 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); 948 umem_err: 949 kfree(mr); 950 return ERR_PTR(status); 951 } 952 953 int ocrdma_dereg_mr(struct ib_mr *ib_mr) 954 { 955 struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr); 956 struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device); 957 958 (void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey); 959 960 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); 961 962 /* it could be user registered memory. */ 963 if (mr->umem) 964 ib_umem_release(mr->umem); 965 kfree(mr); 966 967 /* Don't stop cleanup, in case FW is unresponsive */ 968 if (dev->mqe_ctx.fw_error_state) { 969 pr_err("%s(%d) fw not responding.\n", 970 __func__, dev->id); 971 } 972 return 0; 973 } 974 975 static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq, 976 struct ib_udata *udata, 977 struct ib_ucontext *ib_ctx) 978 { 979 int status; 980 struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx); 981 struct ocrdma_create_cq_uresp uresp; 982 983 memset(&uresp, 0, sizeof(uresp)); 984 uresp.cq_id = cq->id; 985 uresp.page_size = PAGE_ALIGN(cq->len); 986 uresp.num_pages = 1; 987 uresp.max_hw_cqe = cq->max_hw_cqe; 988 uresp.page_addr[0] = virt_to_phys(cq->va); 989 uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id); 990 uresp.db_page_size = dev->nic_info.db_page_size; 991 uresp.phase_change = cq->phase_change ? 
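/*
 * As with the PD response, the CQ response below registers two mmap keys:
 * the CQ doorbell page and the pages backing the CQE ring, so the user
 * library can arm and poll the CQ without extra kernel transitions.  The
 * phase_change flag is taken here to tell user space whether CQE validity
 * is tracked by a phase bit that toggles on each ring wrap (an assumption
 * based on how cq->phase and is_cqe_valid() are used elsewhere in this
 * file).
 */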
1 : 0; 992 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); 993 if (status) { 994 pr_err("%s(%d) copy error cqid=0x%x.\n", 995 __func__, dev->id, cq->id); 996 goto err; 997 } 998 status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size); 999 if (status) 1000 goto err; 1001 status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size); 1002 if (status) { 1003 ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size); 1004 goto err; 1005 } 1006 cq->ucontext = uctx; 1007 err: 1008 return status; 1009 } 1010 1011 struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, 1012 const struct ib_cq_init_attr *attr, 1013 struct ib_ucontext *ib_ctx, 1014 struct ib_udata *udata) 1015 { 1016 int entries = attr->cqe; 1017 struct ocrdma_cq *cq; 1018 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); 1019 struct ocrdma_ucontext *uctx = NULL; 1020 u16 pd_id = 0; 1021 int status; 1022 struct ocrdma_create_cq_ureq ureq; 1023 1024 if (attr->flags) 1025 return ERR_PTR(-EINVAL); 1026 1027 if (udata) { 1028 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) 1029 return ERR_PTR(-EFAULT); 1030 } else 1031 ureq.dpp_cq = 0; 1032 cq = kzalloc(sizeof(*cq), GFP_KERNEL); 1033 if (!cq) 1034 return ERR_PTR(-ENOMEM); 1035 1036 spin_lock_init(&cq->cq_lock); 1037 spin_lock_init(&cq->comp_handler_lock); 1038 INIT_LIST_HEAD(&cq->sq_head); 1039 INIT_LIST_HEAD(&cq->rq_head); 1040 cq->first_arm = true; 1041 1042 if (ib_ctx) { 1043 uctx = get_ocrdma_ucontext(ib_ctx); 1044 pd_id = uctx->cntxt_pd->id; 1045 } 1046 1047 status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id); 1048 if (status) { 1049 kfree(cq); 1050 return ERR_PTR(status); 1051 } 1052 if (ib_ctx) { 1053 status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx); 1054 if (status) 1055 goto ctx_err; 1056 } 1057 cq->phase = OCRDMA_CQE_VALID; 1058 dev->cq_tbl[cq->id] = cq; 1059 return &cq->ibcq; 1060 1061 ctx_err: 1062 ocrdma_mbx_destroy_cq(dev, cq); 1063 kfree(cq); 1064 return ERR_PTR(status); 1065 } 1066 1067 int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt, 1068 struct ib_udata *udata) 1069 { 1070 int status = 0; 1071 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); 1072 1073 if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) { 1074 status = -EINVAL; 1075 return status; 1076 } 1077 ibcq->cqe = new_cnt; 1078 return status; 1079 } 1080 1081 static void ocrdma_flush_cq(struct ocrdma_cq *cq) 1082 { 1083 int cqe_cnt; 1084 int valid_count = 0; 1085 unsigned long flags; 1086 1087 struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device); 1088 struct ocrdma_cqe *cqe = NULL; 1089 1090 cqe = cq->va; 1091 cqe_cnt = cq->cqe_cnt; 1092 1093 /* Last irq might have scheduled a polling thread 1094 * sync-up with it before hard flushing. 
1095 */ 1096 spin_lock_irqsave(&cq->cq_lock, flags); 1097 while (cqe_cnt) { 1098 if (is_cqe_valid(cq, cqe)) 1099 valid_count++; 1100 cqe++; 1101 cqe_cnt--; 1102 } 1103 ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count); 1104 spin_unlock_irqrestore(&cq->cq_lock, flags); 1105 } 1106 1107 int ocrdma_destroy_cq(struct ib_cq *ibcq) 1108 { 1109 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); 1110 struct ocrdma_eq *eq = NULL; 1111 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); 1112 int pdid = 0; 1113 u32 irq, indx; 1114 1115 dev->cq_tbl[cq->id] = NULL; 1116 indx = ocrdma_get_eq_table_index(dev, cq->eqn); 1117 if (indx == -EINVAL) 1118 BUG(); 1119 1120 eq = &dev->eq_tbl[indx]; 1121 irq = ocrdma_get_irq(dev, eq); 1122 synchronize_irq(irq); 1123 ocrdma_flush_cq(cq); 1124 1125 (void)ocrdma_mbx_destroy_cq(dev, cq); 1126 if (cq->ucontext) { 1127 pdid = cq->ucontext->cntxt_pd->id; 1128 ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, 1129 PAGE_ALIGN(cq->len)); 1130 ocrdma_del_mmap(cq->ucontext, 1131 ocrdma_get_db_addr(dev, pdid), 1132 dev->nic_info.db_page_size); 1133 } 1134 1135 kfree(cq); 1136 return 0; 1137 } 1138 1139 static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) 1140 { 1141 int status = -EINVAL; 1142 1143 if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) { 1144 dev->qp_tbl[qp->id] = qp; 1145 status = 0; 1146 } 1147 return status; 1148 } 1149 1150 static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) 1151 { 1152 dev->qp_tbl[qp->id] = NULL; 1153 } 1154 1155 static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev, 1156 struct ib_qp_init_attr *attrs) 1157 { 1158 if ((attrs->qp_type != IB_QPT_GSI) && 1159 (attrs->qp_type != IB_QPT_RC) && 1160 (attrs->qp_type != IB_QPT_UC) && 1161 (attrs->qp_type != IB_QPT_UD)) { 1162 pr_err("%s(%d) unsupported qp type=0x%x requested\n", 1163 __func__, dev->id, attrs->qp_type); 1164 return -EINVAL; 1165 } 1166 /* Skip the check for QP1 to support CM size of 128 */ 1167 if ((attrs->qp_type != IB_QPT_GSI) && 1168 (attrs->cap.max_send_wr > dev->attr.max_wqe)) { 1169 pr_err("%s(%d) unsupported send_wr=0x%x requested\n", 1170 __func__, dev->id, attrs->cap.max_send_wr); 1171 pr_err("%s(%d) supported send_wr=0x%x\n", 1172 __func__, dev->id, dev->attr.max_wqe); 1173 return -EINVAL; 1174 } 1175 if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) { 1176 pr_err("%s(%d) unsupported recv_wr=0x%x requested\n", 1177 __func__, dev->id, attrs->cap.max_recv_wr); 1178 pr_err("%s(%d) supported recv_wr=0x%x\n", 1179 __func__, dev->id, dev->attr.max_rqe); 1180 return -EINVAL; 1181 } 1182 if (attrs->cap.max_inline_data > dev->attr.max_inline_data) { 1183 pr_err("%s(%d) unsupported inline data size=0x%x requested\n", 1184 __func__, dev->id, attrs->cap.max_inline_data); 1185 pr_err("%s(%d) supported inline data size=0x%x\n", 1186 __func__, dev->id, dev->attr.max_inline_data); 1187 return -EINVAL; 1188 } 1189 if (attrs->cap.max_send_sge > dev->attr.max_send_sge) { 1190 pr_err("%s(%d) unsupported send_sge=0x%x requested\n", 1191 __func__, dev->id, attrs->cap.max_send_sge); 1192 pr_err("%s(%d) supported send_sge=0x%x\n", 1193 __func__, dev->id, dev->attr.max_send_sge); 1194 return -EINVAL; 1195 } 1196 if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) { 1197 pr_err("%s(%d) unsupported recv_sge=0x%x requested\n", 1198 __func__, dev->id, attrs->cap.max_recv_sge); 1199 pr_err("%s(%d) supported recv_sge=0x%x\n", 1200 __func__, dev->id, dev->attr.max_recv_sge); 1201 return -EINVAL; 1202 } 1203 /* 
unprivileged user space cannot create special QP */ 1204 if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) { 1205 pr_err 1206 ("%s(%d) Userspace can't create special QPs of type=0x%x\n", 1207 __func__, dev->id, attrs->qp_type); 1208 return -EINVAL; 1209 } 1210 /* allow creating only one GSI type of QP */ 1211 if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) { 1212 pr_err("%s(%d) GSI special QPs already created.\n", 1213 __func__, dev->id); 1214 return -EINVAL; 1215 } 1216 /* verify consumer QPs are not trying to use GSI QP's CQ */ 1217 if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) { 1218 if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) || 1219 (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) { 1220 pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n", 1221 __func__, dev->id); 1222 return -EINVAL; 1223 } 1224 } 1225 return 0; 1226 } 1227 1228 static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp, 1229 struct ib_udata *udata, int dpp_offset, 1230 int dpp_credit_lmt, int srq) 1231 { 1232 int status = 0; 1233 u64 usr_db; 1234 struct ocrdma_create_qp_uresp uresp; 1235 struct ocrdma_pd *pd = qp->pd; 1236 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); 1237 1238 memset(&uresp, 0, sizeof(uresp)); 1239 usr_db = dev->nic_info.unmapped_db + 1240 (pd->id * dev->nic_info.db_page_size); 1241 uresp.qp_id = qp->id; 1242 uresp.sq_dbid = qp->sq.dbid; 1243 uresp.num_sq_pages = 1; 1244 uresp.sq_page_size = PAGE_ALIGN(qp->sq.len); 1245 uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va); 1246 uresp.num_wqe_allocated = qp->sq.max_cnt; 1247 if (!srq) { 1248 uresp.rq_dbid = qp->rq.dbid; 1249 uresp.num_rq_pages = 1; 1250 uresp.rq_page_size = PAGE_ALIGN(qp->rq.len); 1251 uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va); 1252 uresp.num_rqe_allocated = qp->rq.max_cnt; 1253 } 1254 uresp.db_page_addr = usr_db; 1255 uresp.db_page_size = dev->nic_info.db_page_size; 1256 uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET; 1257 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET; 1258 uresp.db_shift = OCRDMA_DB_RQ_SHIFT; 1259 1260 if (qp->dpp_enabled) { 1261 uresp.dpp_credit = dpp_credit_lmt; 1262 uresp.dpp_offset = dpp_offset; 1263 } 1264 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); 1265 if (status) { 1266 pr_err("%s(%d) user copy error.\n", __func__, dev->id); 1267 goto err; 1268 } 1269 status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0], 1270 uresp.sq_page_size); 1271 if (status) 1272 goto err; 1273 1274 if (!srq) { 1275 status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0], 1276 uresp.rq_page_size); 1277 if (status) 1278 goto rq_map_err; 1279 } 1280 return status; 1281 rq_map_err: 1282 ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size); 1283 err: 1284 return status; 1285 } 1286 1287 static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp, 1288 struct ocrdma_pd *pd) 1289 { 1290 if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) { 1291 qp->sq_db = dev->nic_info.db + 1292 (pd->id * dev->nic_info.db_page_size) + 1293 OCRDMA_DB_GEN2_SQ_OFFSET; 1294 qp->rq_db = dev->nic_info.db + 1295 (pd->id * dev->nic_info.db_page_size) + 1296 OCRDMA_DB_GEN2_RQ_OFFSET; 1297 } else { 1298 qp->sq_db = dev->nic_info.db + 1299 (pd->id * dev->nic_info.db_page_size) + 1300 OCRDMA_DB_SQ_OFFSET; 1301 qp->rq_db = dev->nic_info.db + 1302 (pd->id * dev->nic_info.db_page_size) + 1303 OCRDMA_DB_RQ_OFFSET; 1304 } 1305 } 1306 1307 static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp) 1308 { 1309 qp->wqe_wr_id_tbl = 1310 kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt, 
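/*
 * These wr_id shadow tables are only needed for kernel QPs; for user QPs the
 * library tracks wr_ids itself (see the "managed in library" comment at the
 * call site).  The open-coded size * count multiplication could instead use
 * kcalloc() to get overflow checking for free, e.g. (sketch only):
 *
 *	qp->wqe_wr_id_tbl = kcalloc(qp->sq.max_cnt,
 *				    sizeof(*qp->wqe_wr_id_tbl), GFP_KERNEL);
 */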
1311 GFP_KERNEL); 1312 if (qp->wqe_wr_id_tbl == NULL) 1313 return -ENOMEM; 1314 qp->rqe_wr_id_tbl = 1315 kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL); 1316 if (qp->rqe_wr_id_tbl == NULL) 1317 return -ENOMEM; 1318 1319 return 0; 1320 } 1321 1322 static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp, 1323 struct ocrdma_pd *pd, 1324 struct ib_qp_init_attr *attrs) 1325 { 1326 qp->pd = pd; 1327 spin_lock_init(&qp->q_lock); 1328 INIT_LIST_HEAD(&qp->sq_entry); 1329 INIT_LIST_HEAD(&qp->rq_entry); 1330 1331 qp->qp_type = attrs->qp_type; 1332 qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR; 1333 qp->max_inline_data = attrs->cap.max_inline_data; 1334 qp->sq.max_sges = attrs->cap.max_send_sge; 1335 qp->rq.max_sges = attrs->cap.max_recv_sge; 1336 qp->state = OCRDMA_QPS_RST; 1337 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false; 1338 } 1339 1340 static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev, 1341 struct ib_qp_init_attr *attrs) 1342 { 1343 if (attrs->qp_type == IB_QPT_GSI) { 1344 dev->gsi_qp_created = 1; 1345 dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq); 1346 dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq); 1347 } 1348 } 1349 1350 struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd, 1351 struct ib_qp_init_attr *attrs, 1352 struct ib_udata *udata) 1353 { 1354 int status; 1355 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); 1356 struct ocrdma_qp *qp; 1357 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); 1358 struct ocrdma_create_qp_ureq ureq; 1359 u16 dpp_credit_lmt, dpp_offset; 1360 1361 status = ocrdma_check_qp_params(ibpd, dev, attrs); 1362 if (status) 1363 goto gen_err; 1364 1365 memset(&ureq, 0, sizeof(ureq)); 1366 if (udata) { 1367 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) 1368 return ERR_PTR(-EFAULT); 1369 } 1370 qp = kzalloc(sizeof(*qp), GFP_KERNEL); 1371 if (!qp) { 1372 status = -ENOMEM; 1373 goto gen_err; 1374 } 1375 ocrdma_set_qp_init_params(qp, pd, attrs); 1376 if (udata == NULL) 1377 qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 | 1378 OCRDMA_QP_FAST_REG); 1379 1380 mutex_lock(&dev->dev_lock); 1381 status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq, 1382 ureq.dpp_cq_id, 1383 &dpp_offset, &dpp_credit_lmt); 1384 if (status) 1385 goto mbx_err; 1386 1387 /* user space QP's wr_id table are managed in library */ 1388 if (udata == NULL) { 1389 status = ocrdma_alloc_wr_id_tbl(qp); 1390 if (status) 1391 goto map_err; 1392 } 1393 1394 status = ocrdma_add_qpn_map(dev, qp); 1395 if (status) 1396 goto map_err; 1397 ocrdma_set_qp_db(dev, qp, pd); 1398 if (udata) { 1399 status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset, 1400 dpp_credit_lmt, 1401 (attrs->srq != NULL)); 1402 if (status) 1403 goto cpy_err; 1404 } 1405 ocrdma_store_gsi_qp_cq(dev, attrs); 1406 qp->ibqp.qp_num = qp->id; 1407 mutex_unlock(&dev->dev_lock); 1408 return &qp->ibqp; 1409 1410 cpy_err: 1411 ocrdma_del_qpn_map(dev, qp); 1412 map_err: 1413 ocrdma_mbx_destroy_qp(dev, qp); 1414 mbx_err: 1415 mutex_unlock(&dev->dev_lock); 1416 kfree(qp->wqe_wr_id_tbl); 1417 kfree(qp->rqe_wr_id_tbl); 1418 kfree(qp); 1419 pr_err("%s(%d) error=%d\n", __func__, dev->id, status); 1420 gen_err: 1421 return ERR_PTR(status); 1422 } 1423 1424 int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 1425 int attr_mask) 1426 { 1427 int status = 0; 1428 struct ocrdma_qp *qp; 1429 struct ocrdma_dev *dev; 1430 enum ib_qp_state old_qps; 1431 1432 qp = get_ocrdma_qp(ibqp); 1433 dev = get_ocrdma_dev(ibqp->device); 1434 if (attr_mask & IB_QP_STATE) 1435 status = ocrdma_qp_state_change(qp, 
						     attr->qp_state, &old_qps);
	/* if the new and previous states are the same, the hw doesn't need
	 * to be told about it.
	 */
	if (status < 0)
		return status;
	status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);

	return status;
}

int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_udata *udata)
{
	unsigned long flags;
	int status = -EINVAL;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps, new_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = get_ocrdma_dev(ibqp->device);

	/* synchronize with multiple contexts trying to change or retrieve
	 * the qp state.
	 */
	mutex_lock(&dev->dev_lock);
	/* synchronize with wqe, rqe posting and cqe processing contexts */
	spin_lock_irqsave(&qp->q_lock, flags);
	old_qps = get_ibqp_state(qp->state);
	if (attr_mask & IB_QP_STATE)
		new_qps = attr->qp_state;
	else
		new_qps = old_qps;
	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
				IB_LINK_LAYER_ETHERNET)) {
		pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
		       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
		       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
		       old_qps, new_qps);
		goto param_err;
	}

	status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
	if (status > 0)
		status = 0;
param_err:
	mutex_unlock(&dev->dev_lock);
	return status;
}

static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
{
	switch (mtu) {
	case 256:
		return IB_MTU_256;
	case 512:
		return IB_MTU_512;
	case 1024:
		return IB_MTU_1024;
	case 2048:
		return IB_MTU_2048;
	case 4096:
		return IB_MTU_4096;
	default:
		return IB_MTU_1024;
	}
}

static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
{
	int ib_qp_acc_flags = 0;

	if (qp_cap_flags & OCRDMA_QP_INB_WR)
		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
	if (qp_cap_flags & OCRDMA_QP_INB_RD)
		ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
	return ib_qp_acc_flags;
}

int ocrdma_query_qp(struct ib_qp *ibqp,
		    struct ib_qp_attr *qp_attr,
		    int attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	int status;
	u32 qp_state;
	struct ocrdma_qp_params params;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);

	memset(&params, 0, sizeof(params));
	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_query_qp(dev, qp, &params);
	mutex_unlock(&dev->dev_lock);
	if (status)
		goto mbx_err;
	if (qp->qp_type == IB_QPT_UD)
		qp_attr->qkey = params.qkey;
	qp_attr->path_mtu =
		ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
				       OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
				       OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
	qp_attr->path_mig_state = IB_MIG_MIGRATED;
	qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
	qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
	qp_attr->dest_qp_num =
		params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;

	qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
	qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
	qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
	qp_attr->cap.max_send_sge = qp->sq.max_sges;
	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
	qp_attr->cap.max_inline_data = qp->max_inline_data;
	qp_init_attr->cap = qp_attr->cap;
	memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
	       sizeof(params.dgid));
	qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl &
					  OCRDMA_QP_PARAMS_FLOW_LABEL_MASK;
	qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
	qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn &
					  OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
					  OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
	qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
					      OCRDMA_QP_PARAMS_TCLASS_MASK) >>
					      OCRDMA_QP_PARAMS_TCLASS_SHIFT;

	qp_attr->ah_attr.ah_flags = IB_AH_GRH;
	qp_attr->ah_attr.port_num = 1;
	qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl &
			       OCRDMA_QP_PARAMS_SL_MASK) >>
			       OCRDMA_QP_PARAMS_SL_SHIFT;
	qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
	qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
	qp_attr->retry_cnt =
		(params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
		OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
	qp_attr->min_rnr_timer = 0;
	qp_attr->pkey_index = 0;
	qp_attr->port_num = 1;
	qp_attr->ah_attr.src_path_bits = 0;
	qp_attr->ah_attr.static_rate = 0;
	qp_attr->alt_pkey_index = 0;
	qp_attr->alt_port_num = 0;
	qp_attr->alt_timeout = 0;
	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
	qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
		    OCRDMA_QP_PARAMS_STATE_SHIFT;
	qp_attr->qp_state = get_ibqp_state(qp_state);
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
	qp_attr->max_dest_rd_atomic =
		params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
	qp_attr->max_rd_atomic =
		params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
	qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
					OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ?
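/*
 * The query path above simply unpacks the packed fields returned by the
 * QUERY_QP mailbox response: each attribute lives in a sub-field of a 32-bit
 * word and is recovered as (word & MASK) >> SHIFT, mirroring how the modify
 * path packs them on the way down.
 */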
1 : 0; 1596 /* Sync driver QP state with FW */ 1597 ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL); 1598 mbx_err: 1599 return status; 1600 } 1601 1602 static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx) 1603 { 1604 unsigned int i = idx / 32; 1605 u32 mask = (1U << (idx % 32)); 1606 1607 srq->idx_bit_fields[i] ^= mask; 1608 } 1609 1610 static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q) 1611 { 1612 return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt; 1613 } 1614 1615 static int is_hw_sq_empty(struct ocrdma_qp *qp) 1616 { 1617 return (qp->sq.tail == qp->sq.head); 1618 } 1619 1620 static int is_hw_rq_empty(struct ocrdma_qp *qp) 1621 { 1622 return (qp->rq.tail == qp->rq.head); 1623 } 1624 1625 static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q) 1626 { 1627 return q->va + (q->head * q->entry_size); 1628 } 1629 1630 static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q, 1631 u32 idx) 1632 { 1633 return q->va + (idx * q->entry_size); 1634 } 1635 1636 static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q) 1637 { 1638 q->head = (q->head + 1) & q->max_wqe_idx; 1639 } 1640 1641 static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q) 1642 { 1643 q->tail = (q->tail + 1) & q->max_wqe_idx; 1644 } 1645 1646 /* discard the cqe for a given QP */ 1647 static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq) 1648 { 1649 unsigned long cq_flags; 1650 unsigned long flags; 1651 int discard_cnt = 0; 1652 u32 cur_getp, stop_getp; 1653 struct ocrdma_cqe *cqe; 1654 u32 qpn = 0, wqe_idx = 0; 1655 1656 spin_lock_irqsave(&cq->cq_lock, cq_flags); 1657 1658 /* traverse through the CQEs in the hw CQ, 1659 * find the matching CQE for a given qp, 1660 * mark the matching one discarded by clearing qpn. 1661 * ring the doorbell in the poll_cq() as 1662 * we don't complete out of order cqe. 1663 */ 1664 1665 cur_getp = cq->getp; 1666 /* find upto when do we reap the cq. */ 1667 stop_getp = cur_getp; 1668 do { 1669 if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp))) 1670 break; 1671 1672 cqe = cq->va + cur_getp; 1673 /* if (a) done reaping whole hw cq, or 1674 * (b) qp_xq becomes empty. 1675 * then exit 1676 */ 1677 qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK; 1678 /* if previously discarded cqe found, skip that too. */ 1679 /* check for matching qp */ 1680 if (qpn == 0 || qpn != qp->id) 1681 goto skip_cqe; 1682 1683 if (is_cqe_for_sq(cqe)) { 1684 ocrdma_hwq_inc_tail(&qp->sq); 1685 } else { 1686 if (qp->srq) { 1687 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >> 1688 OCRDMA_CQE_BUFTAG_SHIFT) & 1689 qp->srq->rq.max_wqe_idx; 1690 if (wqe_idx < 1) 1691 BUG(); 1692 spin_lock_irqsave(&qp->srq->q_lock, flags); 1693 ocrdma_hwq_inc_tail(&qp->srq->rq); 1694 ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1); 1695 spin_unlock_irqrestore(&qp->srq->q_lock, flags); 1696 1697 } else { 1698 ocrdma_hwq_inc_tail(&qp->rq); 1699 } 1700 } 1701 /* mark cqe discarded so that it is not picked up later 1702 * in the poll_cq(). 
1703 */ 1704 discard_cnt += 1; 1705 cqe->cmn.qpn = 0; 1706 skip_cqe: 1707 cur_getp = (cur_getp + 1) % cq->max_hw_cqe; 1708 } while (cur_getp != stop_getp); 1709 spin_unlock_irqrestore(&cq->cq_lock, cq_flags); 1710 } 1711 1712 void ocrdma_del_flush_qp(struct ocrdma_qp *qp) 1713 { 1714 int found = false; 1715 unsigned long flags; 1716 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); 1717 /* sync with any active CQ poll */ 1718 1719 spin_lock_irqsave(&dev->flush_q_lock, flags); 1720 found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp); 1721 if (found) 1722 list_del(&qp->sq_entry); 1723 if (!qp->srq) { 1724 found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp); 1725 if (found) 1726 list_del(&qp->rq_entry); 1727 } 1728 spin_unlock_irqrestore(&dev->flush_q_lock, flags); 1729 } 1730 1731 int ocrdma_destroy_qp(struct ib_qp *ibqp) 1732 { 1733 struct ocrdma_pd *pd; 1734 struct ocrdma_qp *qp; 1735 struct ocrdma_dev *dev; 1736 struct ib_qp_attr attrs; 1737 int attr_mask; 1738 unsigned long flags; 1739 1740 qp = get_ocrdma_qp(ibqp); 1741 dev = get_ocrdma_dev(ibqp->device); 1742 1743 pd = qp->pd; 1744 1745 /* change the QP state to ERROR */ 1746 if (qp->state != OCRDMA_QPS_RST) { 1747 attrs.qp_state = IB_QPS_ERR; 1748 attr_mask = IB_QP_STATE; 1749 _ocrdma_modify_qp(ibqp, &attrs, attr_mask); 1750 } 1751 /* ensure that CQEs for newly created QP (whose id may be same with 1752 * one which just getting destroyed are same), dont get 1753 * discarded until the old CQEs are discarded. 1754 */ 1755 mutex_lock(&dev->dev_lock); 1756 (void) ocrdma_mbx_destroy_qp(dev, qp); 1757 1758 /* 1759 * acquire CQ lock while destroy is in progress, in order to 1760 * protect against proessing in-flight CQEs for this QP. 1761 */ 1762 spin_lock_irqsave(&qp->sq_cq->cq_lock, flags); 1763 if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) 1764 spin_lock(&qp->rq_cq->cq_lock); 1765 1766 ocrdma_del_qpn_map(dev, qp); 1767 1768 if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) 1769 spin_unlock(&qp->rq_cq->cq_lock); 1770 spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags); 1771 1772 if (!pd->uctx) { 1773 ocrdma_discard_cqes(qp, qp->sq_cq); 1774 ocrdma_discard_cqes(qp, qp->rq_cq); 1775 } 1776 mutex_unlock(&dev->dev_lock); 1777 1778 if (pd->uctx) { 1779 ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa, 1780 PAGE_ALIGN(qp->sq.len)); 1781 if (!qp->srq) 1782 ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa, 1783 PAGE_ALIGN(qp->rq.len)); 1784 } 1785 1786 ocrdma_del_flush_qp(qp); 1787 1788 kfree(qp->wqe_wr_id_tbl); 1789 kfree(qp->rqe_wr_id_tbl); 1790 kfree(qp); 1791 return 0; 1792 } 1793 1794 static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq, 1795 struct ib_udata *udata) 1796 { 1797 int status; 1798 struct ocrdma_create_srq_uresp uresp; 1799 1800 memset(&uresp, 0, sizeof(uresp)); 1801 uresp.rq_dbid = srq->rq.dbid; 1802 uresp.num_rq_pages = 1; 1803 uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va); 1804 uresp.rq_page_size = srq->rq.len; 1805 uresp.db_page_addr = dev->nic_info.unmapped_db + 1806 (srq->pd->id * dev->nic_info.db_page_size); 1807 uresp.db_page_size = dev->nic_info.db_page_size; 1808 uresp.num_rqe_allocated = srq->rq.max_cnt; 1809 if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) { 1810 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET; 1811 uresp.db_shift = 24; 1812 } else { 1813 uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET; 1814 uresp.db_shift = 16; 1815 } 1816 1817 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); 1818 if (status) 1819 return status; 1820 status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0], 
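/*
 * Note the per-ASIC doorbell layout in ocrdma_copy_srq_uresp() below: on
 * GEN_SKH_R parts the SRQ RQ doorbell uses the GEN2 offset with a db_shift
 * of 24, while older parts use the original offset with a db_shift of 16.
 * User space is expected to honour db_rq_offset and db_shift from this
 * response when ringing the SRQ doorbell.
 */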
1821 uresp.rq_page_size); 1822 if (status) 1823 return status; 1824 return status; 1825 } 1826 1827 struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd, 1828 struct ib_srq_init_attr *init_attr, 1829 struct ib_udata *udata) 1830 { 1831 int status = -ENOMEM; 1832 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); 1833 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); 1834 struct ocrdma_srq *srq; 1835 1836 if (init_attr->attr.max_sge > dev->attr.max_recv_sge) 1837 return ERR_PTR(-EINVAL); 1838 if (init_attr->attr.max_wr > dev->attr.max_rqe) 1839 return ERR_PTR(-EINVAL); 1840 1841 srq = kzalloc(sizeof(*srq), GFP_KERNEL); 1842 if (!srq) 1843 return ERR_PTR(status); 1844 1845 spin_lock_init(&srq->q_lock); 1846 srq->pd = pd; 1847 srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size); 1848 status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd); 1849 if (status) 1850 goto err; 1851 1852 if (udata == NULL) { 1853 srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt, 1854 GFP_KERNEL); 1855 if (srq->rqe_wr_id_tbl == NULL) 1856 goto arm_err; 1857 1858 srq->bit_fields_len = (srq->rq.max_cnt / 32) + 1859 (srq->rq.max_cnt % 32 ? 1 : 0); 1860 srq->idx_bit_fields = 1861 kmalloc(srq->bit_fields_len * sizeof(u32), GFP_KERNEL); 1862 if (srq->idx_bit_fields == NULL) 1863 goto arm_err; 1864 memset(srq->idx_bit_fields, 0xff, 1865 srq->bit_fields_len * sizeof(u32)); 1866 } 1867 1868 if (init_attr->attr.srq_limit) { 1869 status = ocrdma_mbx_modify_srq(srq, &init_attr->attr); 1870 if (status) 1871 goto arm_err; 1872 } 1873 1874 if (udata) { 1875 status = ocrdma_copy_srq_uresp(dev, srq, udata); 1876 if (status) 1877 goto arm_err; 1878 } 1879 1880 return &srq->ibsrq; 1881 1882 arm_err: 1883 ocrdma_mbx_destroy_srq(dev, srq); 1884 err: 1885 kfree(srq->rqe_wr_id_tbl); 1886 kfree(srq->idx_bit_fields); 1887 kfree(srq); 1888 return ERR_PTR(status); 1889 } 1890 1891 int ocrdma_modify_srq(struct ib_srq *ibsrq, 1892 struct ib_srq_attr *srq_attr, 1893 enum ib_srq_attr_mask srq_attr_mask, 1894 struct ib_udata *udata) 1895 { 1896 int status = 0; 1897 struct ocrdma_srq *srq; 1898 1899 srq = get_ocrdma_srq(ibsrq); 1900 if (srq_attr_mask & IB_SRQ_MAX_WR) 1901 status = -EINVAL; 1902 else 1903 status = ocrdma_mbx_modify_srq(srq, srq_attr); 1904 return status; 1905 } 1906 1907 int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) 1908 { 1909 int status; 1910 struct ocrdma_srq *srq; 1911 1912 srq = get_ocrdma_srq(ibsrq); 1913 status = ocrdma_mbx_query_srq(srq, srq_attr); 1914 return status; 1915 } 1916 1917 int ocrdma_destroy_srq(struct ib_srq *ibsrq) 1918 { 1919 int status; 1920 struct ocrdma_srq *srq; 1921 struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device); 1922 1923 srq = get_ocrdma_srq(ibsrq); 1924 1925 status = ocrdma_mbx_destroy_srq(dev, srq); 1926 1927 if (srq->pd->uctx) 1928 ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa, 1929 PAGE_ALIGN(srq->rq.len)); 1930 1931 kfree(srq->idx_bit_fields); 1932 kfree(srq->rqe_wr_id_tbl); 1933 kfree(srq); 1934 return status; 1935 } 1936 1937 /* unprivileged verbs and their support functions. 
*/ 1938 static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp, 1939 struct ocrdma_hdr_wqe *hdr, 1940 struct ib_send_wr *wr) 1941 { 1942 struct ocrdma_ewqe_ud_hdr *ud_hdr = 1943 (struct ocrdma_ewqe_ud_hdr *)(hdr + 1); 1944 struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah); 1945 1946 ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn; 1947 if (qp->qp_type == IB_QPT_GSI) 1948 ud_hdr->qkey = qp->qkey; 1949 else 1950 ud_hdr->qkey = wr->wr.ud.remote_qkey; 1951 ud_hdr->rsvd_ahid = ah->id; 1952 if (ah->av->valid & OCRDMA_AV_VLAN_VALID) 1953 hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT); 1954 } 1955 1956 static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr, 1957 struct ocrdma_sge *sge, int num_sge, 1958 struct ib_sge *sg_list) 1959 { 1960 int i; 1961 1962 for (i = 0; i < num_sge; i++) { 1963 sge[i].lrkey = sg_list[i].lkey; 1964 sge[i].addr_lo = sg_list[i].addr; 1965 sge[i].addr_hi = upper_32_bits(sg_list[i].addr); 1966 sge[i].len = sg_list[i].length; 1967 hdr->total_len += sg_list[i].length; 1968 } 1969 if (num_sge == 0) 1970 memset(sge, 0, sizeof(*sge)); 1971 } 1972 1973 static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge) 1974 { 1975 uint32_t total_len = 0, i; 1976 1977 for (i = 0; i < num_sge; i++) 1978 total_len += sg_list[i].length; 1979 return total_len; 1980 } 1981 1982 1983 static int ocrdma_build_inline_sges(struct ocrdma_qp *qp, 1984 struct ocrdma_hdr_wqe *hdr, 1985 struct ocrdma_sge *sge, 1986 struct ib_send_wr *wr, u32 wqe_size) 1987 { 1988 int i; 1989 char *dpp_addr; 1990 1991 if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) { 1992 hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge); 1993 if (unlikely(hdr->total_len > qp->max_inline_data)) { 1994 pr_err("%s() supported_len=0x%x,\n" 1995 " unsupported len req=0x%x\n", __func__, 1996 qp->max_inline_data, hdr->total_len); 1997 return -EINVAL; 1998 } 1999 dpp_addr = (char *)sge; 2000 for (i = 0; i < wr->num_sge; i++) { 2001 memcpy(dpp_addr, 2002 (void *)(unsigned long)wr->sg_list[i].addr, 2003 wr->sg_list[i].length); 2004 dpp_addr += wr->sg_list[i].length; 2005 } 2006 2007 wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES); 2008 if (0 == hdr->total_len) 2009 wqe_size += sizeof(struct ocrdma_sge); 2010 hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT); 2011 } else { 2012 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list); 2013 if (wr->num_sge) 2014 wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge)); 2015 else 2016 wqe_size += sizeof(struct ocrdma_sge); 2017 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT); 2018 } 2019 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT); 2020 return 0; 2021 } 2022 2023 static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, 2024 struct ib_send_wr *wr) 2025 { 2026 int status; 2027 struct ocrdma_sge *sge; 2028 u32 wqe_size = sizeof(*hdr); 2029 2030 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) { 2031 ocrdma_build_ud_hdr(qp, hdr, wr); 2032 sge = (struct ocrdma_sge *)(hdr + 2); 2033 wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr); 2034 } else { 2035 sge = (struct ocrdma_sge *)(hdr + 1); 2036 } 2037 2038 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size); 2039 return status; 2040 } 2041 2042 static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, 2043 struct ib_send_wr *wr) 2044 { 2045 int status; 2046 struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1); 2047 struct ocrdma_sge *sge = ext_rw + 1; 2048 u32 wqe_size = sizeof(*hdr) + 
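/*
 * RDMA WRITE/READ WQE layout used here: the generic header is immediately
 * followed by one ocrdma_sge carrying the remote address and rkey (ext_rw),
 * and only after that come the local data SGEs (or the inlined payload).
 * The final WQE size is encoded into hdr->cw in units of OCRDMA_WQE_STRIDE
 * by ocrdma_build_inline_sges() and ocrdma_build_read().
 */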
sizeof(*ext_rw); 2049 2050 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size); 2051 if (status) 2052 return status; 2053 ext_rw->addr_lo = wr->wr.rdma.remote_addr; 2054 ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr); 2055 ext_rw->lrkey = wr->wr.rdma.rkey; 2056 ext_rw->len = hdr->total_len; 2057 return 0; 2058 } 2059 2060 static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, 2061 struct ib_send_wr *wr) 2062 { 2063 struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1); 2064 struct ocrdma_sge *sge = ext_rw + 1; 2065 u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) + 2066 sizeof(struct ocrdma_hdr_wqe); 2067 2068 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list); 2069 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT); 2070 hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT); 2071 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT); 2072 2073 ext_rw->addr_lo = wr->wr.rdma.remote_addr; 2074 ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr); 2075 ext_rw->lrkey = wr->wr.rdma.rkey; 2076 ext_rw->len = hdr->total_len; 2077 } 2078 2079 static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl, 2080 struct ocrdma_hw_mr *hwmr) 2081 { 2082 int i; 2083 u64 buf_addr = 0; 2084 int num_pbes; 2085 struct ocrdma_pbe *pbe; 2086 2087 pbe = (struct ocrdma_pbe *)pbl_tbl->va; 2088 num_pbes = 0; 2089 2090 /* go through the OS phy regions & fill hw pbe entries into pbls. */ 2091 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) { 2092 /* number of pbes can be more for one OS buf, when 2093 * buffers are of different sizes. 2094 * split the ib_buf to one or more pbes. 2095 */ 2096 buf_addr = wr->wr.fast_reg.page_list->page_list[i]; 2097 pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK)); 2098 pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr)); 2099 num_pbes += 1; 2100 pbe++; 2101 2102 /* if the pbl is full storing the pbes, 2103 * move to next pbl. 
2104 */ 2105 if (num_pbes == (hwmr->pbl_size/sizeof(u64))) { 2106 pbl_tbl++; 2107 pbe = (struct ocrdma_pbe *)pbl_tbl->va; 2108 } 2109 } 2110 return; 2111 } 2112 2113 static int get_encoded_page_size(int pg_sz) 2114 { 2115 /* Max size is 256M 4096 << 16 */ 2116 int i = 0; 2117 for (; i < 17; i++) 2118 if (pg_sz == (4096 << i)) 2119 break; 2120 return i; 2121 } 2122 2123 2124 static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, 2125 struct ib_send_wr *wr) 2126 { 2127 u64 fbo; 2128 struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1); 2129 struct ocrdma_mr *mr; 2130 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); 2131 u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr); 2132 2133 wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES); 2134 2135 if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr) 2136 return -EINVAL; 2137 2138 hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT); 2139 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT); 2140 2141 if (wr->wr.fast_reg.page_list_len == 0) 2142 BUG(); 2143 if (wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE) 2144 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR; 2145 if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE) 2146 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR; 2147 if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ) 2148 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD; 2149 hdr->lkey = wr->wr.fast_reg.rkey; 2150 hdr->total_len = wr->wr.fast_reg.length; 2151 2152 fbo = wr->wr.fast_reg.iova_start - 2153 (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK); 2154 2155 fast_reg->va_hi = upper_32_bits(wr->wr.fast_reg.iova_start); 2156 fast_reg->va_lo = (u32) (wr->wr.fast_reg.iova_start & 0xffffffff); 2157 fast_reg->fbo_hi = upper_32_bits(fbo); 2158 fast_reg->fbo_lo = (u32) fbo & 0xffffffff; 2159 fast_reg->num_sges = wr->wr.fast_reg.page_list_len; 2160 fast_reg->size_sge = 2161 get_encoded_page_size(1 << wr->wr.fast_reg.page_shift); 2162 mr = (struct ocrdma_mr *) (unsigned long) 2163 dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)]; 2164 build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr); 2165 return 0; 2166 } 2167 2168 static void ocrdma_ring_sq_db(struct ocrdma_qp *qp) 2169 { 2170 u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT); 2171 2172 iowrite32(val, qp->sq_db); 2173 } 2174 2175 int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 2176 struct ib_send_wr **bad_wr) 2177 { 2178 int status = 0; 2179 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); 2180 struct ocrdma_hdr_wqe *hdr; 2181 unsigned long flags; 2182 2183 spin_lock_irqsave(&qp->q_lock, flags); 2184 if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) { 2185 spin_unlock_irqrestore(&qp->q_lock, flags); 2186 *bad_wr = wr; 2187 return -EINVAL; 2188 } 2189 2190 while (wr) { 2191 if (qp->qp_type == IB_QPT_UD && 2192 (wr->opcode != IB_WR_SEND && 2193 wr->opcode != IB_WR_SEND_WITH_IMM)) { 2194 *bad_wr = wr; 2195 status = -EINVAL; 2196 break; 2197 } 2198 if (ocrdma_hwq_free_cnt(&qp->sq) == 0 || 2199 wr->num_sge > qp->sq.max_sges) { 2200 *bad_wr = wr; 2201 status = -ENOMEM; 2202 break; 2203 } 2204 hdr = ocrdma_hwq_head(&qp->sq); 2205 hdr->cw = 0; 2206 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled) 2207 hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT); 2208 if (wr->send_flags & IB_SEND_FENCE) 2209 hdr->cw |= 2210 (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT); 2211 if (wr->send_flags & IB_SEND_SOLICITED) 2212 hdr->cw |= 2213 (OCRDMA_FLAG_SOLICIT << 
					OCRDMA_WQE_FLAGS_SHIFT);
		hdr->total_len = 0;
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_IMM:
			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
			hdr->immdt = ntohl(wr->ex.imm_data);
			/* fall through */
		case IB_WR_SEND:
			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
			status = ocrdma_build_send(qp, hdr, wr);
			break;
		case IB_WR_SEND_WITH_INV:
			hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
			hdr->lkey = wr->ex.invalidate_rkey;
			status = ocrdma_build_send(qp, hdr, wr);
			break;
		case IB_WR_RDMA_WRITE_WITH_IMM:
			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
			hdr->immdt = ntohl(wr->ex.imm_data);
			/* fall through */
		case IB_WR_RDMA_WRITE:
			hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
			status = ocrdma_build_write(qp, hdr, wr);
			break;
		case IB_WR_RDMA_READ:
			ocrdma_build_read(qp, hdr, wr);
			break;
		case IB_WR_LOCAL_INV:
			hdr->cw |=
			    (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
			hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
				     sizeof(struct ocrdma_sge)) /
				    OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
			hdr->lkey = wr->ex.invalidate_rkey;
			break;
		case IB_WR_FAST_REG_MR:
			status = ocrdma_build_fr(qp, hdr, wr);
			break;
		default:
			status = -EINVAL;
			break;
		}
		if (status) {
			*bad_wr = wr;
			break;
		}
		if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
		else
			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
		qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
		ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
					 OCRDMA_WQE_SIZE_MASK) *
					OCRDMA_WQE_STRIDE);
		/* make sure wqe is written before adapter can access it */
		wmb();
		/* inform hw to start processing it */
		ocrdma_ring_sq_db(qp);

		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&qp->sq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->q_lock, flags);
	return status;
}

static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
{
	u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);

	iowrite32(val, qp->rq_db);
}

static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
			     u16 tag)
{
	u32 wqe_size = 0;
	struct ocrdma_sge *sge;

	if (wr->num_sge)
		wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
	else
		wqe_size = sizeof(*sge) + sizeof(*rqe);

	rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
		   OCRDMA_WQE_SIZE_SHIFT);
	rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
	rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
	rqe->total_len = 0;
	rqe->rsvd_tag = tag;
	sge = (struct ocrdma_sge *)(rqe + 1);
	ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
	ocrdma_cpu_to_le32(rqe, wqe_size);
}

int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		     struct ib_recv_wr **bad_wr)
{
	int status = 0;
	unsigned long flags;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_hdr_wqe *rqe;

	spin_lock_irqsave(&qp->q_lock, flags);
	if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}
	while (wr) {
		if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
		    wr->num_sge > qp->rq.max_sges) {
*bad_wr = wr; 2324 status = -ENOMEM; 2325 break; 2326 } 2327 rqe = ocrdma_hwq_head(&qp->rq); 2328 ocrdma_build_rqe(rqe, wr, 0); 2329 2330 qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id; 2331 /* make sure rqe is written before adapter can access it */ 2332 wmb(); 2333 2334 /* inform hw to start processing it */ 2335 ocrdma_ring_rq_db(qp); 2336 2337 /* update pointer, counter for next wr */ 2338 ocrdma_hwq_inc_head(&qp->rq); 2339 wr = wr->next; 2340 } 2341 spin_unlock_irqrestore(&qp->q_lock, flags); 2342 return status; 2343 } 2344 2345 /* cqe for srq's rqe can potentially arrive out of order. 2346 * index gives the entry in the shadow table where to store 2347 * the wr_id. tag/index is returned in cqe to reference back 2348 * for a given rqe. 2349 */ 2350 static int ocrdma_srq_get_idx(struct ocrdma_srq *srq) 2351 { 2352 int row = 0; 2353 int indx = 0; 2354 2355 for (row = 0; row < srq->bit_fields_len; row++) { 2356 if (srq->idx_bit_fields[row]) { 2357 indx = ffs(srq->idx_bit_fields[row]); 2358 indx = (row * 32) + (indx - 1); 2359 if (indx >= srq->rq.max_cnt) 2360 BUG(); 2361 ocrdma_srq_toggle_bit(srq, indx); 2362 break; 2363 } 2364 } 2365 2366 if (row == srq->bit_fields_len) 2367 BUG(); 2368 return indx + 1; /* Use from index 1 */ 2369 } 2370 2371 static void ocrdma_ring_srq_db(struct ocrdma_srq *srq) 2372 { 2373 u32 val = srq->rq.dbid | (1 << 16); 2374 2375 iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET); 2376 } 2377 2378 int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, 2379 struct ib_recv_wr **bad_wr) 2380 { 2381 int status = 0; 2382 unsigned long flags; 2383 struct ocrdma_srq *srq; 2384 struct ocrdma_hdr_wqe *rqe; 2385 u16 tag; 2386 2387 srq = get_ocrdma_srq(ibsrq); 2388 2389 spin_lock_irqsave(&srq->q_lock, flags); 2390 while (wr) { 2391 if (ocrdma_hwq_free_cnt(&srq->rq) == 0 || 2392 wr->num_sge > srq->rq.max_sges) { 2393 status = -ENOMEM; 2394 *bad_wr = wr; 2395 break; 2396 } 2397 tag = ocrdma_srq_get_idx(srq); 2398 rqe = ocrdma_hwq_head(&srq->rq); 2399 ocrdma_build_rqe(rqe, wr, tag); 2400 2401 srq->rqe_wr_id_tbl[tag] = wr->wr_id; 2402 /* make sure rqe is written before adapter can perform DMA */ 2403 wmb(); 2404 /* inform hw to start processing it */ 2405 ocrdma_ring_srq_db(srq); 2406 /* update pointer, counter for next wr */ 2407 ocrdma_hwq_inc_head(&srq->rq); 2408 wr = wr->next; 2409 } 2410 spin_unlock_irqrestore(&srq->q_lock, flags); 2411 return status; 2412 } 2413 2414 static enum ib_wc_status ocrdma_to_ibwc_err(u16 status) 2415 { 2416 enum ib_wc_status ibwc_status; 2417 2418 switch (status) { 2419 case OCRDMA_CQE_GENERAL_ERR: 2420 ibwc_status = IB_WC_GENERAL_ERR; 2421 break; 2422 case OCRDMA_CQE_LOC_LEN_ERR: 2423 ibwc_status = IB_WC_LOC_LEN_ERR; 2424 break; 2425 case OCRDMA_CQE_LOC_QP_OP_ERR: 2426 ibwc_status = IB_WC_LOC_QP_OP_ERR; 2427 break; 2428 case OCRDMA_CQE_LOC_EEC_OP_ERR: 2429 ibwc_status = IB_WC_LOC_EEC_OP_ERR; 2430 break; 2431 case OCRDMA_CQE_LOC_PROT_ERR: 2432 ibwc_status = IB_WC_LOC_PROT_ERR; 2433 break; 2434 case OCRDMA_CQE_WR_FLUSH_ERR: 2435 ibwc_status = IB_WC_WR_FLUSH_ERR; 2436 break; 2437 case OCRDMA_CQE_MW_BIND_ERR: 2438 ibwc_status = IB_WC_MW_BIND_ERR; 2439 break; 2440 case OCRDMA_CQE_BAD_RESP_ERR: 2441 ibwc_status = IB_WC_BAD_RESP_ERR; 2442 break; 2443 case OCRDMA_CQE_LOC_ACCESS_ERR: 2444 ibwc_status = IB_WC_LOC_ACCESS_ERR; 2445 break; 2446 case OCRDMA_CQE_REM_INV_REQ_ERR: 2447 ibwc_status = IB_WC_REM_INV_REQ_ERR; 2448 break; 2449 case OCRDMA_CQE_REM_ACCESS_ERR: 2450 ibwc_status = IB_WC_REM_ACCESS_ERR; 2451 break; 2452 case 
OCRDMA_CQE_REM_OP_ERR: 2453 ibwc_status = IB_WC_REM_OP_ERR; 2454 break; 2455 case OCRDMA_CQE_RETRY_EXC_ERR: 2456 ibwc_status = IB_WC_RETRY_EXC_ERR; 2457 break; 2458 case OCRDMA_CQE_RNR_RETRY_EXC_ERR: 2459 ibwc_status = IB_WC_RNR_RETRY_EXC_ERR; 2460 break; 2461 case OCRDMA_CQE_LOC_RDD_VIOL_ERR: 2462 ibwc_status = IB_WC_LOC_RDD_VIOL_ERR; 2463 break; 2464 case OCRDMA_CQE_REM_INV_RD_REQ_ERR: 2465 ibwc_status = IB_WC_REM_INV_RD_REQ_ERR; 2466 break; 2467 case OCRDMA_CQE_REM_ABORT_ERR: 2468 ibwc_status = IB_WC_REM_ABORT_ERR; 2469 break; 2470 case OCRDMA_CQE_INV_EECN_ERR: 2471 ibwc_status = IB_WC_INV_EECN_ERR; 2472 break; 2473 case OCRDMA_CQE_INV_EEC_STATE_ERR: 2474 ibwc_status = IB_WC_INV_EEC_STATE_ERR; 2475 break; 2476 case OCRDMA_CQE_FATAL_ERR: 2477 ibwc_status = IB_WC_FATAL_ERR; 2478 break; 2479 case OCRDMA_CQE_RESP_TIMEOUT_ERR: 2480 ibwc_status = IB_WC_RESP_TIMEOUT_ERR; 2481 break; 2482 default: 2483 ibwc_status = IB_WC_GENERAL_ERR; 2484 break; 2485 } 2486 return ibwc_status; 2487 } 2488 2489 static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc, 2490 u32 wqe_idx) 2491 { 2492 struct ocrdma_hdr_wqe *hdr; 2493 struct ocrdma_sge *rw; 2494 int opcode; 2495 2496 hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx); 2497 2498 ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid; 2499 /* Undo the hdr->cw swap */ 2500 opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK; 2501 switch (opcode) { 2502 case OCRDMA_WRITE: 2503 ibwc->opcode = IB_WC_RDMA_WRITE; 2504 break; 2505 case OCRDMA_READ: 2506 rw = (struct ocrdma_sge *)(hdr + 1); 2507 ibwc->opcode = IB_WC_RDMA_READ; 2508 ibwc->byte_len = rw->len; 2509 break; 2510 case OCRDMA_SEND: 2511 ibwc->opcode = IB_WC_SEND; 2512 break; 2513 case OCRDMA_FR_MR: 2514 ibwc->opcode = IB_WC_FAST_REG_MR; 2515 break; 2516 case OCRDMA_LKEY_INV: 2517 ibwc->opcode = IB_WC_LOCAL_INV; 2518 break; 2519 default: 2520 ibwc->status = IB_WC_GENERAL_ERR; 2521 pr_err("%s() invalid opcode received = 0x%x\n", 2522 __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK); 2523 break; 2524 } 2525 } 2526 2527 static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp, 2528 struct ocrdma_cqe *cqe) 2529 { 2530 if (is_cqe_for_sq(cqe)) { 2531 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( 2532 cqe->flags_status_srcqpn) & 2533 ~OCRDMA_CQE_STATUS_MASK); 2534 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( 2535 cqe->flags_status_srcqpn) | 2536 (OCRDMA_CQE_WR_FLUSH_ERR << 2537 OCRDMA_CQE_STATUS_SHIFT)); 2538 } else { 2539 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) { 2540 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( 2541 cqe->flags_status_srcqpn) & 2542 ~OCRDMA_CQE_UD_STATUS_MASK); 2543 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( 2544 cqe->flags_status_srcqpn) | 2545 (OCRDMA_CQE_WR_FLUSH_ERR << 2546 OCRDMA_CQE_UD_STATUS_SHIFT)); 2547 } else { 2548 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( 2549 cqe->flags_status_srcqpn) & 2550 ~OCRDMA_CQE_STATUS_MASK); 2551 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( 2552 cqe->flags_status_srcqpn) | 2553 (OCRDMA_CQE_WR_FLUSH_ERR << 2554 OCRDMA_CQE_STATUS_SHIFT)); 2555 } 2556 } 2557 } 2558 2559 static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, 2560 struct ocrdma_qp *qp, int status) 2561 { 2562 bool expand = false; 2563 2564 ibwc->byte_len = 0; 2565 ibwc->qp = &qp->ibqp; 2566 ibwc->status = ocrdma_to_ibwc_err(status); 2567 2568 ocrdma_flush_qp(qp); 2569 ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL); 2570 2571 /* if wqe/rqe pending for which cqe needs to be returned, 2572 * trigger 
inflating it. 2573 */ 2574 if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) { 2575 expand = true; 2576 ocrdma_set_cqe_status_flushed(qp, cqe); 2577 } 2578 return expand; 2579 } 2580 2581 static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, 2582 struct ocrdma_qp *qp, int status) 2583 { 2584 ibwc->opcode = IB_WC_RECV; 2585 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail]; 2586 ocrdma_hwq_inc_tail(&qp->rq); 2587 2588 return ocrdma_update_err_cqe(ibwc, cqe, qp, status); 2589 } 2590 2591 static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, 2592 struct ocrdma_qp *qp, int status) 2593 { 2594 ocrdma_update_wc(qp, ibwc, qp->sq.tail); 2595 ocrdma_hwq_inc_tail(&qp->sq); 2596 2597 return ocrdma_update_err_cqe(ibwc, cqe, qp, status); 2598 } 2599 2600 2601 static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp, 2602 struct ocrdma_cqe *cqe, struct ib_wc *ibwc, 2603 bool *polled, bool *stop) 2604 { 2605 bool expand; 2606 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); 2607 int status = (le32_to_cpu(cqe->flags_status_srcqpn) & 2608 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT; 2609 if (status < OCRDMA_MAX_CQE_ERR) 2610 atomic_inc(&dev->cqe_err_stats[status]); 2611 2612 /* when hw sq is empty, but rq is not empty, so we continue 2613 * to keep the cqe in order to get the cq event again. 2614 */ 2615 if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) { 2616 /* when cq for rq and sq is same, it is safe to return 2617 * flush cqe for RQEs. 2618 */ 2619 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) { 2620 *polled = true; 2621 status = OCRDMA_CQE_WR_FLUSH_ERR; 2622 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status); 2623 } else { 2624 /* stop processing further cqe as this cqe is used for 2625 * triggering cq event on buddy cq of RQ. 2626 * When QP is destroyed, this cqe will be removed 2627 * from the cq's hardware q. 
2628 */ 2629 *polled = false; 2630 *stop = true; 2631 expand = false; 2632 } 2633 } else if (is_hw_sq_empty(qp)) { 2634 /* Do nothing */ 2635 expand = false; 2636 *polled = false; 2637 *stop = false; 2638 } else { 2639 *polled = true; 2640 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status); 2641 } 2642 return expand; 2643 } 2644 2645 static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp, 2646 struct ocrdma_cqe *cqe, 2647 struct ib_wc *ibwc, bool *polled) 2648 { 2649 bool expand = false; 2650 int tail = qp->sq.tail; 2651 u32 wqe_idx; 2652 2653 if (!qp->wqe_wr_id_tbl[tail].signaled) { 2654 *polled = false; /* WC cannot be consumed yet */ 2655 } else { 2656 ibwc->status = IB_WC_SUCCESS; 2657 ibwc->wc_flags = 0; 2658 ibwc->qp = &qp->ibqp; 2659 ocrdma_update_wc(qp, ibwc, tail); 2660 *polled = true; 2661 } 2662 wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) & 2663 OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx; 2664 if (tail != wqe_idx) 2665 expand = true; /* Coalesced CQE can't be consumed yet */ 2666 2667 ocrdma_hwq_inc_tail(&qp->sq); 2668 return expand; 2669 } 2670 2671 static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, 2672 struct ib_wc *ibwc, bool *polled, bool *stop) 2673 { 2674 int status; 2675 bool expand; 2676 2677 status = (le32_to_cpu(cqe->flags_status_srcqpn) & 2678 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT; 2679 2680 if (status == OCRDMA_CQE_SUCCESS) 2681 expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled); 2682 else 2683 expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop); 2684 return expand; 2685 } 2686 2687 static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe) 2688 { 2689 int status; 2690 2691 status = (le32_to_cpu(cqe->flags_status_srcqpn) & 2692 OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT; 2693 ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) & 2694 OCRDMA_CQE_SRCQP_MASK; 2695 ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) & 2696 OCRDMA_CQE_PKEY_MASK; 2697 ibwc->wc_flags = IB_WC_GRH; 2698 ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >> 2699 OCRDMA_CQE_UD_XFER_LEN_SHIFT); 2700 return status; 2701 } 2702 2703 static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc, 2704 struct ocrdma_cqe *cqe, 2705 struct ocrdma_qp *qp) 2706 { 2707 unsigned long flags; 2708 struct ocrdma_srq *srq; 2709 u32 wqe_idx; 2710 2711 srq = get_ocrdma_srq(qp->ibqp.srq); 2712 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >> 2713 OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx; 2714 if (wqe_idx < 1) 2715 BUG(); 2716 2717 ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx]; 2718 spin_lock_irqsave(&srq->q_lock, flags); 2719 ocrdma_srq_toggle_bit(srq, wqe_idx - 1); 2720 spin_unlock_irqrestore(&srq->q_lock, flags); 2721 ocrdma_hwq_inc_tail(&srq->rq); 2722 } 2723 2724 static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, 2725 struct ib_wc *ibwc, bool *polled, bool *stop, 2726 int status) 2727 { 2728 bool expand; 2729 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); 2730 2731 if (status < OCRDMA_MAX_CQE_ERR) 2732 atomic_inc(&dev->cqe_err_stats[status]); 2733 2734 /* when hw_rq is empty, but wq is not empty, so continue 2735 * to keep the cqe to get the cq event again. 
2736 */ 2737 if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) { 2738 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) { 2739 *polled = true; 2740 status = OCRDMA_CQE_WR_FLUSH_ERR; 2741 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status); 2742 } else { 2743 *polled = false; 2744 *stop = true; 2745 expand = false; 2746 } 2747 } else if (is_hw_rq_empty(qp)) { 2748 /* Do nothing */ 2749 expand = false; 2750 *polled = false; 2751 *stop = false; 2752 } else { 2753 *polled = true; 2754 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status); 2755 } 2756 return expand; 2757 } 2758 2759 static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp, 2760 struct ocrdma_cqe *cqe, struct ib_wc *ibwc) 2761 { 2762 ibwc->opcode = IB_WC_RECV; 2763 ibwc->qp = &qp->ibqp; 2764 ibwc->status = IB_WC_SUCCESS; 2765 2766 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) 2767 ocrdma_update_ud_rcqe(ibwc, cqe); 2768 else 2769 ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen); 2770 2771 if (is_cqe_imm(cqe)) { 2772 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt)); 2773 ibwc->wc_flags |= IB_WC_WITH_IMM; 2774 } else if (is_cqe_wr_imm(cqe)) { 2775 ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM; 2776 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt)); 2777 ibwc->wc_flags |= IB_WC_WITH_IMM; 2778 } else if (is_cqe_invalidated(cqe)) { 2779 ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt); 2780 ibwc->wc_flags |= IB_WC_WITH_INVALIDATE; 2781 } 2782 if (qp->ibqp.srq) { 2783 ocrdma_update_free_srq_cqe(ibwc, cqe, qp); 2784 } else { 2785 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail]; 2786 ocrdma_hwq_inc_tail(&qp->rq); 2787 } 2788 } 2789 2790 static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, 2791 struct ib_wc *ibwc, bool *polled, bool *stop) 2792 { 2793 int status; 2794 bool expand = false; 2795 2796 ibwc->wc_flags = 0; 2797 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) { 2798 status = (le32_to_cpu(cqe->flags_status_srcqpn) & 2799 OCRDMA_CQE_UD_STATUS_MASK) >> 2800 OCRDMA_CQE_UD_STATUS_SHIFT; 2801 } else { 2802 status = (le32_to_cpu(cqe->flags_status_srcqpn) & 2803 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT; 2804 } 2805 2806 if (status == OCRDMA_CQE_SUCCESS) { 2807 *polled = true; 2808 ocrdma_poll_success_rcqe(qp, cqe, ibwc); 2809 } else { 2810 expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop, 2811 status); 2812 } 2813 return expand; 2814 } 2815 2816 static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe, 2817 u16 cur_getp) 2818 { 2819 if (cq->phase_change) { 2820 if (cur_getp == 0) 2821 cq->phase = (~cq->phase & OCRDMA_CQE_VALID); 2822 } else { 2823 /* clear valid bit */ 2824 cqe->flags_status_srcqpn = 0; 2825 } 2826 } 2827 2828 static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries, 2829 struct ib_wc *ibwc) 2830 { 2831 u16 qpn = 0; 2832 int i = 0; 2833 bool expand = false; 2834 int polled_hw_cqes = 0; 2835 struct ocrdma_qp *qp = NULL; 2836 struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device); 2837 struct ocrdma_cqe *cqe; 2838 u16 cur_getp; bool polled = false; bool stop = false; 2839 2840 cur_getp = cq->getp; 2841 while (num_entries) { 2842 cqe = cq->va + cur_getp; 2843 /* check whether valid cqe or not */ 2844 if (!is_cqe_valid(cq, cqe)) 2845 break; 2846 qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK); 2847 /* ignore discarded cqe */ 2848 if (qpn == 0) 2849 goto skip_cqe; 2850 qp = dev->qp_tbl[qpn]; 2851 BUG_ON(qp == NULL); 2852 2853 if (is_cqe_for_sq(cqe)) { 2854 expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled, 2855 
&stop); 2856 } else { 2857 expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled, 2858 &stop); 2859 } 2860 if (expand) 2861 goto expand_cqe; 2862 if (stop) 2863 goto stop_cqe; 2864 /* clear qpn to avoid duplicate processing by discard_cqe() */ 2865 cqe->cmn.qpn = 0; 2866 skip_cqe: 2867 polled_hw_cqes += 1; 2868 cur_getp = (cur_getp + 1) % cq->max_hw_cqe; 2869 ocrdma_change_cq_phase(cq, cqe, cur_getp); 2870 expand_cqe: 2871 if (polled) { 2872 num_entries -= 1; 2873 i += 1; 2874 ibwc = ibwc + 1; 2875 polled = false; 2876 } 2877 } 2878 stop_cqe: 2879 cq->getp = cur_getp; 2880 if (cq->deferred_arm) { 2881 ocrdma_ring_cq_db(dev, cq->id, true, cq->deferred_sol, 2882 polled_hw_cqes); 2883 cq->deferred_arm = false; 2884 cq->deferred_sol = false; 2885 } else { 2886 /* We need to pop the CQE. No need to arm */ 2887 ocrdma_ring_cq_db(dev, cq->id, false, cq->deferred_sol, 2888 polled_hw_cqes); 2889 cq->deferred_sol = false; 2890 } 2891 2892 return i; 2893 } 2894 2895 /* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */ 2896 static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries, 2897 struct ocrdma_qp *qp, struct ib_wc *ibwc) 2898 { 2899 int err_cqes = 0; 2900 2901 while (num_entries) { 2902 if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp)) 2903 break; 2904 if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) { 2905 ocrdma_update_wc(qp, ibwc, qp->sq.tail); 2906 ocrdma_hwq_inc_tail(&qp->sq); 2907 } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) { 2908 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail]; 2909 ocrdma_hwq_inc_tail(&qp->rq); 2910 } else { 2911 return err_cqes; 2912 } 2913 ibwc->byte_len = 0; 2914 ibwc->status = IB_WC_WR_FLUSH_ERR; 2915 ibwc = ibwc + 1; 2916 err_cqes += 1; 2917 num_entries -= 1; 2918 } 2919 return err_cqes; 2920 } 2921 2922 int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) 2923 { 2924 int cqes_to_poll = num_entries; 2925 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); 2926 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); 2927 int num_os_cqe = 0, err_cqes = 0; 2928 struct ocrdma_qp *qp; 2929 unsigned long flags; 2930 2931 /* poll cqes from adapter CQ */ 2932 spin_lock_irqsave(&cq->cq_lock, flags); 2933 num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc); 2934 spin_unlock_irqrestore(&cq->cq_lock, flags); 2935 cqes_to_poll -= num_os_cqe; 2936 2937 if (cqes_to_poll) { 2938 wc = wc + num_os_cqe; 2939 /* adapter returns single error cqe when qp moves to 2940 * error state. So insert error cqes with wc_status as 2941 * FLUSHED for pending WQEs and RQEs of QP's SQ and RQ 2942 * respectively which uses this CQ. 
2943 */ 2944 spin_lock_irqsave(&dev->flush_q_lock, flags); 2945 list_for_each_entry(qp, &cq->sq_head, sq_entry) { 2946 if (cqes_to_poll == 0) 2947 break; 2948 err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc); 2949 cqes_to_poll -= err_cqes; 2950 num_os_cqe += err_cqes; 2951 wc = wc + err_cqes; 2952 } 2953 spin_unlock_irqrestore(&dev->flush_q_lock, flags); 2954 } 2955 return num_os_cqe; 2956 } 2957 2958 int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags) 2959 { 2960 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); 2961 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); 2962 u16 cq_id; 2963 unsigned long flags; 2964 bool arm_needed = false, sol_needed = false; 2965 2966 cq_id = cq->id; 2967 2968 spin_lock_irqsave(&cq->cq_lock, flags); 2969 if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED) 2970 arm_needed = true; 2971 if (cq_flags & IB_CQ_SOLICITED) 2972 sol_needed = true; 2973 2974 if (cq->first_arm) { 2975 ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0); 2976 cq->first_arm = false; 2977 } 2978 2979 cq->deferred_arm = true; 2980 cq->deferred_sol = sol_needed; 2981 spin_unlock_irqrestore(&cq->cq_lock, flags); 2982 2983 return 0; 2984 } 2985 2986 struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len) 2987 { 2988 int status; 2989 struct ocrdma_mr *mr; 2990 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); 2991 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); 2992 2993 if (max_page_list_len > dev->attr.max_pages_per_frmr) 2994 return ERR_PTR(-EINVAL); 2995 2996 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 2997 if (!mr) 2998 return ERR_PTR(-ENOMEM); 2999 3000 status = ocrdma_get_pbl_info(dev, mr, max_page_list_len); 3001 if (status) 3002 goto pbl_err; 3003 mr->hwmr.fr_mr = 1; 3004 mr->hwmr.remote_rd = 0; 3005 mr->hwmr.remote_wr = 0; 3006 mr->hwmr.local_rd = 0; 3007 mr->hwmr.local_wr = 0; 3008 mr->hwmr.mw_bind = 0; 3009 status = ocrdma_build_pbl_tbl(dev, &mr->hwmr); 3010 if (status) 3011 goto pbl_err; 3012 status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0); 3013 if (status) 3014 goto mbx_err; 3015 mr->ibmr.rkey = mr->hwmr.lkey; 3016 mr->ibmr.lkey = mr->hwmr.lkey; 3017 dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] = 3018 (unsigned long) mr; 3019 return &mr->ibmr; 3020 mbx_err: 3021 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); 3022 pbl_err: 3023 kfree(mr); 3024 return ERR_PTR(-ENOMEM); 3025 } 3026 3027 struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device 3028 *ibdev, 3029 int page_list_len) 3030 { 3031 struct ib_fast_reg_page_list *frmr_list; 3032 int size; 3033 3034 size = sizeof(*frmr_list) + (page_list_len * sizeof(u64)); 3035 frmr_list = kzalloc(size, GFP_KERNEL); 3036 if (!frmr_list) 3037 return ERR_PTR(-ENOMEM); 3038 frmr_list->page_list = (u64 *)(frmr_list + 1); 3039 return frmr_list; 3040 } 3041 3042 void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list) 3043 { 3044 kfree(page_list); 3045 } 3046 3047 #define MAX_KERNEL_PBE_SIZE 65536 3048 static inline int count_kernel_pbes(struct ib_phys_buf *buf_list, 3049 int buf_cnt, u32 *pbe_size) 3050 { 3051 u64 total_size = 0; 3052 u64 buf_size = 0; 3053 int i; 3054 *pbe_size = roundup(buf_list[0].size, PAGE_SIZE); 3055 *pbe_size = roundup_pow_of_two(*pbe_size); 3056 3057 /* find the smallest PBE size that we can have */ 3058 for (i = 0; i < buf_cnt; i++) { 3059 /* first addr may not be page aligned, so ignore checking */ 3060 if ((i != 0) && ((buf_list[i].addr & ~PAGE_MASK) || 3061 (buf_list[i].size & ~PAGE_MASK))) { 3062 return 0; 3063 } 3064 
		/* if the configured PBE size is greater than the chosen one,
		 * reduce the PBE size.
		 */
		buf_size = roundup(buf_list[i].size, PAGE_SIZE);
		/* pbe_size has to be a power-of-two multiple of 4K:
		 * 4K, 8K, 16K, ...
		 */
		buf_size = roundup_pow_of_two(buf_size);
		if (*pbe_size > buf_size)
			*pbe_size = buf_size;

		total_size += buf_size;
	}
	*pbe_size = *pbe_size > MAX_KERNEL_PBE_SIZE ?
	    (MAX_KERNEL_PBE_SIZE) : (*pbe_size);

	/* num_pbes = total_size / (*pbe_size);  this is implemented below. */

	return total_size >> ilog2(*pbe_size);
}

static void build_kernel_pbes(struct ib_phys_buf *buf_list, int ib_buf_cnt,
			      u32 pbe_size, struct ocrdma_pbl *pbl_tbl,
			      struct ocrdma_hw_mr *hwmr)
{
	int i;
	int idx;
	int pbes_per_buf = 0;
	u64 buf_addr = 0;
	int num_pbes;
	struct ocrdma_pbe *pbe;
	int total_num_pbes = 0;

	if (!hwmr->num_pbes)
		return;

	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
	num_pbes = 0;

	/* go through the OS phy regions & fill hw pbe entries into pbls. */
	for (i = 0; i < ib_buf_cnt; i++) {
		buf_addr = buf_list[i].addr;
		pbes_per_buf =
		    roundup_pow_of_two(roundup(buf_list[i].size, PAGE_SIZE)) /
		    pbe_size;
		hwmr->len += buf_list[i].size;
		/* number of pbes can be more for one OS buf, when
		 * buffers are of different sizes.
		 * split the ib_buf to one or more pbes.
		 */
		for (idx = 0; idx < pbes_per_buf; idx++) {
			/* we always program page aligned addresses;
			 * the first unaligned address is taken care of
			 * by the fbo.
			 */
			if (i == 0) {
				/* for non zero fbo, assign the
				 * start of the page.
				 */
				pbe->pa_lo =
				    cpu_to_le32((u32) (buf_addr & PAGE_MASK));
				pbe->pa_hi =
				    cpu_to_le32((u32) upper_32_bits(buf_addr));
			} else {
				pbe->pa_lo =
				    cpu_to_le32((u32) (buf_addr & 0xffffffff));
				pbe->pa_hi =
				    cpu_to_le32((u32) upper_32_bits(buf_addr));
			}
			buf_addr += pbe_size;
			num_pbes += 1;
			total_num_pbes += 1;
			pbe++;

			if (total_num_pbes == hwmr->num_pbes)
				goto mr_tbl_done;
			/* if the pbl is full storing the pbes,
			 * move to next pbl.
			 */
			if (num_pbes == (hwmr->pbl_size / sizeof(u64))) {
				pbl_tbl++;
				pbe = (struct ocrdma_pbe *)pbl_tbl->va;
				num_pbes = 0;
			}
		}
	}
mr_tbl_done:
	return;
}

struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *ibpd,
				   struct ib_phys_buf *buf_list,
				   int buf_cnt, int acc, u64 *iova_start)
{
	int status = -ENOMEM;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	u32 num_pbes;
	u32 pbe_size = 0;

	if ((acc & IB_ACCESS_REMOTE_WRITE) && !(acc & IB_ACCESS_LOCAL_WRITE))
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(status);

	num_pbes = count_kernel_pbes(buf_list, buf_cnt, &pbe_size);
	if (num_pbes == 0) {
		status = -EINVAL;
		goto pbl_err;
	}
	status = ocrdma_get_pbl_info(dev, mr, num_pbes);
	if (status)
		goto pbl_err;

	mr->hwmr.pbe_size = pbe_size;
	mr->hwmr.fbo = *iova_start - (buf_list[0].addr & PAGE_MASK);
	mr->hwmr.va = *iova_start;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ?
1 : 0; 3185 mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0; 3186 mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0; 3187 mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0; 3188 3189 status = ocrdma_build_pbl_tbl(dev, &mr->hwmr); 3190 if (status) 3191 goto pbl_err; 3192 build_kernel_pbes(buf_list, buf_cnt, pbe_size, mr->hwmr.pbl_table, 3193 &mr->hwmr); 3194 status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc); 3195 if (status) 3196 goto mbx_err; 3197 3198 mr->ibmr.lkey = mr->hwmr.lkey; 3199 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd) 3200 mr->ibmr.rkey = mr->hwmr.lkey; 3201 return &mr->ibmr; 3202 3203 mbx_err: 3204 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); 3205 pbl_err: 3206 kfree(mr); 3207 return ERR_PTR(status); 3208 } 3209
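
/*
 * Illustrative sketch only (excluded from the build): a minimal example of
 * how a kernel consumer would exercise the send and completion paths
 * implemented above (ocrdma_post_send()/ocrdma_poll_cq()) through the
 * generic ib_post_send()/ib_poll_cq() entry points of this era's verbs API.
 * The qp/cq objects and the dma_addr/lkey/rkey/len values are assumed to
 * have been set up elsewhere; they are placeholders, not driver state.
 */
#if 0
static int example_rdma_write_and_poll(struct ib_qp *qp, struct ib_cq *cq,
				       u64 local_dma_addr, u32 local_lkey,
				       u64 remote_addr, u32 rkey, u32 len)
{
	struct ib_send_wr wr, *bad_wr = NULL;
	struct ib_sge sge;
	struct ib_wc wc;
	int ret;

	/* single local buffer described by one SGE */
	sge.addr = local_dma_addr;
	sge.length = len;
	sge.lkey = local_lkey;

	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_RDMA_WRITE;
	wr.send_flags = IB_SEND_SIGNALED;	/* request a work completion */
	wr.sg_list = &sge;
	wr.num_sge = 1;
	wr.wr.rdma.remote_addr = remote_addr;	/* consumed by ocrdma_build_write() */
	wr.wr.rdma.rkey = rkey;

	ret = ib_post_send(qp, &wr, &bad_wr);
	if (ret)
		return ret;

	/* busy-poll for the completion; a real consumer would normally
	 * arm the CQ with ib_req_notify_cq() and wait for the event.
	 */
	do {
		ret = ib_poll_cq(cq, 1, &wc);
	} while (ret == 0);

	if (ret < 0)
		return ret;
	return (wc.status == IB_WC_SUCCESS) ? 0 : -EIO;
}
#endif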